Dataset schema (column, type, value range):
  text        stringlengths   12 to 1.05M
  repo_name   stringlengths   5 to 86
  path        stringlengths   4 to 191
  language    stringclasses   1 value
  license     stringclasses   15 values
  size        int32           12 to 1.05M
  keyword     listlengths     1 to 23
  text_hash   stringlengths   64 to 64
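The rows below follow this schema. As a rough illustration (not part of the original dump), here is a minimal sketch of how such a shard could be loaded and inspected with pandas, assuming it has been exported as JSON Lines; the file name code_shard.jsonl and the exact filtering are assumptions, not taken from the source.

# Hypothetical sketch: load a JSON Lines shard with the schema above and inspect it.
# The file name is a placeholder; adapt it to wherever the real dump lives.
import pandas as pd

df = pd.read_json("code_shard.jsonl", lines=True)
# Expected columns: text, repo_name, path, language, license, size, keyword, text_hash

# Basic sanity checks against the declared schema.
assert df["text_hash"].str.len().eq(64).all()   # 64-character hex digests
print(df["license"].value_counts())             # up to 15 distinct licenses
print(df["keyword"].str.len().describe())       # keyword lists of 1 to 23 tags

# Example: keep permissively licensed files tagged with the "Gaussian" keyword.
subset = df[df["license"].isin(["mit", "bsd-3-clause"])
            & df["keyword"].apply(lambda ks: "Gaussian" in ks)]
print(subset[["repo_name", "path", "size"]])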
# -*- coding: utf-8 -*- # Copyright (C) 2017 by Pedro Mendes, Virginia Tech Intellectual # Properties, Inc., University of Heidelberg, and University of # of Connecticut School of Medicine. # All rights reserved. # Copyright (C) 2010 - 2016 by Pedro Mendes, Virginia Tech Intellectual # Properties, Inc., University of Heidelberg, and The University # of Manchester. # All rights reserved. # Copyright (C) 2009 by Pedro Mendes, Virginia Tech Intellectual # Properties, Inc., EML Research, gGmbH, University of Heidelberg, # and The University of Manchester. # All rights reserved. import COPASI import unittest from types import * import Test_CreateSimpleModel import math TIME_COURSE_DATA=""" New Model[Time] A[ParticleNumber] B[ParticleNumber] 0 100 0 0.25 94 6 0.5 88 12 0.75 81 19 1 68 32 1.25 62 38 1.5 52 48 1.75 47 53 2 36 64 2.25 31 69 2.5 28 72 2.75 26 74 3 21 79 3.25 19 81 3.5 16 84 3.75 14 86 4 10 90 4.25 9 91 4.5 8 92 4.75 8 92 5 7 93 0 100 0 0.25 86 14 0.5 81 19 0.75 74 26 1 64 36 1.25 59 41 1.5 52 48 1.75 45 55 2 36 64 2.25 31 69 2.5 28 72 2.75 27 73 3 22 78 3.25 20 80 3.5 18 82 3.75 18 82 4 17 83 4.25 17 83 4.5 15 85 4.75 12 88 5 9 91 0 100 0 0.25 90 10 0.5 83 17 0.75 71 29 1 62 38 1.25 55 45 1.5 48 52 1.75 45 55 2 45 55 2.25 36 64 2.5 30 70 2.75 28 72 3 21 79 3.25 15 85 3.5 11 89 3.75 10 90 4 9 91 4.25 8 92 4.5 7 93 4.75 5 95 5 5 95 0 100 0 0.25 89 11 0.5 79 21 0.75 70 30 1 64 36 1.25 58 42 1.5 51 49 1.75 45 55 2 40 60 2.25 34 66 2.5 28 72 2.75 22 78 3 21 79 3.25 21 79 3.5 19 81 3.75 19 81 4 15 85 4.25 11 89 4.5 11 89 4.75 9 91 5 8 92 0 100 0 0.25 90 10 0.5 77 23 0.75 65 35 1 59 41 1.25 54 46 1.5 49 51 1.75 43 57 2 38 62 2.25 37 63 2.5 32 68 2.75 28 72 3 20 80 3.25 18 82 3.5 16 84 3.75 16 84 4 15 85 4.25 15 85 4.5 13 87 4.75 12 88 5 9 91 0 100 0 0.25 86 14 0.5 72 28 0.75 67 33 1 61 39 1.25 53 47 1.5 49 51 1.75 41 59 2 36 64 2.25 28 72 2.5 25 75 2.75 21 79 3 19 81 3.25 16 84 3.5 15 85 3.75 12 88 4 8 92 4.25 7 93 4.5 6 94 4.75 5 95 5 5 95 0 100 0 0.25 89 11 0.5 81 19 0.75 71 29 1 59 41 1.25 51 49 1.5 44 56 1.75 38 62 2 35 65 2.25 31 69 2.5 29 71 2.75 25 75 3 21 79 3.25 20 80 3.5 19 81 3.75 17 83 4 15 85 4.25 14 86 4.5 12 88 4.75 10 90 5 9 91 0 100 0 0.25 87 13 0.5 74 26 0.75 64 36 1 58 42 1.25 49 51 1.5 42 58 1.75 40 60 2 35 65 2.25 32 68 2.5 28 72 2.75 26 74 3 21 79 3.25 18 82 3.5 16 84 3.75 15 85 4 14 86 4.25 10 90 4.5 9 91 4.75 7 93 5 6 94 0 100 0 0.25 87 13 0.5 75 25 0.75 72 28 1 65 35 1.25 54 46 1.5 51 49 1.75 48 52 2 41 59 2.25 39 61 2.5 35 65 2.75 32 68 3 28 72 3.25 25 75 3.5 22 78 3.75 20 80 4 20 80 4.25 19 81 4.5 18 82 4.75 16 84 5 14 86 0 100 0 0.25 92 8 0.5 82 18 0.75 73 27 1 67 33 1.25 61 39 1.5 54 46 1.75 47 53 2 40 60 2.25 36 64 2.5 29 71 2.75 26 74 3 24 76 3.25 22 78 3.5 19 81 3.75 18 82 4 17 83 4.25 16 84 4.5 15 85 4.75 13 87 5 11 89 """ class Test_RunParameterFitting(unittest.TestCase): def setUp(self): self.model=Test_CreateSimpleModel.createModel() def test_runParameterFittingOnSimpleModel(self): self.datamodel=COPASI.CRootContainer.addDatamodel() fitTask=COPASI.self.datamodel.addTask(COPASI.CCopasiTask.parameterFitting) self.assert_(fitTask!=None) self.assert_(fitTask.__class__==COPASI.CFitTask) fitMethod=fitTask.getMethod() self.assert_(fitMethod!=None) # the object must be an instance of COptMethod or a subclass thereof # (CFitMethod) self.assert_(isinstance(fitMethod,COPASI.COptMethod)) fitProblem=fitTask.getProblem() self.assert_(fitProblem!=None) self.assert_(fitProblem.__class__==COPASI.CFitProblem) experimentSet=fitProblem.getParameter("Experiment Set") 
self.assert_(experimentSet!=None) self.assert_(experimentSet.__class__ == COPASI.CExperimentSet) self.assert_(experimentSet.getExperimentCount() == 0) # first experiment experiment=COPASI.CExperiment() self.assert_(experiment!=None) self.assert_(experiment.__class__==COPASI.CExperiment) experiment.setFileName("parameter_fitting_data_simple.txt") experiment.setFirstRow(1) self.assert_(experiment.getFirstRow()==1) experiment.setLastRow(22) self.assert_(experiment.getLastRow()==22) experiment.setHeaderRow(1) self.assert_(experiment.getHeaderRow()==1) experiment.setExperimentType(COPASI.CCopasiTask.timeCourse) self.assert_(experiment.getExperimentType()==COPASI.CCopasiTask.timeCourse) experiment.setNumColumns(3) self.assert_(experiment.getNumColumns()==3) objectMap=experiment.getObjectMap() self.assert_(objectMap!=None) self.assert_(objectMap.__class__==COPASI.CExperimentObjectMap) result=objectMap.setNumCols(3) self.assert_(result==True) result=objectMap.setRole(0,COPASI.CExperiment.time) self.assert_(result==True) self.assert_(objectMap.getRole(0)==COPASI.CExperiment.time) model=self.datamodel.getModel() self.assert_(model!=None) self.assert_(model.__class__==COPASI.CModel) timeReference=model.getObject(COPASI.CCommonName("Reference=Time")) self.assert_(timeReference!=None) self.assert_(timeReference.__class__==COPASI.CDataObject) objectMap.setObjectCN(0,timeReference.getCN()) # getObjectCN returns a string whereas getCN returns a # CCommonName self.assert_(objectMap.getObjectCN(0)==timeReference.getCN().getString()) metabA=model.getMetabolite(0) self.assert_(metabA!=None) self.assert_(metabA.__class__==COPASI.CMetab) metabB=None if(metabA.getObjectName()!="A"): metabB=metabA metabA=model.getMetabolite(1) self.assert_(metabA!=None) self.assert_(metabA.__class__==COPASI.CMetab) else: metabB=model.getMetabolite(1) self.assert_(metabB!=None) self.assert_(metabB.__class__==COPASI.CMetab) objectMap.setRole(1,COPASI.CExperiment.dependent) particleReference=metabA.getObject(COPASI.CCommonName("Reference=ParticleNumber")) self.assert_(particleReference!=None) self.assert_(particleReference.__class__==COPASI.CDataObject) objectMap.setObjectCN(1,particleReference.getCN()) # getObjectCN returns a string whereas getCN returns a # CCommonName self.assert_(objectMap.getObjectCN(1)==particleReference.getCN().getString()) objectMap.setRole(2,COPASI.CExperiment.dependent) particleReference=metabB.getObject(COPASI.CCommonName("Reference=ParticleNumber")) self.assert_(particleReference!=None) self.assert_(particleReference.__class__==COPASI.CDataObject) objectMap.setObjectCN(2,particleReference.getCN()) # getObjectCN returns a string whereas getCN returns a # CCommonName self.assert_(objectMap.getObjectCN(2)==particleReference.getCN().getString()) # reading from string is not possible with the current C++ API #result=experiment.read(TIME_COURSE_DATA,False) #self.assert_(result==True) experimentSet.addExperiment(experiment) self.assert_(experimentSet.getExperimentCount()==1) # addExperiment makes a copy, so we need to get the added experiment # again experiment=experimentSet.getExperiment(0) self.assert_(experiment!=None) self.assert_(experiment.__class__==COPASI.CExperiment) reaction=model.getReaction(0) self.assert_(reaction!=None) self.assert_(reaction.__class__==COPASI.CReaction) self.assert_(reaction.isLocalParameter(0)==True) parameter=reaction.getParameters().getParameter(0) self.assert_(parameter!=None) self.assert_(parameter.__class__==COPASI.CCopasiParameter) # define CFitItems 
self.assert_(fitProblem.getOptItemSize()==0) parameterReference=parameter.getObject(COPASI.CCommonName("Reference=Value")) self.assert_(parameterReference!=None) self.assert_(parameterReference.__class__==COPASI.CDataObject) fitItem=COPASI.CFitItem() self.assert_(fitItem!=None) self.assert_(fitItem.__class__==COPASI.CFitItem) fitItem.setObjectCN(parameterReference.getCN()) fitItem.setStartValue(4.0) fitItem.setLowerBound(COPASI.CCommonName("0.0001")) fitItem.setUpperBound(COPASI.CCommonName("10")) # add the experiment to the fit item #fitItem.addExperiment(experiment.getKey()) self.assert_(fitItem.getStartValue()==4.0) #self.assert_(fitItem.getExperimentCount()==1) # add the fit item to the correct parameter group optimizationItemGroup=fitProblem.getParameter("OptimizationItemList") self.assert_(optimizationItemGroup!=None) self.assert_(optimizationItemGroup.__class__==COPASI.CCopasiParameterGroup) self.assert_(optimizationItemGroup.size()==0) optimizationItemGroup.addParameter(fitItem) self.assert_(optimizationItemGroup.size()==1) # addParameter makes a copy of the fit item, so we have to get it back fitItem=optimizationItemGroup.getParameter(0) self.assert_(fitItem!=None) self.assert_(fitItem.__class__==COPASI.CFitItem) result=True try: result=fitTask.process(True) except: result=False self.assert_(result==True) # just check if the result is in the correct range. The actual value is # probably off since we use only one experiment to fit self.assert_((fitItem.getLocalValue()-0.5)/0.5 < 1.0) def test_runParameterFittingOnExtendedModel(self): Test_CreateSimpleModel.extendModel(self.model) def suite(): tests=[ 'test_runParameterFittingOnSimpleModel' ,'test_runParameterFittingOnExtendedModel' ] return unittest.TestSuite(map(Test_RunParameterFitting,tests)) if(__name__ == '__main__'): unittest.TextTestRunner(verbosity=2).run(suite())
repo_name: jonasfoe/COPASI
path: copasi/bindings/python/unittests/Test_RunParameterFitting.py
language: Python
license: artistic-2.0
size: 10,122
keyword: [ "COPASI" ]
text_hash: 76afea6efa945ede8401333bd0aa25216382c3853425f456f3b2422510f3e1cb
# MIT License # # Copyright (c) 2017 Anders Steen Christensen, Lars Andersen Bratholm and Bing Huang # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from __future__ import print_function import numpy as np import itertools as itl from .frepresentations import fgenerate_coulomb_matrix from .frepresentations import fgenerate_unsorted_coulomb_matrix from .frepresentations import fgenerate_local_coulomb_matrix from .frepresentations import fgenerate_atomic_coulomb_matrix from .frepresentations import fgenerate_eigenvalue_coulomb_matrix from .frepresentations import fgenerate_bob from .data import NUCLEAR_CHARGE from .slatm import get_boa from .slatm import get_sbop from .slatm import get_sbot def vector_to_matrix(v): """ Converts a representation from 1D vector to 2D square matrix. :param v: 1D input representation. :type v: numpy array :return: Square matrix representation. :rtype: numpy array """ if not (np.sqrt(8*v.shape[0]+1) == int(np.sqrt(8*v.shape[0]+1))): print("ERROR: Can not make a square matrix.") exit(1) n = v.shape[0] l = (-1 + int(np.sqrt(8*n+1)))//2 M = np.empty((l,l)) index = 0 for i in range(l): for j in range(l): if j > i: continue M[i,j] = v[index] M[j,i] = M[i,j] index += 1 return M def generate_coulomb_matrix(nuclear_charges, coordinates, size = 23, sorting = "row-norm"): """ Creates a Coulomb Matrix representation of a molecule. Sorting of the elements can either be done by ``sorting="row-norm"`` or ``sorting="unsorted"``. A matrix :math:`M` is constructed with elements .. math:: M_{ij} = \\begin{cases} \\tfrac{1}{2} Z_{i}^{2.4} & \\text{if } i = j \\\\ \\frac{Z_{i}Z_{j}}{\\| {\\bf R}_{i} - {\\bf R}_{j}\\|} & \\text{if } i \\neq j \\end{cases}, where :math:`i` and :math:`j` are atom indices, :math:`Z` is nuclear charge and :math:`\\bf R` is the coordinate in euclidean space. If ``sorting = 'row-norm'``, the atom indices are reordered such that :math:`\\sum_j M_{1j}^2 \\geq \\sum_j M_{2j}^2 \\geq ... \\geq \\sum_j M_{nj}^2` The upper triangular of M, including the diagonal, is concatenated to a 1D vector representation. If ``sorting = 'unsorted``, the elements are sorted in the same order as the input coordinates and nuclear charges. The representation is calculated using an OpenMP parallel Fortran routine. 
:param nuclear_charges: Nuclear charges of the atoms in the molecule :type nuclear_charges: numpy array :param coordinates: 3D Coordinates of the atoms in the molecule :type coordinates: numpy array :param size: The size of the largest molecule supported by the representation :type size: integer :param sorting: How the atom indices are sorted ('row-norm', 'unsorted') :type sorting: string :return: 1D representation - shape (size(size+1)/2,) :rtype: numpy array """ if (sorting == "row-norm"): return fgenerate_coulomb_matrix(nuclear_charges, \ coordinates, size) elif (sorting == "unsorted"): return fgenerate_unsorted_coulomb_matrix(nuclear_charges, \ coordinates, size) else: print("ERROR: Unknown sorting scheme requested") raise SystemExit def generate_atomic_coulomb_matrix(nuclear_charges, coordinates, size = 23, sorting = "distance", central_cutoff = 1e6, central_decay = -1, interaction_cutoff = 1e6, interaction_decay = -1, indices = None): """ Creates a Coulomb Matrix representation of the local environment of a central atom. For each central atom :math:`k`, a matrix :math:`M` is constructed with elements .. math:: M_{ij}(k) = \\begin{cases} \\tfrac{1}{2} Z_{i}^{2.4} \\cdot f_{ik}^2 & \\text{if } i = j \\\\ \\frac{Z_{i}Z_{j}}{\\| {\\bf R}_{i} - {\\bf R}_{j}\\|} \\cdot f_{ik}f_{jk}f_{ij} & \\text{if } i \\neq j \\end{cases}, where :math:`i`, :math:`j` and :math:`k` are atom indices, :math:`Z` is nuclear charge and :math:`\\bf R` is the coordinate in euclidean space. :math:`f_{ij}` is a function that masks long range effects: .. math:: f_{ij} = \\begin{cases} 1 & \\text{if } \\|{\\bf R}_{i} - {\\bf R}_{j} \\| \\leq r - \Delta r \\\\ \\tfrac{1}{2} \\big(1 + \\cos\\big(\\pi \\tfrac{\\|{\\bf R}_{i} - {\\bf R}_{j} \\| - r + \Delta r}{\Delta r} \\big)\\big) & \\text{if } r - \Delta r < \\|{\\bf R}_{i} - {\\bf R}_{j} \\| \\leq r - \Delta r \\\\ 0 & \\text{if } \\|{\\bf R}_{i} - {\\bf R}_{j} \\| > r \\end{cases}, where the parameters ``central_cutoff`` and ``central_decay`` corresponds to the variables :math:`r` and :math:`\Delta r` respectively for interactions involving the central atom, and ``interaction_cutoff`` and ``interaction_decay`` corresponds to the variables :math:`r` and :math:`\Delta r` respectively for interactions not involving the central atom. if ``sorting = 'row-norm'``, the atom indices are ordered such that :math:`\\sum_j M_{1j}(k)^2 \\geq \\sum_j M_{2j}(k)^2 \\geq ... \\geq \\sum_j M_{nj}(k)^2` if ``sorting = 'distance'``, the atom indices are ordered such that .. math:: \\|{\\bf R}_{1} - {\\bf R}_{k}\\| \\leq \\|{\\bf R}_{2} - {\\bf R}_{k}\\| \\leq ... \\leq \\|{\\bf R}_{n} - {\\bf R}_{k}\\| The upper triangular of M, including the diagonal, is concatenated to a 1D vector representation. The representation can be calculated for a subset by either specifying ``indices = [0,1,...]``, where :math:`[0,1,...]` are the requested atom indices, or by specifying ``indices = 'C'`` to only calculate central carbon atoms. The representation is calculated using an OpenMP parallel Fortran routine. 
:param nuclear_charges: Nuclear charges of the atoms in the molecule :type nuclear_charges: numpy array :param coordinates: 3D Coordinates of the atoms in the molecule :type coordinates: numpy array :param size: The size of the largest molecule supported by the representation :type size: integer :param sorting: How the atom indices are sorted ('row-norm', 'distance') :type sorting: string :param central_cutoff: The distance from the central atom, where the coulomb interaction element will be zero :type central_cutoff: float :param central_decay: The distance over which the the coulomb interaction decays from full to none :type central_decay: float :param interaction_cutoff: The distance between two non-central atom, where the coulomb interaction element will be zero :type interaction_cutoff: float :param interaction_decay: The distance over which the the coulomb interaction decays from full to none :type interaction_decay: float :param indices: Subset indices or atomtype :type indices: Nonetype/array/string :return: nD representation - shape (:math:`N_{atoms}`, size(size+1)/2) :rtype: numpy array """ if indices == None: nindices = len(nuclear_charges) indices = np.arange(1,1+nindices, 1, dtype = int) elif type("") == type(indices): if indices in NUCLEAR_CHARGE: indices = np.where(nuclear_charges == NUCLEAR_CHARGE[indices])[0] + 1 nindices = indices.size if nindices == 0: return np.zeros((0,0)) else: print("ERROR: Unknown value %s given for 'indices' variable" % indices) raise SystemExit else: indices = np.asarray(indices, dtype = int) + 1 nindices = indices.size if (sorting == "row-norm"): return fgenerate_local_coulomb_matrix(indices, nindices, nuclear_charges, coordinates, nuclear_charges.size, size, central_cutoff, central_decay, interaction_cutoff, interaction_decay) elif (sorting == "distance"): return fgenerate_atomic_coulomb_matrix(indices, nindices, nuclear_charges, coordinates, nuclear_charges.size, size, central_cutoff, central_decay, interaction_cutoff, interaction_decay) else: print("ERROR: Unknown sorting scheme requested") raise SystemExit def generate_eigenvalue_coulomb_matrix(nuclear_charges, coordinates, size = 23): """ Creates an eigenvalue Coulomb Matrix representation of a molecule. A matrix :math:`M` is constructed with elements .. math:: M_{ij} = \\begin{cases} \\tfrac{1}{2} Z_{i}^{2.4} & \\text{if } i = j \\\\ \\frac{Z_{i}Z_{j}}{\\| {\\bf R}_{i} - {\\bf R}_{j}\\|} & \\text{if } i \\neq j \\end{cases}, where :math:`i` and :math:`j` are atom indices, :math:`Z` is nuclear charge and :math:`\\bf R` is the coordinate in euclidean space. The molecular representation of the molecule is then the sorted eigenvalues of M. The representation is calculated using an OpenMP parallel Fortran routine. :param nuclear_charges: Nuclear charges of the atoms in the molecule :type nuclear_charges: numpy array :param coordinates: 3D Coordinates of the atoms in the molecule :type coordinates: numpy array :param size: The size of the largest molecule supported by the representation :type size: integer :return: 1D representation - shape (size, ) :rtype: numpy array """ return fgenerate_eigenvalue_coulomb_matrix(nuclear_charges, coordinates, size) def generate_bob(nuclear_charges, coordinates, atomtypes, size=23, asize = {"O":3, "C":7, "N":3, "H":16, "S":1}): """ Creates a Bag of Bonds (BOB) representation of a molecule. The representation expands on the coulomb matrix representation. For each element a bag (vector) is constructed for self interactions (e.g. ('C', 'H', 'O')). 
For each element pair a bag is constructed for interatomic interactions (e.g. ('CC', 'CH', 'CO', 'HH', 'HO', 'OO')), sorted by value. The self interaction of element :math:`I` is given by :math:`\\tfrac{1}{2} Z_{I}^{2.4}`, with :math:`Z_{i}` being the nuclear charge of element :math:`i` The interaction between atom :math:`i` of element :math:`I` and atom :math:`j` of element :math:`J` is given by :math:`\\frac{Z_{I}Z_{J}}{\\| {\\bf R}_{i} - {\\bf R}_{j}\\|}` with :math:`R_{i}` being the euclidean coordinate of atom :math:`i`. The sorted bags are concatenated to an 1D vector representation. The representation is calculated using an OpenMP parallel Fortran routine. :param nuclear_charges: Nuclear charges of the atoms in the molecule :type nuclear_charges: numpy array :param coordinates: 3D Coordinates of the atoms in the molecule :type coordinates: numpy array :param size: The maximum number of atoms in the representation :type size: integer :param asize: The maximum number of atoms of each element type supported by the representation :type asize: dictionary :return: 1D representation :rtype: numpy array """ n = 0 atoms = sorted(asize, key=asize.get) nmax = [asize[key] for key in atoms] ids = np.zeros(len(nmax), dtype=int) for i, (key, value) in enumerate(zip(atoms,nmax)): n += value * (1+value) ids[i] = NUCLEAR_CHARGE[key] for j in range(i): v = nmax[j] n += 2 * value * v n /= 2 return fgenerate_bob(nuclear_charges, coordinates, nuclear_charges, ids, nmax, n) def get_slatm_mbtypes(nuclear_charges, pbc='000'): """ Get the list of minimal types of many-body terms in a dataset. This resulting list is necessary as input in the ``generate_slatm_representation()`` function. :param nuclear_charges: A list of the nuclear charges for each compound in the dataset. :type nuclear_charges: list of numpy arrays :param pbc: periodic boundary condition along x,y,z direction, defaulted to '000', i.e., molecule :type pbc: string :return: A list containing the types of many-body terms. :rtype: list """ zs = nuclear_charges nm = len(zs) zsmax = set() nas = [] zs_ravel = [] for zsi in zs: na = len(zsi); nas.append(na) zsil = list(zsi); zs_ravel += zsil zsmax.update( zsil ) zsmax = np.array( list(zsmax) ) nass = [] for i in range(nm): zsi = np.array(zs[i],np.int) nass.append( [ (zi == zsi).sum() for zi in zsmax ] ) nzmax = np.max(np.array(nass), axis=0) nzmax_u = [] if pbc != '000': # the PBC will introduce new many-body terms, so set # nzmax to 3 if it's less than 3 for nzi in nzmax: if nzi <= 2: nzi = 3 nzmax_u.append(nzi) nzmax = nzmax_u boas = [ [zi,] for zi in zsmax ] bops = [ [zi,zi] for zi in zsmax ] + list( itl.combinations(zsmax,2) ) bots = [] for i in zsmax: for bop in bops: j,k = bop tas = [ [i,j,k], [i,k,j], [j,i,k] ] for tasi in tas: if (tasi not in bots) and (tasi[::-1] not in bots): nzsi = [ (zj == tasi).sum() for zj in zsmax ] if np.all(nzsi <= nzmax): bots.append( tasi ) mbtypes = boas + bops + bots return mbtypes #, np.array(zs_ravel), np.array(nas) def generate_slatm(coordinates, nuclear_charges, mbtypes, unit_cell=None, local=False, sigmas=[0.05,0.05], dgrids=[0.03,0.03], rcut=4.8, alchemy=False, pbc='000', rpower=6): """ Generate Spectrum of London and Axillrod-Teller-Muto potential (SLATM) representation. Both global (``local=False``) and local (``local=True``) SLATM are available. A version that works for periodic boundary conditions will be released soon. NOTE: You will need to run the ``get_slatm_mbtypes()`` function to get the ``mbtypes`` input (or generate it manually). 
:param coordinates: Input coordinates :type coordinates: numpy array :param nuclear_charges: List of nuclear charges. :type nuclear_charges: numpy array :param mbtypes: Many-body types for the whole dataset, including 1-, 2- and 3-body types. Could be obtained by calling ``get_slatm_mbtypes()``. :type mbtypes: list :param local: Generate a local representation. Defaulted to False (i.e., global representation); otherwise, atomic version. :type local: bool :param sigmas: Controlling the width of Gaussian smearing function for 2- and 3-body parts, defaulted to [0.05,0.05], usually these do not need to be adjusted. :type sigmas: list :param dgrids: The interval between two sampled internuclear distances and angles, defaulted to [0.03,0.03], no need for change, compromised for speed and accuracy. :type dgrids: list :param rcut: Cut-off radius, defaulted to 4.8 Angstrom. :type rcut: float :param alchemy: Swith to use the alchemy version of SLATM. (default=False) :type alchemy: bool :param pbc: defaulted to '000', meaning it's a molecule; the three digits in the string corresponds to x,y,z direction :type pbc: string :param rpower: The power of R in 2-body potential, defaulted to London potential (=6). :type rpower: float :return: 1D SLATM representation :rtype: numpy array """ c = unit_cell iprt=False if c is None: c = np.array([[1,0,0],[0,1,0],[0,0,1]]) if pbc != '000': # print(' -- handling systems with periodic boundary condition') assert c != None, 'ERROR: Please specify unit cell for SLATM' # ======================================================================= # PBC may introduce new many-body terms, so at the stage of get statistics # info from db, we've already considered this point by letting maximal number # of nuclear charges being 3. # ======================================================================= zs = nuclear_charges na = len(zs) coords = coordinates obj = [ zs, coords, c ] iloc = local if iloc: mbs = [] X2Ns = [] for ia in range(na): # if iprt: print ' -- ia = ', ia + 1 n1 = 0; n2 = 0; n3 = 0 mbs_ia = np.zeros(0) icount = 0 for mbtype in mbtypes: if len(mbtype) == 1: mbsi = get_boa(mbtype[0], np.array([zs[ia],])) #print ' -- mbsi = ', mbsi if alchemy: n1 = 1 n1_0 = mbs_ia.shape[0] if n1_0 == 0: mbs_ia = np.concatenate( (mbs_ia, mbsi), axis=0 ) elif n1_0 == 1: mbs_ia += mbsi else: raise '#ERROR' else: n1 += len(mbsi) mbs_ia = np.concatenate( (mbs_ia, mbsi), axis=0 ) elif len(mbtype) == 2: #print ' 001, pbc = ', pbc mbsi = get_sbop(mbtype, obj, iloc=iloc, ia=ia, \ sigma=sigmas[0], dgrid=dgrids[0], rcut=rcut, \ pbc=pbc, rpower=rpower) mbsi *= 0.5 # only for the two-body parts, local rpst #print ' 002' if alchemy: n2 = len(mbsi) n2_0 = mbs_ia.shape[0] if n2_0 == n1: mbs_ia = np.concatenate( (mbs_ia, mbsi), axis=0 ) elif n2_0 == n1 + n2: t = mbs_ia[n1:n1+n2] + mbsi mbs_ia[n1:n1+n2] = t else: raise '#ERROR' else: n2 += len(mbsi) mbs_ia = np.concatenate( (mbs_ia, mbsi), axis=0 ) else: # len(mbtype) == 3: mbsi = get_sbot(mbtype, obj, iloc=iloc, ia=ia, \ sigma=sigmas[1], dgrid=dgrids[1], rcut=rcut, pbc=pbc) if alchemy: n3 = len(mbsi) n3_0 = mbs_ia.shape[0] if n3_0 == n1 + n2: mbs_ia = np.concatenate( (mbs_ia, mbsi), axis=0 ) elif n3_0 == n1 + n2 + n3: t = mbs_ia[n1+n2:n1+n2+n3] + mbsi mbs_ia[n1+n2:n1+n2+n3] = t else: raise '#ERROR' else: n3 += len(mbsi) mbs_ia = np.concatenate( (mbs_ia, mbsi), axis=0 ) mbs.append( mbs_ia ) X2N = [n1,n2,n3]; if X2N not in X2Ns: X2Ns.append(X2N) assert len(X2Ns) == 1, '#ERROR: multiple `X2N ???' 
else: n1 = 0; n2 = 0; n3 = 0 mbs = np.zeros(0) for mbtype in mbtypes: if len(mbtype) == 1: mbsi = get_boa(mbtype[0], zs) if alchemy: n1 = 1 n1_0 = mbs.shape[0] if n1_0 == 0: mbs = np.concatenate( (mbs, [sum(mbsi)] ), axis=0 ) elif n1_0 == 1: mbs += sum(mbsi ) else: raise '#ERROR' else: n1 += len(mbsi) mbs = np.concatenate( (mbs, mbsi), axis=0 ) elif len(mbtype) == 2: mbsi = get_sbop(mbtype, obj, sigma=sigmas[0], \ dgrid=dgrids[0], rcut=rcut, rpower=rpower) if alchemy: n2 = len(mbsi) n2_0 = mbs.shape[0] if n2_0 == n1: mbs = np.concatenate( (mbs, mbsi), axis=0 ) elif n2_0 == n1 + n2: t = mbs[n1:n1+n2] + mbsi mbs[n1:n1+n2] = t else: raise '#ERROR' else: n2 += len(mbsi) mbs = np.concatenate( (mbs, mbsi), axis=0 ) else: # len(mbtype) == 3: mbsi = get_sbot(mbtype, obj, sigma=sigmas[1], \ dgrid=dgrids[1], rcut=rcut) if alchemy: n3 = len(mbsi) n3_0 = mbs.shape[0] if n3_0 == n1 + n2: mbs = np.concatenate( (mbs, mbsi), axis=0 ) elif n3_0 == n1 + n2 + n3: t = mbs[n1+n2:n1+n2+n3] + mbsi mbs[n1+n2:n1+n2+n3] = t else: raise '#ERROR' else: n3 += len(mbsi) mbs = np.concatenate( (mbs, mbsi), axis=0 ) return mbs
repo_name: qmlcode/qml
path: qml/representations.py
language: Python
license: mit
size: 22,846
keyword: [ "Gaussian" ]
text_hash: 678793d89b612c33af984a8888af28ec416f9e358f6cc44dc523c856aaac8bab
# ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### # <pep8 compliant> """The NeuroBlender imports (surfaces) module. NeuroBlender is a Blender add-on to create artwork from neuroscientific data. This module implements importing surfaces into NeuroBlender. """ import os import numpy as np from mathutils import Matrix import bpy from bpy.types import (Operator, OperatorFileListElement) from bpy.props import (BoolProperty, StringProperty, CollectionProperty, EnumProperty, FloatVectorProperty, FloatProperty) from bpy_extras.io_utils import ImportHelper from .. import (materials as nb_ma, utils as nb_ut) class NB_OT_import_surfaces(Operator, ImportHelper): bl_idname = "nb.import_surfaces" bl_label = "Import surfaces" bl_description = "Import surfaces as mesh data" bl_options = {"REGISTER", "UNDO", "PRESET"} directory = StringProperty(subtype="FILE_PATH") files = CollectionProperty(name="Filepath", type=OperatorFileListElement) filter_glob = StringProperty( options={"HIDDEN"}, # NOTE: multiline comment """ """ not working here default="*.obj;*.stl;" + "*.gii;" + "*.white;*.pial;*.inflated;*.sphere;*.orig;" + "*.vtk;" + "*.blend;") name = StringProperty( name="Name", description="Specify a name for the object (default: filename)", default="") parentpath = StringProperty( name="Parentpath", description="The path to the parent of the object", default="nb") sformfile = StringProperty( name="sformfile", description="", default="", subtype="FILE_PATH") beautify = BoolProperty( name="Beautify", description="Apply initial smoothing on surfaces", default=True) colourtype = EnumProperty( name="", description="Apply this surface colour method", default="primary6", items=[("none", "none", "none", 1), ("golden_angle", "golden_angle", "golden_angle", 2), ("primary6", "primary6", "primary6", 3), ("random", "random", "random", 4), ("directional", "directional", "directional", 5), ("pick", "pick", "pick", 6)]) colourpicker = FloatVectorProperty( name="", description="Pick a colour for the tract(s)", default=[1.0, 0.0, 0.0], subtype="COLOR") transparency = FloatProperty( name="Transparency", description="Set the transparency", default=1., min=0., max=1.) 
def execute(self, context): filenames = [f.name for f in self.files] if not filenames: filenames = os.listdir(self.directory) for f in filenames: fpath = os.path.join(self.directory, f) info = self.import_surface(context, fpath) return {"FINISHED"} def draw(self, context): layout = self.layout row = layout.row() row.prop(self, "name") row = layout.row() row.prop(self, "beautify") row = layout.row() row.label(text="Colour: ") row = layout.row() row.prop(self, "colourtype") row = layout.row() if self.colourtype == "pick": row.prop(self, "colourpicker") row = layout.row() row.prop(self, "transparency") def invoke(self, context, event): context.window_manager.fileselect_add(self) return {"RUNNING_MODAL"} def import_surface(self, context, fpath): """Import a surface object. This imports the surfaces found in the specified file. Valid formats include: - .gii (via nibabel) - .white/.pial/.inflated/.sphere/.orig (FreeSurfer) - .obj - .stl - .blend 'sformfile' sets matrix_world to affine transformation. """ scn = context.scene nb = scn.nb ca = [bpy.data.objects, bpy.data.meshes, bpy.data.materials, bpy.data.textures] name = nb_ut.check_name(self.name, fpath, ca) outcome = "failed" ext = os.path.splitext(fpath)[1] try: fun = "self.read_surfaces_{}".format(ext[1:]) surfaces = eval('{}(fpath, name, self.sformfile)'.format(fun)) except NameError: reason = "file format '{}' not supported".format(ext) info = "import {}: {}".format(outcome, reason) return info except (IOError, FileNotFoundError): reason = "file '{}' not valid".format(fpath) info = "import {}: {}".format(outcome, reason) return info except ImportError: reason = "nibabel not found" info = "import {}: {}".format(outcome, reason) return info except: reason = "unknown import error" info = "import {}: {}".format(outcome, reason) raise for surf in surfaces: ob, affine, sformfile = surf ob.matrix_world = affine if ext[1:] == 'blend': name = ob.name props = {"name": name, "filepath": fpath, "sformfile": sformfile} self.surface_to_nb(context, props, ob) if self.colourtype != "none": info_mat = nb_ma.materialise(ob, self.colourtype, self.colourpicker, self.transparency) else: info_mat = "no materialisation" beaudict = {"iterations": 10, "factor": 0.5, "use_x": True, "use_y": True, "use_z": True} if self.beautify: info_beau = self.beautification(ob, beaudict) else: info_beau = 'no beautification' scn.objects.active = ob ob.select = True scn.update() info = "Surface import successful" if nb.settingprops.verbose: infostring = "{}\n" infostring += "name: '{}'\n" infostring += "path: '{}'\n" infostring += "transform: \n" infostring += "{}\n" infostring += "{}\n" infostring += "{}" info = infostring.format(info, name, fpath, affine, info_mat, info_beau) self.report({'INFO'}, info) return "info" @staticmethod def surface_to_nb(context, props, ob): """Import a surface into NeuroBlender.""" scn = context.scene nb = scn.nb group = bpy.data.groups.get("surfaces") or \ bpy.data.groups.new("surfaces") item = nb_ut.add_item(nb, "surfaces", props) # force updates on surfaces item.sformfile = item.sformfile nb_ut.move_to_layer(ob, 1) scn.layers[1] = True try: group.objects.link(ob) except: pass def read_surfaces_obj(self, fpath, name, sformfile): """Import a surface from a .obj file.""" # TODO: multiple objects import # need split_mode='OFF' for loading scalars onto the correct vertices bpy.ops.import_scene.obj(filepath=fpath, axis_forward='Y', axis_up='Z', split_mode='OFF') ob = bpy.context.selected_objects[0] ob.name = name affine = 
nb_ut.read_affine_matrix(sformfile) return [(ob, affine, sformfile)] def read_surfaces_stl(self, fpath, name, sformfile): """Import a surface from a .stl file.""" # TODO: multiple objects import bpy.ops.import_mesh.stl(filepath=fpath, axis_forward='Y', axis_up='Z') ob = bpy.context.selected_objects[0] ob.name = name affine = nb_ut.read_affine_matrix(sformfile) return [(ob, affine, sformfile)] def read_surfaces_gii(self, fpath, name, sformfile): """Import a surface from a .gii file.""" # TODO: multiple objects import scn = bpy.context.scene nb = scn.nb nib = nb_ut.validate_nibabel('.gifti') img = nib.load(fpath) verts = [tuple(vert) for vert in img.darrays[0].data] faces = [tuple(face) for face in img.darrays[1].data] xform = img.darrays[0].coordsys.xform if len(xform) == 16: xform = np.reshape(xform, [4, 4]) affine = Matrix(xform) sformfile = fpath me = bpy.data.meshes.new(name) me.from_pydata(verts, [], faces) ob = bpy.data.objects.new(name, me) scn.objects.link(ob) return [(ob, affine, sformfile)] def read_surfaces_fs(self, fpath, name, sformfile): """Import a surface from a FreeSurfer file.""" scn = bpy.context.scene nb = scn.nb nib = nb_ut.validate_nibabel('.gifti') fsio = nib.freesurfer.io verts, faces = fsio.read_geometry(fpath) verts = [tuple(vert) for vert in verts] faces = [tuple(face) for face in faces] affine = Matrix() me = bpy.data.meshes.new(name) me.from_pydata(verts, [], faces) ob = bpy.data.objects.new(name, me) bpy.context.scene.objects.link(ob) return [(ob, affine, sformfile)] read_surfaces_white = read_surfaces_fs read_surfaces_pial = read_surfaces_fs read_surfaces_inflated = read_surfaces_fs read_surfaces_sphere = read_surfaces_fs read_surfaces_orig = read_surfaces_fs def read_surfaces_blend(self, fpath, name, sformfile=""): """Import a surface from a .blend file.""" if sformfile: affine = Matrix(nb_ut.read_affine_matrix(sformfile)) with bpy.data.libraries.load(fpath) as (data_from, data_to): data_to.objects = data_from.objects surfaces = [] for ob in data_to.objects: if ob is not None: bpy.context.scene.objects.link(ob) ob.name = ob.name.replace(' ', '_') if not sformfile: affine = ob.matrix_world surfaces.append((ob, affine, sformfile)) return surfaces def read_surfaces_vtk(self, fpath, name, sformfile=""): """Return a surface in a .vtk polygon file.""" verts, faces = self.import_vtk_polygons(fpath) verts = [tuple(vert) for vert in verts] faces = [tuple(face) for face in faces] affine = Matrix() me = bpy.data.meshes.new(name) me.from_pydata(verts, [], faces) ob = bpy.data.objects.new(name, me) bpy.context.scene.objects.link(ob) return [(ob, affine, sformfile)] @staticmethod def import_vtk_polygons(vtkfile): """Read points and polylines from file""" with open(vtkfile) as f: read_points = 0 read_polygons = 0 for line in f: tokens = line.rstrip("\n").split(' ') if tokens[0] == "POINTS": read_points = 1 npoints = int(tokens[1]) points = [] elif read_points == 1 and len(points) < npoints * 3: for token in tokens: if token: points.append(float(token)) elif tokens[0] == "POLYGONS": read_polygons = 1 npolys = int(tokens[1]) polygons = [] elif read_polygons == 1 and len(polygons) < npolys: polygon = [] for token in tokens[1:]: if token: polygon.append(int(token)) polygons.append(polygon) elif tokens[0] == '': pass else: pass points = np.reshape(np.array(points), (npoints, 3)) return points, polygons @staticmethod def beautification(ob, argdict={"iterations": 10, "factor": 0.5, "use_x": True, "use_y": True, "use_z": True}): """Smooth the surface mesh.""" mod = 
ob.modifiers.new("smooth", type='SMOOTH') mod.iterations = argdict["iterations"] mod.factor = argdict["factor"] mod.use_x = argdict["use_x"] mod.use_y = argdict["use_y"] mod.use_z = argdict["use_z"] infostring = "smooth: " infostring += "iterations={:d}; " infostring += "factor={:.3f}; " infostring += "use_xyz=[{}, {}, {}];" info = infostring.format(argdict["iterations"], argdict["factor"], argdict["use_x"], argdict["use_y"], argdict["use_z"]) return info
repo_name: michielkleinnijenhuis/NeuroBlender
path: imports/import_surfaces.py
language: Python
license: gpl-3.0
size: 14,240
keyword: [ "VTK" ]
text_hash: cb549aa86ea0085b19b5db9510ac483fa459eb76156e75715cd1be286d91efd0
import random import string import pickle import cherrypy import numpy as np import pandas as pd from scipy.sparse import csr_matrix import re import os from jinja2 import Environment, FileSystemLoader path = os.path.abspath(os.path.dirname(__file__)) config = { 'global' : { 'server.socket_host' : '0.0.0.0', 'server.socket_port' : 7071, 'server.thread_pool' : 8 }, '/css' : { 'tools.staticdir.on' : True, 'tools.staticdir.dir' : os.path.join(path, 'css') }, '/fonts' : { 'tools.staticdir.on' : True, 'tools.staticdir.dir' : os.path.join(path, 'fonts') } } env = Environment(loader=FileSystemLoader('templates')) class DeepThought(object): def __init__(self): self.all_identifiers = pickle.load(open('all_identifiers.pkl')) self.X_tfidf = load_sparse_csr('x_tfidf.csr.npz') self.meta = pickle.load(open('meta.pkl')) self.tfidf_vect = pickle.load(open('tfidf_vect.pkl')) @cherrypy.expose def index(self): template = env.get_template('index.html') return template.render() @cherrypy.expose def arxiv_search(self, identifier='1207.4481'): identifier = identifier.strip() template = env.get_template('arxiv_search') if identifier not in self.all_identifiers: return template.render(identifier=identifier, unknown_id=True) else: test_document_id = self.all_identifiers.index(identifier) test_document = self.X_tfidf[test_document_id] ranked_similarity, ranked_identifiers = self._get_similar_documents(test_document) data_table = self._generate_table(ranked_similarity, ranked_identifiers) return template.render(identifier=identifier, data_table=data_table) #return ''.join(random.sample(string.hexdigits, int(length))) @cherrypy.expose def text_search(self, text='astronomy galaxy star'): test_document = self.tfidf_vect.transform([text]) ranked_similarity, ranked_identifiers = self._get_similar_documents(test_document) data_table = self._generate_table(ranked_similarity, ranked_identifiers) template = env.get_template('arxiv_search') return template.render(search_str=text, data_table=data_table) def _generate_table(self, ranked_similarity, ranked_identifiers): if np.sum(ranked_similarity) < 1e-10: return "No matches found" print ranked_similarity, ranked_identifiers j = 0 table_similarity = [] table_identifier = [] table_title = [] table_link = [] for simil, identifier in zip(ranked_similarity, ranked_identifiers): table_similarity.append(simil) # older ID's look like "astro-ph0410673"; they are more useful with the slash identifier = identifier if '.' 
in identifier else '{}/{}'.format(identifier[:8], identifier[8:]) table_identifier.append(identifier) if identifier in self.meta: title = str(self.meta[identifier]['title']) # strip brackets etc.: title = re.sub(r'(\[\'|\[\"|\'\]|\"\]|\\n|\n)', "", title) table_title.append(title) else: table_title.append('Title N/A') table_link.append('https://arxiv.org/abs/{0}'.format(identifier)) j+=1 print 'at', j if j > 50: break data_table = pd.DataFrame(zip(table_identifier, table_title, table_link, table_similarity), columns = ['identifier', 'title', 'link', 'similarity']) return data_table.to_dict('records') def _get_similar_documents(self, test_document): similarity = np.squeeze((self.X_tfidf * test_document.T).A) similarity_argsort = np.argsort(similarity)[::-1] ranked_similarity = similarity[similarity_argsort] ranked_identifiers = np.array(self.all_identifiers)[similarity_argsort] return ranked_similarity, ranked_identifiers def save_sparse_csr(filename, array): np.savez(filename,data = array.data ,indices=array.indices, indptr =array.indptr, shape=array.shape ) def load_sparse_csr(filename): loader = np.load(filename) return csr_matrix(( loader['data'], loader['indices'], loader['indptr']), shape = loader['shape']) if __name__ == '__main__': print 'loading...' dt = DeepThought() print "loading done" cherrypy.quickstart(dt, '/', config) # cherrypy.quickstart(dt)
repo_name: dojobo/deepthought_web
path: deepthought_web.py
language: Python
license: bsd-3-clause
size: 4,592
keyword: [ "Galaxy" ]
text_hash: a9cbe6e08b928ec35dac3c0c4e38badf9160a14f03053669e58b01363ef0c7ef
import ast import os import sys if sys.version_info.major == 3: from configparser import SafeConfigParser else: from ConfigParser import SafeConfigParser from pathlib2 import Path # Ignore matplotlib deprecation warnings import warnings import matplotlib.cbook warnings.filterwarnings('ignore', category=UserWarning, module='matplotlib') warnings.filterwarnings('ignore', category=FutureWarning) # TODO: Might be unadvisable to suppress all future warnings # Parse config file parser = SafeConfigParser() parser.read('../config_IAM.txt') # Get directory path (3 levels up is the parent directory) dir_prj = str(Path(__file__).parents[3]) # Common values dict_conts = {0: 'Antartica', 1: 'North_America', 2: 'South_America', 3: 'Europe', 4: 'Asia', 5: 'Africa', 6: 'Australia'} ##################################################################################### # Tags to be modified by user ##################################################################################### do_email = parser.getboolean('CONTROL', 'do_email') email_list = ast.literal_eval(parser.get('CONTROL', 'email_list')) SHFT_MAP = parser.get('CONTROL', 'SHFT_MAP') # Use Andreas or Butler? MOVIE_SEP = 10 do_LUH1 = parser.getboolean('CONTROL', 'do_LUH1') PLOT_HYDE = parser.getboolean('CONTROL', 'PLOT_HYDE') PREPROCESS_GCAM = parser.getboolean('CONTROL', 'PREPROCESS_GCAM') PREPROCESS_IMAG = parser.getboolean('CONTROL', 'PREPROCESS_IMAG') CONVERT_WH = parser.getboolean('CONTROL', 'CONVERT_WH') # Convert WH information from AEZ to country level ending_diag_cols = ast.literal_eval(parser.get('CONTROL', 'ending_diag_cols')) MATURITY_AGE = parser.getfloat('CONTROL', 'MATURITY_AGE') # Directories input_dir = dir_prj + os.sep + parser.get('GLM', 'path_input') gcam_dir = input_dir + os.sep + parser.get('PATHS', 'gcam_dir') + os.sep out_dir = dir_prj + os.sep + parser.get('PATHS', 'out_dir') + os.sep + parser.get('PROJECT', 'project_name') + os.sep log_dir = out_dir + os.sep + 'Logs' codes_dir = input_dir + os.sep + parser.get('PATHS', 'codes_dir') # Continent and country codes # project-specific constants TAG = parser.get('PROJECT', 'TAG') CROPS = ast.literal_eval(parser.get('GCAM', 'CROPS')) PASTURE = ast.literal_eval(parser.get('GCAM', 'PASTURE')) FOREST = ast.literal_eval(parser.get('GCAM', 'FOREST')) URBAN = ast.literal_eval(parser.get('GCAM', 'URBAN')) FNF_DEFN = 2.0 # Forest/Non-forest definition # GLM cell_area_name = 'carea' ice_water_frac = 'icwtr' # Control parameters for glm do_alternate = parser.getboolean('GLM', 'do_alternate') legend_glm = parser.get('GLM', 'legend_glm') legend_alt_glm = parser.get('GLM', 'legend_alt_glm') # glm static data path_glm_stat = dir_prj + os.sep + parser.get('GLM', 'path_glm_stat') # static data e.g. 
cell area path_glm_carea = dir_prj + os.sep + parser.get('GLM', 'path_glm_carea') # glm cell area quarter dedpigree path_glm_vba = dir_prj + os.sep + parser.get('GLM', 'path_glm_vba') # glm miami lu based virgin biomass path_glm_new_vba = dir_prj + os.sep + parser.get('GLM', 'path_glm_new_vba') # glm NEW miami lu based virgin biomass # Path glm input/output path_glm_input = dir_prj + os.sep + parser.get('GLM', 'path_glm_input') glm_experiments = ast.literal_eval(parser.get('GLM', 'folder_glm')) path_glm_output = dir_prj + os.sep + parser.get('GLM', 'path_glm_output') # output path # netCDF glm path_nc_states = parser.get('GLM', 'path_nc_states') # states data for past path_nc_trans = parser.get('GLM', 'path_nc_trans') # Transition data for past path_nc_mgt = parser.get('GLM', 'path_nc_mgt') # management data for past # netCDF future glm path_nc_futr_states = parser.get('GLM', 'path_nc_futr_states') # states data for future path_nc_futr_mgt = parser.get('GLM', 'path_nc_futr_mgt') # management data for futue # netCDF past + future glm path_nc_all_states = parser.get('GLM', 'path_nc_all_states') # states data for past + future path_nc_all_mgt = parser.get('GLM', 'path_nc_all_mgt') # management data for past + future # Second set of glm outputs (for comparison with first set) path_nc_alt_state = parser.get('GLM', 'path_nc_alt_state') # alternate glm data states path_nc_alt_trans = parser.get('GLM', 'path_nc_alt_trans') # transitions for alternate glm data # HYDE3.2_march HYDE32_march_CROP_PATH = dir_prj + os.sep + parser.get('HYDE3.2_march', 'hyde32_march_crop_path') HYDE32_march_OTHR_PATH = dir_prj + os.sep + parser.get('HYDE3.2_march', 'hyde32_march_othr_path') HYDE32_march_PAST_PATH = dir_prj + os.sep + parser.get('HYDE3.2_march', 'hyde32_march_past_path') HYDE32_march_GRAZ_PATH = dir_prj + os.sep + parser.get('HYDE3.2_march', 'hyde32_march_graz_path') HYDE32_march_RANG_PATH = dir_prj + os.sep + parser.get('HYDE3.2_march', 'hyde32_march_rang_path') HYDE32_march_URBN_PATH = dir_prj + os.sep + parser.get('HYDE3.2_march', 'hyde32_march_urbn_path') HYDE32_march_OUT_PATH = dir_prj + os.sep + parser.get('HYDE3.2_march', 'out_hyde32_march_path') # HYDE3.2 HYDE32_CROP_PATH = dir_prj + os.sep + parser.get('HYDE3.2', 'hyde32_crop_path') HYDE32_OTHR_PATH = dir_prj + os.sep + parser.get('HYDE3.2', 'hyde32_othr_path') HYDE32_PAST_PATH = dir_prj + os.sep + parser.get('HYDE3.2', 'hyde32_past_path') HYDE32_GRAZ_PATH = dir_prj + os.sep + parser.get('HYDE3.2', 'hyde32_graz_path') HYDE32_RANG_PATH = dir_prj + os.sep + parser.get('HYDE3.2', 'hyde32_rang_path') HYDE32_URBN_PATH = dir_prj + os.sep + parser.get('HYDE3.2', 'hyde32_urbn_path') HYDE32_OUT_PATH = dir_prj + os.sep + parser.get('HYDE3.2', 'out_hyde32_path') # HYDE3.2v1h_beta_crop_path HYDE32_v1hb_CROP_PATH = dir_prj + os.sep + parser.get('HYDE3.2_v1h_beta', 'hyde32_v1h_beta_crop_path') HYDE32_v1hb_OTHR_PATH = dir_prj + os.sep + parser.get('HYDE3.2_v1h_beta', 'hyde32_v1h_beta_othr_path') HYDE32_v1hb_PAST_PATH = dir_prj + os.sep + parser.get('HYDE3.2_v1h_beta', 'hyde32_v1h_beta_past_path') HYDE32_v1hb_GRAZ_PATH = dir_prj + os.sep + parser.get('HYDE3.2_v1h_beta', 'hyde32_v1h_beta_graz_path') HYDE32_v1hb_RANG_PATH = dir_prj + os.sep + parser.get('HYDE3.2_v1h_beta', 'hyde32_v1h_beta_rang_path') HYDE32_v1hb_URBN_PATH = dir_prj + os.sep + parser.get('HYDE3.2_v1h_beta', 'hyde32_v1h_beta_urbn_path') HYDE32_v1hb_OUT_PATH = dir_prj + os.sep + parser.get('HYDE3.2_v1h_beta', 'out_hyde32_v1h_beta_path') # HYDE3.2 # Used in version 0.3 HYDE32_v03_CROP_PATH = dir_prj 
+ os.sep + parser.get('HYDE3.2_v0.3', 'hyde32_v0.3_crop_path') HYDE32_v03_OTHR_PATH = dir_prj + os.sep + parser.get('HYDE3.2_v0.3', 'hyde32_v0.3_othr_path') HYDE32_v03_PAST_PATH = dir_prj + os.sep + parser.get('HYDE3.2_v0.3', 'hyde32_v0.3_past_path') HYDE32_v03_GRAZ_PATH = dir_prj + os.sep + parser.get('HYDE3.2_v0.3', 'hyde32_v0.3_graz_path') HYDE32_v03_RANG_PATH = dir_prj + os.sep + parser.get('HYDE3.2_v0.3', 'hyde32_v0.3_rang_path') HYDE32_v03_URBN_PATH = dir_prj + os.sep + parser.get('HYDE3.2_v0.3', 'hyde32_v0.3_urbn_path') HYDE32_v03_OUT_PATH = dir_prj + os.sep + parser.get('HYDE3.2_v0.3', 'out_hyde32_v0.3_path') # HYDE 3.1 HYDE31_CROP_PATH = dir_prj + os.sep + parser.get('HYDE3.1', 'hyde31_crop_path') HYDE31_OTHR_PATH = dir_prj + os.sep + parser.get('HYDE3.1', 'hyde31_othr_path') HYDE31_PAST_PATH = dir_prj + os.sep + parser.get('HYDE3.1', 'hyde31_past_path') HYDE31_URBN_PATH = dir_prj + os.sep + parser.get('HYDE3.1', 'hyde31_urbn_path') HYDE31_OUT_PATH = dir_prj + os.sep + parser.get('HYDE3.1', 'out_hyde31_path') # GCAM GCAM_OUT = dir_prj + os.sep + parser.get('GCAM', 'GCAM_OUT') GCAM_CROPS = dir_prj + os.sep + parser.get('GCAM', 'GCAM_CROPS') DPI = 150 # dots per inch for saved figures NUM_LATS = 180.0 NUM_LONS = 360.0 # FAO FAO_CONCOR = input_dir + os.sep + parser.get('COUNTRIES', 'FAO_CONCOR') # Country and continent codes ccodes_file = codes_dir + os.sep + parser.get('CODES', 'ccodes_file') contcodes_file = codes_dir + os.sep + parser.get('CODES', 'contcodes_file') # Common data CNTRY_CODES = input_dir + os.sep + parser.get('LUMIP_DATA', 'ccodes') # Country codes CELL_AREA_Q = input_dir + os.sep + parser.get('LUMIP_DATA', 'careaq') # quarter degree cell area CELL_AREA_H = input_dir + os.sep + parser.get('LUMIP_DATA', 'careah') # half degree cell area CELL_AREA_Q = input_dir + os.sep + parser.get('LUMIP_DATA', 'careao') # one degree cell area # Shifting cultivation shft_by_country = parser.getboolean('SHIFTING', 'shft_by_country') ASC_BUTLER = input_dir + os.sep + parser.get('SHIFTING', 'butler_ascii') # Butler map (ascii) TIF_ANDREAS = input_dir + os.sep + parser.get('SHIFTING', 'andreas_map') # Andreas map (tiff) NC_ANDREAS = input_dir + os.sep + parser.get('SHIFTING', 'andreas_nc') # Andreas netCDF ASC_ANDREAS = input_dir + os.sep + parser.get('SHIFTING', 'andreas_ascii') # Andreas ascii ASC_ANDREAS_2100 = input_dir + os.sep + parser.get('SHIFTING', 'andreas_ascii_2100') # Andreas ascii # Age age_poulter = input_dir + os.sep + parser.get('AGE', 'age_poulter') # Other rs_forest = input_dir + os.sep + parser.get('OTHER', 'rs_forest') wh_file = input_dir + os.sep + parser.get('OTHER', 'wh_file') # Monfreda MFD_DATA_DIR = input_dir + os.sep + parser.get('MONFREDA', 'mon_data_dir') # MIAMI-LU miami_lu_nc = input_dir + os.sep + parser.get('MIAMI_LU', 'miami_lu_nc') miami_npp = input_dir + os.sep + parser.get('MIAMI_LU', 'miami_npp') # Previous MIAMI-LU NPP estimates miami_vba = input_dir + os.sep + parser.get('MIAMI_LU', 'miami_vba') # Previous MIAMI-LU biomass estimates # HOTSPOTS file_hotspots = dir_prj + os.sep + parser.get('HOTSPOTS', 'path_hotspots') # GCAM Wood harvest file WOOD_HARVEST = dir_prj + os.sep + parser.get('GCAM', 'WOOD_HARVEST') FERT_DATA = dir_prj + os.sep + parser.get('GCAM', 'FERT_DATA') GCAM_START_YR = parser.getint('GCAM', 'GCAM_START_YR') GCAM_END_YR = parser.getint('GCAM', 'GCAM_END_YR') GCAM_STEP_YR = parser.getint('GCAM', 'GCAM_STEP_YR') SKIP_GCAM_COLS = parser.getint('GCAM', 'SKIP_GCAM_COLS') GCAM_MAPPING = dir_prj + os.sep + parser.get('GCAM', 
'GCAM_MAPPING') # CONSTANTS FILL_VALUE = 1e20 M2_TO_KM2 = 0.000001 M2_TO_HA = 1e-4 KM2_TO_HA = 100.0 IMG_SIZE = 100.0 BILLION = 1e9 KG_TO_PG = 1e-12 KG_TO_TG = 1e-9 KG_TO_MG = 1e-6 TO_MILLION = 1e-6 AGB_TO_BIOMASS = 4.0/3.0
repo_name: ritviksahajpal/LUH2
path: LUH2/GLM/constants.py
language: Python
license: mit
size: 10,142
keyword: [ "NetCDF" ]
text_hash: 244f4e2ba3a3eebae42ef137402411bf146fdba1073588765e2e40e2e5d06e37
""" ConstantExpressions gathers constant expression. """ from pythran.analyses.aliases import Aliases from pythran.analyses.globals_analysis import Globals from pythran.analyses.locals_analysis import Locals from pythran.analyses.pure_expressions import PureExpressions from pythran.intrinsic import FunctionIntr from pythran.passmanager import NodeAnalysis from pythran.tables import MODULES import ast class ConstantExpressions(NodeAnalysis): """Identify constant expressions.""" def __init__(self): self.result = set() super(ConstantExpressions, self).__init__(Globals, Locals, PureExpressions, Aliases) def add(self, node): self.result.add(node) return True def visit_BoolOp(self, node): return all(map(self.visit, node.values)) and self.add(node) def visit_BinOp(self, node): rec = all(map(self.visit, (node.left, node.right))) return rec and self.add(node) def visit_UnaryOp(self, node): return self.visit(node.operand) and self.add(node) def visit_IfExp(self, node): rec = all(map(self.visit, (node.test, node.body, node.orelse))) return rec and self.add(node) def visit_Compare(self, node): rec = all(map(self.visit, [node.left] + node.comparators)) return rec and self.add(node) def visit_Call(self, node): rec = all(map(self.visit, node.args + [node.func])) return rec and self.add(node) visit_Num = add visit_Str = add def visit_Subscript(self, node): rec = all(map(self.visit, (node.value, node.slice))) return rec and self.add(node) def visit_Name(self, node): if node in self.aliases: # params and store are not constants if not isinstance(node.ctx, ast.Load): return False # if we can alias on multiple value, it is not constant elif len(self.aliases[node].aliases) > 1: return False # if it is not a globals, it depends on variable so it is not # constant elif node.id not in self.globals: return False # if it is defined in the current function, it is not constant elif node.id in self.locals[node]: return False def is_function(x): return isinstance(x, (FunctionIntr, ast.FunctionDef, ast.alias)) pure_fun = all(alias in self.pure_expressions and is_function(alias) for alias in self.aliases[node].aliases) return pure_fun else: return False def visit_Attribute(self, node): def rec(w, n): if isinstance(n, ast.Name): return w[n.id] elif isinstance(n, ast.Attribute): return rec(w, n.value)[n.attr] return rec(MODULES, node).isconst() and self.add(node) def visit_Dict(self, node): rec = all(map(self.visit, node.keys + node.values)) return rec and self.add(node) def visit_List(self, node): return all(map(self.visit, node.elts)) and self.add(node) visit_Tuple = visit_List visit_Set = visit_List def visit_Slice(self, node): # ultra-conservative, indeed return False def visit_Index(self, node): return self.visit(node.value) and self.add(node)
repo_name: artas360/pythran
path: pythran/analyses/constant_expressions.py
language: Python
license: bsd-3-clause
size: 3,550
keyword: [ "VisIt" ]
text_hash: d792b7a2b1e7e617e8542eb26dcf8b1d0c74a4364ef31a9af391a6c3ccfc4305
# Copyright (C) 2016 Arvid Fahlström Myrman # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. import functools import numpy as np import theano import theano.tensor as T from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams from . import init, activations, utils class PickleableLayer(type): def __new__(cls, name, bases, namespace): # NOTE: root class name is hard coded if name != 'Layer' and ('__getstate__' in namespace and '__setstate__' in namespace): gs, ss = namespace['__getstate__'], namespace['__setstate__'] @functools.wraps(gs) def __getstate__(self): super_state = super(newcls, self).__getstate__() return super_state, gs(self) @functools.wraps(ss) def __setstate__(self, state): super_state, state = state super(newcls, self).__setstate__(super_state) ss(self, state) namespace['__getstate__'] = __getstate__ namespace['__setstate__'] = __setstate__ newcls = type.__new__(cls, name, bases, namespace) return newcls # NOTE: also update PickleableLayer if changing the name of the class class Layer(metaclass=PickleableLayer): def __init__(self, parents=(), inputs=(), output_shape=None): self.parents = utils.ensure_tuple(parents) self._inputs = inputs self.output_shape = output_shape def all_layers(self): for parent in self.parents: yield from parent.all_layers() yield self def parameters(self): return () def all_parameters(self): for layer in self.all_layers(): yield from layer.parameters() def inputs(self): return self._inputs def process_input_dict(self, inputs): givens = {} for layer in self.all_layers(): if len(layer.inputs()) == 0: continue try: layer_inputs = utils.ensure_tuple(inputs[layer]) except KeyError: raise ValueError("Missing input for layer {!r}".format(layer)) if len(layer_inputs) != len(layer.inputs()): raise ValueError(("Wrong number of inputs for layer {!r}: " "got {}, expected {}".format( layer, len(layer_inputs), len(layer.inputs())))) givens.update(zip(layer.inputs(), layer_inputs)) return givens def all_inputs(self): for layer in self.all_layers(): yield from layer.inputs() def output_for(self, *inputs, train_pass): raise NotImplementedError def output(self, inputs=None, train_pass=False): layer_output = self.output_for(*(parent.output(train_pass=train_pass) for parent in self.parents), train_pass=train_pass) if inputs is not None: givens = self.process_input_dict(inputs) layer_output = theano.clone(layer_output, givens) return layer_output def __getstate__(self): return self.parents, self._inputs, self.output_shape def __setstate__(self, state): self.parents, self._inputs, self.output_shape = state class InputLayer(Layer): def __init__(self, shape, dtype=theano.config.floatX): ttype = T.TensorType(dtype, (False,) * (len(shape) + 1)) self._input = ttype('input') super().__init__(inputs=(self._input,), output_shape=shape) def output_for(self, **kwargs): return self._input def __getstate__(self): return 
self._input def __setstate__(self, state): self._input = state class ReshapeLayer(Layer): def __init__(self, parent, shape): super().__init__(parent, output_shape=shape) self._shape = shape def output_for(self, input, **kwargs): return input.reshape((-1, *self._shape)) def __getstate__(self): return self._shape, def __setstate__(self, state): self._shape, = state class FeedForwardLayer(Layer): def __init__(self, parent, units, W=init.gaussian(0, 0.01), b=init.constant(0), activation=activations.linear, maxout=1): super().__init__(parent, output_shape=(units,)) self._activation = activation self._W = theano.shared(W((maxout, *parent.output_shape, units)), name='W') self._b = theano.shared(b((maxout, units)), name='b') def parameters(self, regularizable=False): yield self._W if not regularizable: yield self._b def output_for(self, input, train_pass): out = T.dot(input, self._W) + self._b maxout = out.max(axis=1) return self._activation(maxout) def __getstate__(self): return self._activation, self._W, self._b def __setstate__(self, state): self._activation, self._W, self._b = state class GaussianLayer(Layer): def __init__(self, parent, std, seed=None): super().__init__(parent, output_shape=parent.output_shape) self.std = std seed = seed or np.random.randint(1, 1 << 30, (6,)) self.rng = RandomStreams(seed=seed) def output_for(self, input, train_pass): if train_pass: noise = self.rng.normal(self.output_shape, std=self.std, dtype=theano.config.floatX) return input + noise else: return input def __getstate__(self): return self.std, self.rng def __setstate__(self, state): self.std, self.rng = state class DropoutLayer(Layer): def __init__(self, parent, dropout_rate, seed=None): super().__init__(parent, output_shape=parent.output_shape) self.alive_prob = 1 - dropout_rate seed = seed or np.random.randint(1, 1 << 30, (6,)) self.rng = RandomStreams(seed=seed) def output_for(self, input, train_pass): if train_pass: # NOTE: dtype needs to be specified in order to avoid casting to float64 mask = self.rng.binomial((input.shape[1],), p=self.alive_prob, dtype=input.dtype) return input * mask / self.alive_prob else: return input def __getstate__(self): return self.alive_prob, self.rng def __setstate__(self, state): self.alive_prob, self.rng = state if __name__ == '__main__': pass
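# --- Usage sketch (editor's addition, not part of the repository file above) ---
# A minimal example of how the Layer API defined above is meant to be composed:
# an InputLayer feeding two FeedForwardLayers, the first acting as a maxout
# hidden layer. The import path `nn.layers`/`nn.activations` and the batch size
# are assumptions; only classes and defaults that appear in the file are used.
import numpy as np
import theano

from nn import activations, layers  # assumed package layout (src/nn/*.py)

# 20 input features -> 50 maxout hidden units (3 pieces) -> 10 linear outputs.
inp = layers.InputLayer(shape=(20,))
hidden = layers.FeedForwardLayer(inp, units=50, maxout=3)   # max over pieces is the non-linearity
out = layers.FeedForwardLayer(hidden, units=10, activation=activations.linear)

params = list(out.all_parameters())   # W and b of both feed-forward layers

# Compile a prediction function; train_pass=False keeps noise/dropout layers inactive.
predict = theano.function([inp.inputs()[0]], out.output(train_pass=False))
x = np.random.randn(5, 20).astype(theano.config.floatX)
print(predict(x).shape)               # expected: (5, 10)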
arvidfm/masters-thesis
src/nn/layers.py
Python
gpl-2.0
7,270
[ "Gaussian" ]
de1d73684e5a7327ae32579f34886bdf646a0b8f554c2b6e481ad591f7ad9e56
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Implementation of image ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.compat import compat from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gen_image_ops from tensorflow.python.ops import gen_nn_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.ops import nn_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import string_ops from tensorflow.python.ops import variables from tensorflow.python.util import deprecation from tensorflow.python.util.tf_export import tf_export ops.NotDifferentiable('RandomCrop') # TODO(b/31222613): This op may be differentiable, and there may be # latent bugs here. ops.NotDifferentiable('RGBToHSV') # TODO(b/31222613): This op may be differentiable, and there may be # latent bugs here. ops.NotDifferentiable('HSVToRGB') ops.NotDifferentiable('DrawBoundingBoxes') ops.NotDifferentiable('SampleDistortedBoundingBox') ops.NotDifferentiable('SampleDistortedBoundingBoxV2') # TODO(bsteiner): Implement the gradient function for extract_glimpse # TODO(b/31222613): This op may be differentiable, and there may be # latent bugs here. ops.NotDifferentiable('ExtractGlimpse') ops.NotDifferentiable('NonMaxSuppression') ops.NotDifferentiable('NonMaxSuppressionV2') ops.NotDifferentiable('NonMaxSuppressionWithOverlaps') # pylint: disable=invalid-name def _assert(cond, ex_type, msg): """A polymorphic assert, works with tensors and boolean expressions. If `cond` is not a tensor, behave like an ordinary assert statement, except that an empty list is returned. If `cond` is a tensor, return a list containing a single TensorFlow assert op. Args: cond: Something that evaluates to a boolean value. May be a tensor. ex_type: The exception class to use. msg: The error message. Returns: A list, containing at most one assert op. """ if _is_tensor(cond): return [control_flow_ops.Assert(cond, [msg])] else: if not cond: raise ex_type(msg) else: return [] def _is_tensor(x): """Returns `True` if `x` is a symbolic tensor-like object. Args: x: A python object to check. Returns: `True` if `x` is a `tf.Tensor` or `tf.Variable`, otherwise `False`. """ return isinstance(x, (ops.Tensor, variables.Variable)) def _ImageDimensions(image, rank): """Returns the dimensions of an image tensor. Args: image: A rank-D Tensor. 
For 3-D of shape: `[height, width, channels]`. rank: The expected rank of the image Returns: A list corresponding to the dimensions of the input image. Dimensions that are statically known are python integers, otherwise they are integer scalar tensors. """ if image.get_shape().is_fully_defined(): return image.get_shape().as_list() else: static_shape = image.get_shape().with_rank(rank).as_list() dynamic_shape = array_ops.unstack(array_ops.shape(image), rank) return [ s if s is not None else d for s, d in zip(static_shape, dynamic_shape) ] def _Check3DImage(image, require_static=True): """Assert that we are working with a properly shaped image. Args: image: 3-D Tensor of shape [height, width, channels] require_static: If `True`, requires that all dimensions of `image` are known and non-zero. Raises: ValueError: if `image.shape` is not a 3-vector. Returns: An empty list, if `image` has fully defined dimensions. Otherwise, a list containing an assert op is returned. """ try: image_shape = image.get_shape().with_rank(3) except ValueError: raise ValueError( "'image' (shape %s) must be three-dimensional." % image.shape) if require_static and not image_shape.is_fully_defined(): raise ValueError("'image' (shape %s) must be fully defined." % image_shape) if any(x == 0 for x in image_shape): raise ValueError("all dims of 'image.shape' must be > 0: %s" % image_shape) if not image_shape.is_fully_defined(): return [ check_ops.assert_positive( array_ops.shape(image), ["all dims of 'image.shape' " 'must be > 0.']) ] else: return [] def _Assert3DImage(image): """Assert that we are working with a properly shaped image. Performs the check statically if possible (i.e. if the shape is statically known). Otherwise adds a control dependency to an assert op that checks the dynamic shape. Args: image: 3-D Tensor of shape [height, width, channels] Raises: ValueError: if `image.shape` is not a 3-vector. Returns: If the shape of `image` could be verified statically, `image` is returned unchanged, otherwise there will be a control dependency added that asserts the correct dynamic shape. """ return control_flow_ops.with_dependencies( _Check3DImage(image, require_static=False), image) def _AssertAtLeast3DImage(image): """Assert that we are working with a properly shaped image. Performs the check statically if possible (i.e. if the shape is statically known). Otherwise adds a control dependency to an assert op that checks the dynamic shape. Args: image: >= 3-D Tensor of size [*, height, width, depth] Raises: ValueError: if image.shape is not a [>= 3] vector. Returns: If the shape of `image` could be verified statically, `image` is returned unchanged, otherwise there will be a control dependency added that asserts the correct dynamic shape. """ return control_flow_ops.with_dependencies( _CheckAtLeast3DImage(image, require_static=False), image) def _CheckAtLeast3DImage(image, require_static=True): """Assert that we are working with a properly shaped image. Args: image: >= 3-D Tensor of size [*, height, width, depth] require_static: If `True`, requires that all dimensions of `image` are known and non-zero. Raises: ValueError: if image.shape is not a [>= 3] vector. Returns: An empty list, if `image` has fully defined dimensions. Otherwise, a list containing an assert op is returned. 
""" try: if image.get_shape().ndims is None: image_shape = image.get_shape().with_rank(3) else: image_shape = image.get_shape().with_rank_at_least(3) except ValueError: raise ValueError("'image' must be at least three-dimensional.") if require_static and not image_shape.is_fully_defined(): raise ValueError('\'image\' must be fully defined.') if any(x == 0 for x in image_shape): raise ValueError( 'all dims of \'image.shape\' must be > 0: %s' % image_shape) if not image_shape.is_fully_defined(): return [ check_ops.assert_positive( array_ops.shape(image), ["all dims of 'image.shape' " 'must be > 0.']) ] else: return [] def fix_image_flip_shape(image, result): """Set the shape to 3 dimensional if we don't know anything else. Args: image: original image size result: flipped or transformed image Returns: An image whose shape is at least None,None,None. """ image_shape = image.get_shape() if image_shape == tensor_shape.unknown_shape(): result.set_shape([None, None, None]) else: result.set_shape(image_shape) return result @tf_export('image.random_flip_up_down') def random_flip_up_down(image, seed=None): """Randomly flips an image vertically (upside down). With a 1 in 2 chance, outputs the contents of `image` flipped along the first dimension, which is `height`. Otherwise output the image as-is. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. seed: A Python integer. Used to create a random seed. See `tf.set_random_seed` for behavior. Returns: A tensor of the same type and shape as `image`. Raises: ValueError: if the shape of `image` not supported. """ return _random_flip(image, 0, seed, 'random_flip_up_down') @tf_export('image.random_flip_left_right') def random_flip_left_right(image, seed=None): """Randomly flip an image horizontally (left to right). With a 1 in 2 chance, outputs the contents of `image` flipped along the second dimension, which is `width`. Otherwise output the image as-is. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. seed: A Python integer. Used to create a random seed. See `tf.set_random_seed` for behavior. Returns: A tensor of the same type and shape as `image`. Raises: ValueError: if the shape of `image` not supported. """ return _random_flip(image, 1, seed, 'random_flip_left_right') def _random_flip(image, flip_index, seed, scope_name): """Randomly (50% chance) flip an image along axis `flip_index`. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. flip_index: Dimension along which to flip image. Vertical: 0, Horizontal: 1 seed: A Python integer. Used to create a random seed. See `tf.set_random_seed` for behavior. scope_name: Name of the scope in which the ops are added. Returns: A tensor of the same type and shape as `image`. Raises: ValueError: if the shape of `image` not supported. 
""" with ops.name_scope(None, scope_name, [image]) as scope: image = ops.convert_to_tensor(image, name='image') image = _AssertAtLeast3DImage(image) shape = image.get_shape() if shape.ndims == 3 or shape.ndims is None: uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed) mirror_cond = math_ops.less(uniform_random, .5) result = control_flow_ops.cond( mirror_cond, lambda: array_ops.reverse(image, [flip_index]), lambda: image, name=scope ) return fix_image_flip_shape(image, result) elif shape.ndims == 4: batch_size = array_ops.shape(image)[0] uniform_random = random_ops.random_uniform( [batch_size], 0, 1.0, seed=seed ) flips = math_ops.round( array_ops.reshape(uniform_random, [batch_size, 1, 1, 1]) ) flips = math_ops.cast(flips, image.dtype) flipped_input = array_ops.reverse(image, [flip_index + 1]) return flips * flipped_input + (1 - flips) * image else: raise ValueError('\'image\' must have either 3 or 4 dimensions.') @tf_export('image.flip_left_right') def flip_left_right(image): """Flip an image horizontally (left to right). Outputs the contents of `image` flipped along the width dimension. See also `reverse()`. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. Returns: A tensor of the same type and shape as `image`. Raises: ValueError: if the shape of `image` not supported. """ return _flip(image, 1, 'flip_left_right') @tf_export('image.flip_up_down') def flip_up_down(image): """Flip an image vertically (upside down). Outputs the contents of `image` flipped along the height dimension. See also `reverse()`. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. Returns: A tensor of the same type and shape as `image`. Raises: ValueError: if the shape of `image` not supported. """ return _flip(image, 0, 'flip_up_down') def _flip(image, flip_index, scope_name): """Flip an image either horizontally or vertically. Outputs the contents of `image` flipped along the dimension `flip_index`. See also `reverse()`. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. flip_index: 0 For vertical, 1 for horizontal. Returns: A tensor of the same type and shape as `image`. Raises: ValueError: if the shape of `image` not supported. """ with ops.name_scope(None, scope_name, [image]): image = ops.convert_to_tensor(image, name='image') image = _AssertAtLeast3DImage(image) shape = image.get_shape() if shape.ndims == 3 or shape.ndims is None: return fix_image_flip_shape(image, array_ops.reverse(image, [flip_index])) elif shape.ndims == 4: return array_ops.reverse(image, [flip_index+1]) else: raise ValueError('\'image\' must have either 3 or 4 dimensions.') @tf_export('image.rot90') def rot90(image, k=1, name=None): """Rotate image(s) counter-clockwise by 90 degrees. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. k: A scalar integer. The number of times the image is rotated by 90 degrees. name: A name for this operation (optional). Returns: A rotated tensor of the same type and shape as `image`. Raises: ValueError: if the shape of `image` not supported. 
""" with ops.name_scope(name, 'rot90', [image, k]) as scope: image = ops.convert_to_tensor(image, name='image') image = _AssertAtLeast3DImage(image) k = ops.convert_to_tensor(k, dtype=dtypes.int32, name='k') k.get_shape().assert_has_rank(0) k = math_ops.mod(k, 4) shape = image.get_shape() if shape.ndims == 3 or shape.ndims is None: return _rot90_3D(image, k, scope) elif shape.ndims == 4: return _rot90_4D(image, k, scope) else: raise ValueError('\'image\' must have either 3 or 4 dimensions.') def _rot90_3D(image, k, name_scope): """Rotate image counter-clockwise by 90 degrees `k` times. Args: image: 3-D Tensor of shape `[height, width, channels]`. k: A scalar integer. The number of times the image is rotated by 90 degrees. name_scope: A valid TensorFlow name scope. Returns: A 3-D tensor of the same type and shape as `image`. """ def _rot90(): return array_ops.transpose(array_ops.reverse_v2(image, [1]), [1, 0, 2]) def _rot180(): return array_ops.reverse_v2(image, [0, 1]) def _rot270(): return array_ops.reverse_v2(array_ops.transpose(image, [1, 0, 2]), [1]) cases = [(math_ops.equal(k, 1), _rot90), (math_ops.equal(k, 2), _rot180), (math_ops.equal(k, 3), _rot270)] result = control_flow_ops.case( cases, default=lambda: image, exclusive=True, name=name_scope) result.set_shape([None, None, image.get_shape()[2]]) return result def _rot90_4D(images, k, name_scope): """Rotate batch of images counter-clockwise by 90 degrees `k` times. Args: images: 4-D Tensor of shape `[height, width, channels]`. k: A scalar integer. The number of times the images are rotated by 90 degrees. name_scope: A valid TensorFlow name scope. Returns: A 4-D tensor of the same type and shape as `images`. """ def _rot90(): return array_ops.transpose(array_ops.reverse_v2(images, [2]), [0, 2, 1, 3]) def _rot180(): return array_ops.reverse_v2(images, [1, 2]) def _rot270(): return array_ops.reverse_v2(array_ops.transpose(images, [0, 2, 1, 3]), [2]) cases = [(math_ops.equal(k, 1), _rot90), (math_ops.equal(k, 2), _rot180), (math_ops.equal(k, 3), _rot270)] result = control_flow_ops.case( cases, default=lambda: images, exclusive=True, name=name_scope) shape = result.get_shape() result.set_shape([shape[0], None, None, shape[3]]) return result @tf_export(v1=['image.transpose', 'image.transpose_image']) def transpose_image(image): return transpose(image=image, name=None) @tf_export('image.transpose', v1=[]) def transpose(image, name=None): """Transpose image(s) by swapping the height and width dimension. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. name: A name for this operation (optional). Returns: If `image` was 4-D, a 4-D float Tensor of shape `[batch, width, height, channels]` If `image` was 3-D, a 3-D float Tensor of shape `[width, height, channels]` Raises: ValueError: if the shape of `image` not supported. """ with ops.name_scope(name, 'transpose', [image]): image = ops.convert_to_tensor(image, name='image') image = _AssertAtLeast3DImage(image) shape = image.get_shape() if shape.ndims == 3 or shape.ndims is None: return array_ops.transpose(image, [1, 0, 2], name=name) elif shape.ndims == 4: return array_ops.transpose(image, [0, 2, 1, 3], name=name) else: raise ValueError('\'image\' must have either 3 or 4 dimensions.') @tf_export('image.central_crop') def central_crop(image, central_fraction): """Crop the central region of the image(s). Remove the outer parts of an image but retain the central region of the image along each dimension. 
If we specify central_fraction = 0.5, this function returns the region marked with "X" in the below diagram. -------- | | | XXXX | | XXXX | | | where "X" is the central 50% of the image. -------- This function works on either a single image (`image` is a 3-D Tensor), or a batch of images (`image` is a 4-D Tensor). Args: image: Either a 3-D float Tensor of shape [height, width, depth], or a 4-D Tensor of shape [batch_size, height, width, depth]. central_fraction: float (0, 1], fraction of size to crop Raises: ValueError: if central_crop_fraction is not within (0, 1]. Returns: 3-D / 4-D float Tensor, as per the input. """ with ops.name_scope(None, 'central_crop', [image]): image = ops.convert_to_tensor(image, name='image') if central_fraction <= 0.0 or central_fraction > 1.0: raise ValueError('central_fraction must be within (0, 1]') if central_fraction == 1.0: return image _AssertAtLeast3DImage(image) rank = image.get_shape().ndims if rank != 3 and rank != 4: raise ValueError('`image` should either be a Tensor with rank = 3 or ' 'rank = 4. Had rank = {}.'.format(rank)) # Helper method to return the `idx`-th dimension of `tensor`, along with # a boolean signifying if the dimension is dynamic. def _get_dim(tensor, idx): static_shape = tensor.get_shape().dims[idx].value if static_shape is not None: return static_shape, False return array_ops.shape(tensor)[idx], True # Get the height, width, depth (and batch size, if the image is a 4-D # tensor). if rank == 3: img_h, dynamic_h = _get_dim(image, 0) img_w, dynamic_w = _get_dim(image, 1) img_d = image.get_shape()[2] else: img_bs = image.get_shape()[0] img_h, dynamic_h = _get_dim(image, 1) img_w, dynamic_w = _get_dim(image, 2) img_d = image.get_shape()[3] # Compute the bounding boxes for the crop. The type and value of the # bounding boxes depend on the `image` tensor's rank and whether / not the # dimensions are statically defined. if dynamic_h: img_hd = math_ops.to_double(img_h) bbox_h_start = math_ops.to_int32((img_hd - img_hd * central_fraction) / 2) else: img_hd = float(img_h) bbox_h_start = int((img_hd - img_hd * central_fraction) / 2) if dynamic_w: img_wd = math_ops.to_double(img_w) bbox_w_start = math_ops.to_int32((img_wd - img_wd * central_fraction) / 2) else: img_wd = float(img_w) bbox_w_start = int((img_wd - img_wd * central_fraction) / 2) bbox_h_size = img_h - bbox_h_start * 2 bbox_w_size = img_w - bbox_w_start * 2 if rank == 3: bbox_begin = array_ops.stack([bbox_h_start, bbox_w_start, 0]) bbox_size = array_ops.stack([bbox_h_size, bbox_w_size, -1]) else: bbox_begin = array_ops.stack([0, bbox_h_start, bbox_w_start, 0]) bbox_size = array_ops.stack([-1, bbox_h_size, bbox_w_size, -1]) image = array_ops.slice(image, bbox_begin, bbox_size) # Reshape the `image` tensor to the desired size. if rank == 3: image.set_shape([ None if dynamic_h else bbox_h_size, None if dynamic_w else bbox_w_size, img_d ]) else: image.set_shape([ img_bs, None if dynamic_h else bbox_h_size, None if dynamic_w else bbox_w_size, img_d ]) return image @tf_export('image.pad_to_bounding_box') def pad_to_bounding_box(image, offset_height, offset_width, target_height, target_width): """Pad `image` with zeros to the specified `height` and `width`. Adds `offset_height` rows of zeros on top, `offset_width` columns of zeros on the left, and then pads the image on the bottom and right with zeros until it has dimensions `target_height`, `target_width`. This op does nothing if `offset_*` is zero and the image already has size `target_height` by `target_width`. 
Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. offset_height: Number of rows of zeros to add on top. offset_width: Number of columns of zeros to add on the left. target_height: Height of output image. target_width: Width of output image. Returns: If `image` was 4-D, a 4-D float Tensor of shape `[batch, target_height, target_width, channels]` If `image` was 3-D, a 3-D float Tensor of shape `[target_height, target_width, channels]` Raises: ValueError: If the shape of `image` is incompatible with the `offset_*` or `target_*` arguments, or either `offset_height` or `offset_width` is negative. """ with ops.name_scope(None, 'pad_to_bounding_box', [image]): image = ops.convert_to_tensor(image, name='image') is_batch = True image_shape = image.get_shape() if image_shape.ndims == 3: is_batch = False image = array_ops.expand_dims(image, 0) elif image_shape.ndims is None: is_batch = False image = array_ops.expand_dims(image, 0) image.set_shape([None] * 4) elif image_shape.ndims != 4: raise ValueError('\'image\' must have either 3 or 4 dimensions.') assert_ops = _CheckAtLeast3DImage(image, require_static=False) batch, height, width, depth = _ImageDimensions(image, rank=4) after_padding_width = target_width - offset_width - width after_padding_height = target_height - offset_height - height assert_ops += _assert(offset_height >= 0, ValueError, 'offset_height must be >= 0') assert_ops += _assert(offset_width >= 0, ValueError, 'offset_width must be >= 0') assert_ops += _assert(after_padding_width >= 0, ValueError, 'width must be <= target - offset') assert_ops += _assert(after_padding_height >= 0, ValueError, 'height must be <= target - offset') image = control_flow_ops.with_dependencies(assert_ops, image) # Do not pad on the depth dimensions. paddings = array_ops.reshape( array_ops.stack([ 0, 0, offset_height, after_padding_height, offset_width, after_padding_width, 0, 0 ]), [4, 2]) padded = array_ops.pad(image, paddings) padded_shape = [ None if _is_tensor(i) else i for i in [batch, target_height, target_width, depth] ] padded.set_shape(padded_shape) if not is_batch: padded = array_ops.squeeze(padded, axis=[0]) return padded @tf_export('image.crop_to_bounding_box') def crop_to_bounding_box(image, offset_height, offset_width, target_height, target_width): """Crops an image to a specified bounding box. This op cuts a rectangular part out of `image`. The top-left corner of the returned image is at `offset_height, offset_width` in `image`, and its lower-right corner is at `offset_height + target_height, offset_width + target_width`. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. offset_height: Vertical coordinate of the top-left corner of the result in the input. offset_width: Horizontal coordinate of the top-left corner of the result in the input. target_height: Height of the result. target_width: Width of the result. Returns: If `image` was 4-D, a 4-D float Tensor of shape `[batch, target_height, target_width, channels]` If `image` was 3-D, a 3-D float Tensor of shape `[target_height, target_width, channels]` Raises: ValueError: If the shape of `image` is incompatible with the `offset_*` or `target_*` arguments, or either `offset_height` or `offset_width` is negative, or either `target_height` or `target_width` is not positive. 
""" with ops.name_scope(None, 'crop_to_bounding_box', [image]): image = ops.convert_to_tensor(image, name='image') is_batch = True image_shape = image.get_shape() if image_shape.ndims == 3: is_batch = False image = array_ops.expand_dims(image, 0) elif image_shape.ndims is None: is_batch = False image = array_ops.expand_dims(image, 0) image.set_shape([None] * 4) elif image_shape.ndims != 4: raise ValueError('\'image\' must have either 3 or 4 dimensions.') assert_ops = _CheckAtLeast3DImage(image, require_static=False) batch, height, width, depth = _ImageDimensions(image, rank=4) assert_ops += _assert(offset_width >= 0, ValueError, 'offset_width must be >= 0.') assert_ops += _assert(offset_height >= 0, ValueError, 'offset_height must be >= 0.') assert_ops += _assert(target_width > 0, ValueError, 'target_width must be > 0.') assert_ops += _assert(target_height > 0, ValueError, 'target_height must be > 0.') assert_ops += _assert(width >= (target_width + offset_width), ValueError, 'width must be >= target + offset.') assert_ops += _assert(height >= (target_height + offset_height), ValueError, 'height must be >= target + offset.') image = control_flow_ops.with_dependencies(assert_ops, image) cropped = array_ops.slice( image, array_ops.stack([0, offset_height, offset_width, 0]), array_ops.stack([-1, target_height, target_width, -1])) cropped_shape = [ None if _is_tensor(i) else i for i in [batch, target_height, target_width, depth] ] cropped.set_shape(cropped_shape) if not is_batch: cropped = array_ops.squeeze(cropped, axis=[0]) return cropped @tf_export('image.resize_image_with_crop_or_pad') def resize_image_with_crop_or_pad(image, target_height, target_width): """Crops and/or pads an image to a target width and height. Resizes an image to a target width and height by either centrally cropping the image or padding it evenly with zeros. If `width` or `height` is greater than the specified `target_width` or `target_height` respectively, this op centrally crops along that dimension. If `width` or `height` is smaller than the specified `target_width` or `target_height` respectively, this op centrally pads with 0 along that dimension. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. target_height: Target height. target_width: Target width. Raises: ValueError: if `target_height` or `target_width` are zero or negative. Returns: Cropped and/or padded image. If `images` was 4-D, a 4-D float Tensor of shape `[batch, new_height, new_width, channels]`. If `images` was 3-D, a 3-D float Tensor of shape `[new_height, new_width, channels]`. """ with ops.name_scope(None, 'resize_image_with_crop_or_pad', [image]): image = ops.convert_to_tensor(image, name='image') image_shape = image.get_shape() is_batch = True if image_shape.ndims == 3: is_batch = False image = array_ops.expand_dims(image, 0) elif image_shape.ndims is None: is_batch = False image = array_ops.expand_dims(image, 0) image.set_shape([None] * 4) elif image_shape.ndims != 4: raise ValueError('\'image\' must have either 3 or 4 dimensions.') assert_ops = _CheckAtLeast3DImage(image, require_static=False) assert_ops += _assert(target_width > 0, ValueError, 'target_width must be > 0.') assert_ops += _assert(target_height > 0, ValueError, 'target_height must be > 0.') image = control_flow_ops.with_dependencies(assert_ops, image) # `crop_to_bounding_box` and `pad_to_bounding_box` have their own checks. # Make sure our checks come first, so that error messages are clearer. 
if _is_tensor(target_height): target_height = control_flow_ops.with_dependencies( assert_ops, target_height) if _is_tensor(target_width): target_width = control_flow_ops.with_dependencies(assert_ops, target_width) def max_(x, y): if _is_tensor(x) or _is_tensor(y): return math_ops.maximum(x, y) else: return max(x, y) def min_(x, y): if _is_tensor(x) or _is_tensor(y): return math_ops.minimum(x, y) else: return min(x, y) def equal_(x, y): if _is_tensor(x) or _is_tensor(y): return math_ops.equal(x, y) else: return x == y _, height, width, _ = _ImageDimensions(image, rank=4) width_diff = target_width - width offset_crop_width = max_(-width_diff // 2, 0) offset_pad_width = max_(width_diff // 2, 0) height_diff = target_height - height offset_crop_height = max_(-height_diff // 2, 0) offset_pad_height = max_(height_diff // 2, 0) # Maybe crop if needed. cropped = crop_to_bounding_box(image, offset_crop_height, offset_crop_width, min_(target_height, height), min_(target_width, width)) # Maybe pad if needed. resized = pad_to_bounding_box(cropped, offset_pad_height, offset_pad_width, target_height, target_width) # In theory all the checks below are redundant. if resized.get_shape().ndims is None: raise ValueError('resized contains no shape.') _, resized_height, resized_width, _ = _ImageDimensions(resized, rank=4) assert_ops = [] assert_ops += _assert( equal_(resized_height, target_height), ValueError, 'resized height is not correct.') assert_ops += _assert( equal_(resized_width, target_width), ValueError, 'resized width is not correct.') resized = control_flow_ops.with_dependencies(assert_ops, resized) if not is_batch: resized = array_ops.squeeze(resized, axis=[0]) return resized @tf_export(v1=['image.ResizeMethod']) class ResizeMethodV1(object): BILINEAR = 0 NEAREST_NEIGHBOR = 1 BICUBIC = 2 AREA = 3 @tf_export('image.ResizeMethod', v1=[]) class ResizeMethod(object): BILINEAR = 'bilinear' NEAREST_NEIGHBOR = 'nearest' BICUBIC = 'bicubic' AREA = 'area' LANCZOS3 = 'lanczos3' LANCZOS5 = 'lanczos5' GAUSSIAN = 'gaussian' MITCHELLCUBIC = 'mitchellcubic' def _resize_images_common(images, resizer_fn, size, preserve_aspect_ratio, name, skip_resize_if_same): """Core functionality for v1 and v2 resize functions.""" with ops.name_scope(name, 'resize', [images, size]): images = ops.convert_to_tensor(images, name='images') if images.get_shape().ndims is None: raise ValueError('\'images\' contains no shape.') # TODO(shlens): Migrate this functionality to the underlying Op's. is_batch = True if images.get_shape().ndims == 3: is_batch = False images = array_ops.expand_dims(images, 0) elif images.get_shape().ndims != 4: raise ValueError('\'images\' must have either 3 or 4 dimensions.') _, height, width, _ = images.get_shape().as_list() try: size = ops.convert_to_tensor(size, dtypes.int32, name='size') except (TypeError, ValueError): raise ValueError('\'size\' must be a 1-D int32 Tensor') if not size.get_shape().is_compatible_with([2]): raise ValueError('\'size\' must be a 1-D Tensor of 2 elements: ' 'new_height, new_width') size_const_as_shape = tensor_util.constant_value_as_shape(size) new_height_const = size_const_as_shape.dims[0].value new_width_const = size_const_as_shape.dims[1].value if preserve_aspect_ratio: # Get the current shapes of the image, even if dynamic. _, current_height, current_width, _ = _ImageDimensions(images, rank=4) # do the computation to find the right scale and height/width. 
scale_factor_height = (math_ops.to_float(new_height_const) / math_ops.to_float(current_height)) scale_factor_width = (math_ops.to_float(new_width_const) / math_ops.to_float(current_width)) scale_factor = math_ops.minimum(scale_factor_height, scale_factor_width) scaled_height_const = math_ops.to_int32( math_ops.round(scale_factor * math_ops.to_float(current_height))) scaled_width_const = math_ops.to_int32( math_ops.round(scale_factor * math_ops.to_float(current_width))) # NOTE: Reset the size and other constants used later. size = ops.convert_to_tensor([scaled_height_const, scaled_width_const], dtypes.int32, name='size') size_const_as_shape = tensor_util.constant_value_as_shape(size) new_height_const = size_const_as_shape.dims[0].value new_width_const = size_const_as_shape.dims[1].value # If we can determine that the height and width will be unmodified by this # transformation, we avoid performing the resize. if skip_resize_if_same and all( x is not None for x in [new_width_const, width, new_height_const, height]) and ( width == new_width_const and height == new_height_const): if not is_batch: images = array_ops.squeeze(images, axis=[0]) return images images = resizer_fn(images, size) # NOTE(mrry): The shape functions for the resize ops cannot unpack # the packed values in `new_size`, so set the shape here. images.set_shape([None, new_height_const, new_width_const, None]) if not is_batch: images = array_ops.squeeze(images, axis=[0]) return images @tf_export(v1=['image.resize_images', 'image.resize']) def resize_images(images, size, method=ResizeMethodV1.BILINEAR, align_corners=False, preserve_aspect_ratio=False, name=None): """Resize `images` to `size` using the specified `method`. Resized images will be distorted if their original aspect ratio is not the same as `size`. To avoid distortions see `tf.image.resize_image_with_pad`. `method` can be one of: * <b>`ResizeMethod.BILINEAR`</b>: [Bilinear interpolation.]( https://en.wikipedia.org/wiki/Bilinear_interpolation) * <b>`ResizeMethod.NEAREST_NEIGHBOR`</b>: [Nearest neighbor interpolation.]( https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation) * <b>`ResizeMethod.BICUBIC`</b>: [Bicubic interpolation.]( https://en.wikipedia.org/wiki/Bicubic_interpolation) * <b>`ResizeMethod.AREA`</b>: Area interpolation. The return value has the same type as `images` if `method` is `ResizeMethod.NEAREST_NEIGHBOR`. It will also have the same type as `images` if the size of `images` can be statically determined to be the same as `size`, because `images` is returned in this case. Otherwise, the return value has type `float32`. Args: images: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images. method: ResizeMethod. Defaults to `ResizeMethod.BILINEAR`. align_corners: bool. If True, the centers of the 4 corner pixels of the input and output tensors are aligned, preserving the values at the corner pixels. Defaults to `False`. preserve_aspect_ratio: Whether to preserve the aspect ratio. If this is set, then `images` will be resized to a size that fits in `size` while preserving the aspect ratio of the original image. Scales up the image if `size` is bigger than the current size of the `image`. Defaults to False. name: A name for this operation (optional). Raises: ValueError: if the shape of `images` is incompatible with the shape arguments to this function ValueError: if `size` has invalid shape or type. 
ValueError: if an unsupported resize method is specified. Returns: If `images` was 4-D, a 4-D float Tensor of shape `[batch, new_height, new_width, channels]`. If `images` was 3-D, a 3-D float Tensor of shape `[new_height, new_width, channels]`. """ def resize_fn(images_t, new_size): """Legacy resize core function, passed to _resize_images_common.""" if method == ResizeMethodV1.BILINEAR or method == ResizeMethod.BILINEAR: return gen_image_ops.resize_bilinear( images_t, new_size, align_corners=align_corners) elif (method == ResizeMethodV1.NEAREST_NEIGHBOR or method == ResizeMethod.NEAREST_NEIGHBOR): return gen_image_ops.resize_nearest_neighbor( images_t, new_size, align_corners=align_corners) elif method == ResizeMethodV1.BICUBIC or method == ResizeMethod.BICUBIC: return gen_image_ops.resize_bicubic( images_t, new_size, align_corners=align_corners) elif method == ResizeMethodV1.AREA or method == ResizeMethod.AREA: return gen_image_ops.resize_area( images_t, new_size, align_corners=align_corners) else: raise ValueError('Resize method is not implemented.') return _resize_images_common( images, resize_fn, size, preserve_aspect_ratio=preserve_aspect_ratio, name=name, skip_resize_if_same=True) @tf_export('image.resize', v1=[]) def resize_images_v2(images, size, method=ResizeMethod.BILINEAR, preserve_aspect_ratio=False, antialias=False, name=None): """Resize `images` to `size` using the specified `method`. Resized images will be distorted if their original aspect ratio is not the same as `size`. To avoid distortions see `tf.image.resize_with_pad`. When 'antialias' is true, the sampling filter will anti-alias the input image as well as interpolate. When downsampling an image with [anti-aliasing]( https://en.wikipedia.org/wiki/Spatial_anti-aliasing) the sampling filter kernel is scaled in order to properly anti-alias the input image signal. 'antialias' has no effect when upsampling an image. * <b>`bilinear`</b>: [Bilinear interpolation.]( https://en.wikipedia.org/wiki/Bilinear_interpolation) If 'antialias' is true, becomes a hat/tent filter function with radius 1 when downsampling. * <b>`lanczos3`</b>: [Lanczos kernel]( https://en.wikipedia.org/wiki/Lanczos_resampling) with radius 3. High-quality practical filter but may have some ringing especially on synthetic images. * <b>`lanczos5`</b>: [Lanczos kernel] ( https://en.wikipedia.org/wiki/Lanczos_resampling) with radius 5. Very-high-quality filter but may have stronger ringing. * <b>`bicubic`</b>: [Cubic interpolant]( https://en.wikipedia.org/wiki/Bicubic_interpolation) of Keys. Equivalent to Catmull-Rom kernel. Reasonably good quality and faster than Lanczos3Kernel, particularly when upsampling. * <b>`gaussian`</b>: [Gaussian kernel]( https://en.wikipedia.org/wiki/Gaussian_filter) with radius 3, sigma = 1.5 / 3.0. * <b>`nearest`</b>: [Nearest neighbor interpolation.]( https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation) 'antialias' has no effect when used with nearest neighbor interpolation. * <b>`area`</b>: Anti-aliased resampling with area interpolation. 'antialias' has no effect when used with area interpolation; it always anti-aliases. * <b>`mitchellcubic`</b>: Mitchell-Netravali Cubic non-interpolating filter. For synthetic images (especially those lacking proper prefiltering), less ringing than Keys cubic kernel but less sharp. Note that near image edges the filtering kernel may be partially outside the image boundaries. 
For these pixels, only input pixels inside the image will be included in the filter sum, and the output value will be appropriately normalized. The return value has the same type as `images` if `method` is `ResizeMethod.NEAREST_NEIGHBOR`. Otherwise, the return value has type `float32`. Args: images: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images. method: ResizeMethod. Defaults to `bilinear`. preserve_aspect_ratio: Whether to preserve the aspect ratio. If this is set, then `images` will be resized to a size that fits in `size` while preserving the aspect ratio of the original image. Scales up the image if `size` is bigger than the current size of the `image`. Defaults to False. antialias: Whether to use an anti-aliasing filter when downsampling an image. name: A name for this operation (optional). Raises: ValueError: if the shape of `images` is incompatible with the shape arguments to this function ValueError: if `size` has invalid shape or type. ValueError: if an unsupported resize method is specified. Returns: If `images` was 4-D, a 4-D float Tensor of shape `[batch, new_height, new_width, channels]`. If `images` was 3-D, a 3-D float Tensor of shape `[new_height, new_width, channels]`. """ def resize_fn(images_t, new_size): """Resize core function, passed to _resize_images_common.""" scale_and_translate_methods = [ ResizeMethod.LANCZOS3, ResizeMethod.LANCZOS5, ResizeMethod.GAUSSIAN, ResizeMethod.MITCHELLCUBIC ] def resize_with_scale_and_translate(method): scale = ( math_ops.cast(new_size, dtype=dtypes.float32) / math_ops.cast(array_ops.shape(images_t)[1:3], dtype=dtypes.float32)) return gen_image_ops.scale_and_translate( images_t, new_size, scale, array_ops.zeros([2]), kernel_type=method, antialias=antialias) if method == ResizeMethod.BILINEAR: if antialias: return resize_with_scale_and_translate('triangle') else: return gen_image_ops.resize_bilinear( images_t, new_size, half_pixel_centers=True) elif method == ResizeMethod.NEAREST_NEIGHBOR: return gen_image_ops.resize_nearest_neighbor( images_t, new_size, half_pixel_centers=True) elif method == ResizeMethod.BICUBIC: if antialias: return resize_with_scale_and_translate('keyscubic') else: return gen_image_ops.resize_bicubic( images_t, new_size, half_pixel_centers=True) elif method == ResizeMethod.AREA: return gen_image_ops.resize_area(images_t, new_size) elif method in scale_and_translate_methods: return resize_with_scale_and_translate(method) else: raise ValueError('Resize method is not implemented.') return _resize_images_common( images, resize_fn, size, preserve_aspect_ratio=preserve_aspect_ratio, name=name, skip_resize_if_same=False) def _resize_image_with_pad_common(image, target_height, target_width, resize_fn): """Core functionality for v1 and v2 resize_image_with_pad functions.""" with ops.name_scope(None, 'resize_image_with_pad', [image]): image = ops.convert_to_tensor(image, name='image') image_shape = image.get_shape() is_batch = True if image_shape.ndims == 3: is_batch = False image = array_ops.expand_dims(image, 0) elif image_shape.ndims is None: is_batch = False image = array_ops.expand_dims(image, 0) image.set_shape([None] * 4) elif image_shape.ndims != 4: raise ValueError('\'image\' must have either 3 or 4 dimensions.') assert_ops = _CheckAtLeast3DImage(image, require_static=False) assert_ops += _assert(target_width > 0, ValueError, 'target_width must be > 0.') assert_ops += 
_assert(target_height > 0, ValueError, 'target_height must be > 0.') image = control_flow_ops.with_dependencies(assert_ops, image) def max_(x, y): if _is_tensor(x) or _is_tensor(y): return math_ops.maximum(x, y) else: return max(x, y) _, height, width, _ = _ImageDimensions(image, rank=4) # convert values to float, to ease divisions f_height = math_ops.cast(height, dtype=dtypes.float64) f_width = math_ops.cast(width, dtype=dtypes.float64) f_target_height = math_ops.cast(target_height, dtype=dtypes.float64) f_target_width = math_ops.cast(target_width, dtype=dtypes.float64) # Find the ratio by which the image must be adjusted # to fit within the target ratio = max_(f_width / f_target_width, f_height / f_target_height) resized_height_float = f_height / ratio resized_width_float = f_width / ratio resized_height = math_ops.cast( math_ops.floor(resized_height_float), dtype=dtypes.int32) resized_width = math_ops.cast( math_ops.floor(resized_width_float), dtype=dtypes.int32) padding_height = (f_target_height - resized_height_float) / 2 padding_width = (f_target_width - resized_width_float) / 2 f_padding_height = math_ops.floor(padding_height) f_padding_width = math_ops.floor(padding_width) p_height = max_(0, math_ops.cast(f_padding_height, dtype=dtypes.int32)) p_width = max_(0, math_ops.cast(f_padding_width, dtype=dtypes.int32)) # Resize first, then pad to meet requested dimensions resized = resize_fn(image, [resized_height, resized_width]) padded = pad_to_bounding_box(resized, p_height, p_width, target_height, target_width) if padded.get_shape().ndims is None: raise ValueError('padded contains no shape.') _ImageDimensions(padded, rank=4) if not is_batch: padded = array_ops.squeeze(padded, axis=[0]) return padded @tf_export(v1=['image.resize_image_with_pad']) def resize_image_with_pad_v1(image, target_height, target_width, method=ResizeMethodV1.BILINEAR, align_corners=False): """Resizes and pads an image to a target width and height. Resizes an image to a target width and height by keeping the aspect ratio the same without distortion. If the target dimensions don't match the image dimensions, the image is resized and then padded with zeroes to match requested dimensions. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. target_height: Target height. target_width: Target width. method: Method to use for resizing image. See `resize_images()` align_corners: bool. If True, the centers of the 4 corner pixels of the input and output tensors are aligned, preserving the values at the corner pixels. Defaults to `False`. Raises: ValueError: if `target_height` or `target_width` are zero or negative. Returns: Resized and padded image. If `images` was 4-D, a 4-D float Tensor of shape `[batch, new_height, new_width, channels]`. If `images` was 3-D, a 3-D float Tensor of shape `[new_height, new_width, channels]`. """ def _resize_fn(im, new_size): return resize_images(im, new_size, method, align_corners=align_corners) return _resize_image_with_pad_common(image, target_height, target_width, _resize_fn) @tf_export('image.resize_with_pad', v1=[]) def resize_image_with_pad_v2(image, target_height, target_width, method=ResizeMethod.BILINEAR, antialias=False): """Resizes and pads an image to a target width and height. Resizes an image to a target width and height by keeping the aspect ratio the same without distortion. 
If the target dimensions don't match the image dimensions, the image is resized and then padded with zeroes to match requested dimensions. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. target_height: Target height. target_width: Target width. method: Method to use for resizing image. See `image.resize()` antialias: Whether to use anti-aliasing when resizing. See 'image.resize()'. Raises: ValueError: if `target_height` or `target_width` are zero or negative. Returns: Resized and padded image. If `images` was 4-D, a 4-D float Tensor of shape `[batch, new_height, new_width, channels]`. If `images` was 3-D, a 3-D float Tensor of shape `[new_height, new_width, channels]`. """ def _resize_fn(im, new_size): return resize_images_v2(im, new_size, method, antialias=antialias) return _resize_image_with_pad_common(image, target_height, target_width, _resize_fn) @tf_export('image.per_image_standardization') def per_image_standardization(image): """Linearly scales `image` to have zero mean and unit variance. This op computes `(x - mean) / adjusted_stddev`, where `mean` is the average of all values in image, and `adjusted_stddev = max(stddev, 1.0/sqrt(image.NumElements()))`. `stddev` is the standard deviation of all values in `image`. It is capped away from zero to protect against division by 0 when handling uniform images. Args: image: An n-D Tensor where the last 3 dimensions are `[height, width, channels]`. Returns: The standardized image with same shape as `image`. Raises: ValueError: if the shape of 'image' is incompatible with this function. """ with ops.name_scope(None, 'per_image_standardization', [image]) as scope: image = ops.convert_to_tensor(image, name='image') image = _AssertAtLeast3DImage(image) num_pixels = math_ops.reduce_prod(array_ops.shape(image)[-3:]) image = math_ops.cast(image, dtype=dtypes.float32) image_mean = math_ops.reduce_mean(image, axis=[-1, -2, -3], keepdims=True) variance = ( math_ops.reduce_mean( math_ops.square(image), axis=[-1, -2, -3], keepdims=True) - math_ops.square(image_mean)) variance = gen_nn_ops.relu(variance) stddev = math_ops.sqrt(variance) # Apply a minimum normalization that protects us against uniform images. min_stddev = math_ops.rsqrt(math_ops.cast(num_pixels, dtypes.float32)) pixel_value_scale = math_ops.maximum(stddev, min_stddev) pixel_value_offset = image_mean image = math_ops.subtract(image, pixel_value_offset) image = math_ops.div(image, pixel_value_scale, name=scope) return image @tf_export('image.random_brightness') def random_brightness(image, max_delta, seed=None): """Adjust the brightness of images by a random factor. Equivalent to `adjust_brightness()` using a `delta` randomly picked in the interval `[-max_delta, max_delta)`. Args: image: An image or images to adjust. max_delta: float, must be non-negative. seed: A Python integer. Used to create a random seed. See `tf.set_random_seed` for behavior. Returns: The brightness-adjusted image(s). Raises: ValueError: if `max_delta` is negative. """ if max_delta < 0: raise ValueError('max_delta must be non-negative.') delta = random_ops.random_uniform([], -max_delta, max_delta, seed=seed) return adjust_brightness(image, delta) @tf_export('image.random_contrast') def random_contrast(image, lower, upper, seed=None): """Adjust the contrast of an image or images by a random factor. Equivalent to `adjust_contrast()` but uses a `contrast_factor` randomly picked in the interval `[lower, upper]`. 
Args: image: An image tensor with 3 or more dimensions. lower: float. Lower bound for the random contrast factor. upper: float. Upper bound for the random contrast factor. seed: A Python integer. Used to create a random seed. See `tf.set_random_seed` for behavior. Returns: The contrast-adjusted image(s). Raises: ValueError: if `upper <= lower` or if `lower < 0`. """ if upper <= lower: raise ValueError('upper must be > lower.') if lower < 0: raise ValueError('lower must be non-negative.') # Generate a float in [lower, upper] contrast_factor = random_ops.random_uniform([], lower, upper, seed=seed) return adjust_contrast(image, contrast_factor) @tf_export('image.adjust_brightness') def adjust_brightness(image, delta): """Adjust the brightness of RGB or Grayscale images. This is a convenience method that converts RGB images to float representation, adjusts their brightness, and then converts them back to the original data type. If several adjustments are chained, it is advisable to minimize the number of redundant conversions. The value `delta` is added to all components of the tensor `image`. `image` is converted to `float` and scaled appropriately if it is in fixed-point representation, and `delta` is converted to the same data type. For regular images, `delta` should be in the range `[0,1)`, as it is added to the image in floating point representation, where pixel values are in the `[0,1)` range. Args: image: RGB image or images to adjust. delta: A scalar. Amount to add to the pixel values. Returns: A brightness-adjusted tensor of the same shape and type as `image`. """ with ops.name_scope(None, 'adjust_brightness', [image, delta]) as name: image = ops.convert_to_tensor(image, name='image') # Remember original dtype so we can convert back if needed orig_dtype = image.dtype if orig_dtype in [dtypes.float16, dtypes.float32]: flt_image = image else: flt_image = convert_image_dtype(image, dtypes.float32) adjusted = math_ops.add( flt_image, math_ops.cast(delta, flt_image.dtype), name=name) return convert_image_dtype(adjusted, orig_dtype, saturate=True) @tf_export('image.adjust_contrast') def adjust_contrast(images, contrast_factor): """Adjust contrast of RGB or grayscale images. This is a convenience method that converts RGB images to float representation, adjusts their contrast, and then converts them back to the original data type. If several adjustments are chained, it is advisable to minimize the number of redundant conversions. `images` is a tensor of at least 3 dimensions. The last 3 dimensions are interpreted as `[height, width, channels]`. The other dimensions only represent a collection of images, such as `[batch, height, width, channels].` Contrast is adjusted independently for each channel of each image. For each channel, this Op computes the mean of the image pixels in the channel and then adjusts each component `x` of each pixel to `(x - mean) * contrast_factor + mean`. Args: images: Images to adjust. At least 3-D. contrast_factor: A float multiplier for adjusting contrast. Returns: The contrast-adjusted image or images. 
""" with ops.name_scope(None, 'adjust_contrast', [images, contrast_factor]) as name: images = ops.convert_to_tensor(images, name='images') # Remember original dtype so we can convert back if needed orig_dtype = images.dtype if orig_dtype in (dtypes.float16, dtypes.float32): flt_images = images else: flt_images = convert_image_dtype(images, dtypes.float32) adjusted = gen_image_ops.adjust_contrastv2( flt_images, contrast_factor=contrast_factor, name=name) return convert_image_dtype(adjusted, orig_dtype, saturate=True) @tf_export('image.adjust_gamma') def adjust_gamma(image, gamma=1, gain=1): """Performs Gamma Correction on the input image. Also known as Power Law Transform. This function transforms the input image pixelwise according to the equation `Out = In**gamma` after scaling each pixel to the range 0 to 1. Args: image : A Tensor. gamma : A scalar or tensor. Non-negative real number. gain : A scalar or tensor. The constant multiplier. Returns: A Tensor. Gamma corrected output image. Raises: ValueError: If gamma is negative. Notes: For gamma greater than 1, the histogram will shift towards the left and the output image will be darker than the input image. For gamma less than 1, the histogram will shift towards the right and the output image will be brighter than the input image. References: [1] http://en.wikipedia.org/wiki/Gamma_correction """ with ops.name_scope(None, 'adjust_gamma', [image, gamma, gain]) as name: # Convert pixel value to DT_FLOAT for computing adjusted image. img = ops.convert_to_tensor(image, name='img', dtype=dtypes.float32) # Keep image dtype for computing the scale of corresponding dtype. image = ops.convert_to_tensor(image, name='image') assert_op = _assert(gamma >= 0, ValueError, 'Gamma should be a non-negative real number.') if assert_op: gamma = control_flow_ops.with_dependencies(assert_op, gamma) # scale = max(dtype) - min(dtype). scale = constant_op.constant( image.dtype.limits[1] - image.dtype.limits[0], dtype=dtypes.float32) # According to the definition of gamma correction. adjusted_img = (img / scale)**gamma * scale * gain return adjusted_img @tf_export('image.convert_image_dtype') def convert_image_dtype(image, dtype, saturate=False, name=None): """Convert `image` to `dtype`, scaling its values if needed. Images that are represented using floating point values are expected to have values in the range [0,1). Image data stored in integer data types are expected to have values in the range `[0,MAX]`, where `MAX` is the largest positive representable number for the data type. This op converts between data types, scaling the values appropriately before casting. Note that converting from floating point inputs to integer types may lead to over/underflow problems. Set saturate to `True` to avoid such problems in problematic conversions. If enabled, saturation will clip the output into the allowed range before performing a potentially dangerous cast (and only before performing such a cast, i.e., when casting from a floating point to an integer type, and when casting from a signed to an unsigned type; `saturate` has no effect on casts between floats, or on casts that increase the type's range). Args: image: An image. dtype: A `DType` to convert `image` to. saturate: If `True`, clip the input before casting (if necessary). name: A name for this operation (optional). Returns: `image`, converted to `dtype`. 
""" image = ops.convert_to_tensor(image, name='image') if dtype == image.dtype: return array_ops.identity(image, name=name) with ops.name_scope(name, 'convert_image', [image]) as name: # Both integer: use integer multiplication in the larger range if image.dtype.is_integer and dtype.is_integer: scale_in = image.dtype.max scale_out = dtype.max if scale_in > scale_out: # Scaling down, scale first, then cast. The scaling factor will # cause in.max to be mapped to above out.max but below out.max+1, # so that the output is safely in the supported range. scale = (scale_in + 1) // (scale_out + 1) scaled = math_ops.div(image, scale) if saturate: return math_ops.saturate_cast(scaled, dtype, name=name) else: return math_ops.cast(scaled, dtype, name=name) else: # Scaling up, cast first, then scale. The scale will not map in.max to # out.max, but converting back and forth should result in no change. if saturate: cast = math_ops.saturate_cast(image, dtype) else: cast = math_ops.cast(image, dtype) scale = (scale_out + 1) // (scale_in + 1) return math_ops.multiply(cast, scale, name=name) elif image.dtype.is_floating and dtype.is_floating: # Both float: Just cast, no possible overflows in the allowed ranges. # Note: We're ignoring float overflows. If your image dynamic range # exceeds float range you're on your own. return math_ops.cast(image, dtype, name=name) else: if image.dtype.is_integer: # Converting to float: first cast, then scale. No saturation possible. cast = math_ops.cast(image, dtype) scale = 1. / image.dtype.max return math_ops.multiply(cast, scale, name=name) else: # Converting from float: first scale, then cast scale = dtype.max + 0.5 # avoid rounding problems in the cast scaled = math_ops.multiply(image, scale) if saturate: return math_ops.saturate_cast(scaled, dtype, name=name) else: return math_ops.cast(scaled, dtype, name=name) @tf_export('image.rgb_to_grayscale') def rgb_to_grayscale(images, name=None): """Converts one or more images from RGB to Grayscale. Outputs a tensor of the same `DType` and rank as `images`. The size of the last dimension of the output is 1, containing the Grayscale value of the pixels. Args: images: The RGB tensor to convert. Last dimension must have size 3 and should contain RGB values. name: A name for the operation (optional). Returns: The converted grayscale image(s). """ with ops.name_scope(name, 'rgb_to_grayscale', [images]) as name: images = ops.convert_to_tensor(images, name='images') # Remember original dtype so we can convert back if needed orig_dtype = images.dtype flt_image = convert_image_dtype(images, dtypes.float32) # Reference for converting between RGB and grayscale. # https://en.wikipedia.org/wiki/Luma_%28video%29 rgb_weights = [0.2989, 0.5870, 0.1140] gray_float = math_ops.tensordot(flt_image, rgb_weights, [-1, -1]) gray_float = array_ops.expand_dims(gray_float, -1) return convert_image_dtype(gray_float, orig_dtype, name=name) @tf_export('image.grayscale_to_rgb') def grayscale_to_rgb(images, name=None): """Converts one or more images from Grayscale to RGB. Outputs a tensor of the same `DType` and rank as `images`. The size of the last dimension of the output is 3, containing the RGB value of the pixels. Args: images: The Grayscale tensor to convert. Last dimension must be size 1. name: A name for the operation (optional). Returns: The converted RGB image(s). 
""" with ops.name_scope(name, 'grayscale_to_rgb', [images]) as name: images = ops.convert_to_tensor(images, name='images') rank_1 = array_ops.expand_dims(array_ops.rank(images) - 1, 0) shape_list = ([array_ops.ones(rank_1, dtype=dtypes.int32)] + [array_ops.expand_dims(3, 0)]) multiples = array_ops.concat(shape_list, 0) rgb = array_ops.tile(images, multiples, name=name) rgb.set_shape(images.get_shape()[:-1].concatenate([3])) return rgb # pylint: disable=invalid-name @tf_export('image.random_hue') def random_hue(image, max_delta, seed=None): """Adjust the hue of RGB images by a random factor. Equivalent to `adjust_hue()` but uses a `delta` randomly picked in the interval `[-max_delta, max_delta]`. `max_delta` must be in the interval `[0, 0.5]`. Args: image: RGB image or images. Size of the last dimension must be 3. max_delta: float. Maximum value for the random delta. seed: An operation-specific seed. It will be used in conjunction with the graph-level seed to determine the real seeds that will be used in this operation. Please see the documentation of set_random_seed for its interaction with the graph-level random seed. Returns: Adjusted image(s), same shape and DType as `image`. Raises: ValueError: if `max_delta` is invalid. """ if max_delta > 0.5: raise ValueError('max_delta must be <= 0.5.') if max_delta < 0: raise ValueError('max_delta must be non-negative.') delta = random_ops.random_uniform([], -max_delta, max_delta, seed=seed) return adjust_hue(image, delta) @tf_export('image.adjust_hue') def adjust_hue(image, delta, name=None): """Adjust hue of RGB images. This is a convenience method that converts an RGB image to float representation, converts it to HSV, adds an offset to the hue channel, converts back to RGB and then back to the original data type. If several adjustments are chained, it is advisable to minimize the number of redundant conversions. `image` is an RGB image. The image hue is adjusted by converting the image(s) to HSV and rotating the hue channel (H) by `delta`. The image is then converted back to RGB. `delta` must be in the interval `[-1, 1]`. Args: image: RGB image or images. Size of the last dimension must be 3. delta: float. How much to add to the hue channel. name: A name for this operation (optional). Returns: Adjusted image(s), same shape and DType as `image`. """ with ops.name_scope(name, 'adjust_hue', [image]) as name: image = ops.convert_to_tensor(image, name='image') # Remember original dtype so we can convert back if needed orig_dtype = image.dtype if orig_dtype in (dtypes.float16, dtypes.float32): flt_image = image else: flt_image = convert_image_dtype(image, dtypes.float32) rgb_altered = gen_image_ops.adjust_hue(flt_image, delta) return convert_image_dtype(rgb_altered, orig_dtype) # pylint: disable=invalid-name @tf_export('image.random_jpeg_quality') def random_jpeg_quality(image, min_jpeg_quality, max_jpeg_quality, seed=None): """Randomly changes jpeg encoding quality for inducing jpeg noise. `min_jpeg_quality` must be in the interval `[0, 100]` and less than `max_jpeg_quality`. `max_jpeg_quality` must be in the interval `[0, 100]`. Args: image: RGB image or images. Size of the last dimension must be 3. min_jpeg_quality: Minimum jpeg encoding quality to use. max_jpeg_quality: Maximum jpeg encoding quality to use. seed: An operation-specific seed. It will be used in conjunction with the graph-level seed to determine the real seeds that will be used in this operation. 
Please see the documentation of set_random_seed for its interaction with the graph-level random seed. Returns: Adjusted image(s), same shape and DType as `image`. Raises: ValueError: if `min_jpeg_quality` or `max_jpeg_quality` is invalid. """ if (min_jpeg_quality < 0 or max_jpeg_quality < 0 or min_jpeg_quality > 100 or max_jpeg_quality > 100): raise ValueError('jpeg encoding range must be between 0 and 100.') if min_jpeg_quality >= max_jpeg_quality: raise ValueError('`min_jpeg_quality` must be less than `max_jpeg_quality`.') np.random.seed(seed) jpeg_quality = np.random.randint(min_jpeg_quality, max_jpeg_quality) return adjust_jpeg_quality(image, jpeg_quality) @tf_export('image.adjust_jpeg_quality') def adjust_jpeg_quality(image, jpeg_quality, name=None): """Adjust jpeg encoding quality of an RGB image. This is a convenience method that adjusts jpeg encoding quality of an RGB image. `image` is an RGB image. The image's encoding quality is adjusted to `jpeg_quality`. `jpeg_quality` must be in the interval `[0, 100]`. Args: image: RGB image or images. Size of the last dimension must be 3. jpeg_quality: int. jpeg encoding quality. name: A name for this operation (optional). Returns: Adjusted image(s), same shape and DType as `image`. """ with ops.name_scope(name, 'adjust_jpeg_quality', [image]) as name: image = ops.convert_to_tensor(image, name='image') # Remember original dtype to so we can convert back if needed orig_dtype = image.dtype # Convert to uint8 image = convert_image_dtype(image, dtypes.uint8) # Encode image to jpeg with given jpeg quality image = gen_image_ops.encode_jpeg(image, quality=jpeg_quality) # Decode jpeg image image = gen_image_ops.decode_jpeg(image) # Convert back to original dtype and return return convert_image_dtype(image, orig_dtype) @tf_export('image.random_saturation') def random_saturation(image, lower, upper, seed=None): """Adjust the saturation of RGB images by a random factor. Equivalent to `adjust_saturation()` but uses a `saturation_factor` randomly picked in the interval `[lower, upper]`. Args: image: RGB image or images. Size of the last dimension must be 3. lower: float. Lower bound for the random saturation factor. upper: float. Upper bound for the random saturation factor. seed: An operation-specific seed. It will be used in conjunction with the graph-level seed to determine the real seeds that will be used in this operation. Please see the documentation of set_random_seed for its interaction with the graph-level random seed. Returns: Adjusted image(s), same shape and DType as `image`. Raises: ValueError: if `upper <= lower` or if `lower < 0`. """ if upper <= lower: raise ValueError('upper must be > lower.') if lower < 0: raise ValueError('lower must be non-negative.') # Pick a float in [lower, upper] saturation_factor = random_ops.random_uniform([], lower, upper, seed=seed) return adjust_saturation(image, saturation_factor) @tf_export('image.adjust_saturation') def adjust_saturation(image, saturation_factor, name=None): """Adjust saturation of RGB images. This is a convenience method that converts RGB images to float representation, converts them to HSV, add an offset to the saturation channel, converts back to RGB and then back to the original data type. If several adjustments are chained it is advisable to minimize the number of redundant conversions. `image` is an RGB image or images. The image saturation is adjusted by converting the images to HSV and multiplying the saturation (S) channel by `saturation_factor` and clipping. 
The images are then converted back to RGB. Args: image: RGB image or images. Size of the last dimension must be 3. saturation_factor: float. Factor to multiply the saturation by. name: A name for this operation (optional). Returns: Adjusted image(s), same shape and DType as `image`. """ with ops.name_scope(name, 'adjust_saturation', [image]) as name: image = ops.convert_to_tensor(image, name='image') # Remember original dtype to so we can convert back if needed orig_dtype = image.dtype if orig_dtype in (dtypes.float16, dtypes.float32): flt_image = image else: flt_image = convert_image_dtype(image, dtypes.float32) adjusted = gen_image_ops.adjust_saturation(flt_image, saturation_factor) return convert_image_dtype(adjusted, orig_dtype) @tf_export('io.is_jpeg', 'image.is_jpeg', v1=['io.is_jpeg', 'image.is_jpeg']) def is_jpeg(contents, name=None): r"""Convenience function to check if the 'contents' encodes a JPEG image. Args: contents: 0-D `string`. The encoded image bytes. name: A name for the operation (optional) Returns: A scalar boolean tensor indicating if 'contents' may be a JPEG image. is_jpeg is susceptible to false positives. """ # Normal JPEGs start with \xff\xd8\xff\xe0 # JPEG with EXIF stats with \xff\xd8\xff\xe1 # Use \xff\xd8\xff to cover both. with ops.name_scope(name, 'is_jpeg'): substr = string_ops.substr(contents, 0, 3) return math_ops.equal(substr, b'\xff\xd8\xff', name=name) def _is_png(contents, name=None): r"""Convenience function to check if the 'contents' encodes a PNG image. Args: contents: 0-D `string`. The encoded image bytes. name: A name for the operation (optional) Returns: A scalar boolean tensor indicating if 'contents' may be a PNG image. is_png is susceptible to false positives. """ with ops.name_scope(name, 'is_png'): substr = string_ops.substr(contents, 0, 3) return math_ops.equal(substr, b'\211PN', name=name) tf_export('io.decode_and_crop_jpeg', 'image.decode_and_crop_jpeg', v1=['io.decode_and_crop_jpeg', 'image.decode_and_crop_jpeg'])( gen_image_ops.decode_and_crop_jpeg) tf_export('io.decode_bmp', 'image.decode_bmp', v1=['io.decode_bmp', 'image.decode_bmp'])(gen_image_ops.decode_bmp) tf_export('io.decode_gif', 'image.decode_gif', v1=['io.decode_gif', 'image.decode_gif'])(gen_image_ops.decode_gif) tf_export('io.decode_jpeg', 'image.decode_jpeg', v1=['io.decode_jpeg', 'image.decode_jpeg'])(gen_image_ops.decode_jpeg) tf_export('io.decode_png', 'image.decode_png', v1=['io.decode_png', 'image.decode_png'])(gen_image_ops.decode_png) tf_export('io.encode_jpeg', 'image.encode_jpeg', v1=['io.encode_jpeg', 'image.encode_jpeg'])(gen_image_ops.encode_jpeg) tf_export('io.extract_jpeg_shape', 'image.extract_jpeg_shape', v1=['io.extract_jpeg_shape', 'image.extract_jpeg_shape'])( gen_image_ops.extract_jpeg_shape) @tf_export('io.decode_image', 'image.decode_image', v1=['io.decode_image', 'image.decode_image']) def decode_image(contents, channels=None, dtype=dtypes.uint8, name=None): """Convenience function for `decode_bmp`, `decode_gif`, `decode_jpeg`, and `decode_png`. Detects whether an image is a BMP, GIF, JPEG, or PNG, and performs the appropriate operation to convert the input bytes `string` into a `Tensor` of type `dtype`. Note: `decode_gif` returns a 4-D array `[num_frames, height, width, 3]`, as opposed to `decode_bmp`, `decode_jpeg` and `decode_png`, which return 3-D arrays `[height, width, num_channels]`. Make sure to take this into account when constructing your graph if you are intermixing GIF files with BMP, JPEG, and/or PNG files. Args: contents: 0-D `string`. 
      The encoded image bytes.
    channels: An optional `int`. Defaults to `0`. Number of color channels for
      the decoded image.
    dtype: The desired DType of the returned `Tensor`.
    name: A name for the operation (optional).

  Returns:
    `Tensor` with type `dtype` and shape `[height, width, num_channels]` for
    BMP, JPEG, and PNG images and shape `[num_frames, height, width, 3]` for
    GIF images.

  Raises:
    ValueError: On incorrect number of channels.
  """
  with ops.name_scope(name, 'decode_image'):
    if channels not in (None, 0, 1, 3, 4):
      raise ValueError('channels must be in (None, 0, 1, 3, 4)')
    substr = string_ops.substr(contents, 0, 3)

    def _bmp():
      """Decodes a BMP image."""
      signature = string_ops.substr(contents, 0, 2)
      # Create assert op to check that bytes are BMP decodable
      is_bmp = math_ops.equal(signature, 'BM', name='is_bmp')
      decode_msg = 'Unable to decode bytes as JPEG, PNG, GIF, or BMP'
      assert_decode = control_flow_ops.Assert(is_bmp, [decode_msg])
      bmp_channels = 0 if channels is None else channels
      good_channels = math_ops.not_equal(bmp_channels, 1, name='check_channels')
      channels_msg = 'Channels must be in (None, 0, 3) when decoding BMP images'
      assert_channels = control_flow_ops.Assert(good_channels, [channels_msg])
      with ops.control_dependencies([assert_decode, assert_channels]):
        return convert_image_dtype(gen_image_ops.decode_bmp(contents), dtype)

    def _gif():
      """Decodes a GIF image."""
      # Create assert to make sure that channels is not set to 1
      # Already checked above that channels is in (None, 0, 1, 3, 4)
      gif_channels = 0 if channels is None else channels
      good_channels = math_ops.logical_and(
          math_ops.not_equal(gif_channels, 1, name='check_gif_channels'),
          math_ops.not_equal(gif_channels, 4, name='check_gif_channels'))
      channels_msg = 'Channels must be in (None, 0, 3) when decoding GIF images'
      assert_channels = control_flow_ops.Assert(good_channels, [channels_msg])
      with ops.control_dependencies([assert_channels]):
        return convert_image_dtype(gen_image_ops.decode_gif(contents), dtype)

    def check_gif():
      """Checks if an image is GIF and decodes it as GIF or BMP."""
      # Create assert op to check that bytes are GIF decodable
      is_gif = math_ops.equal(substr, b'\x47\x49\x46', name='is_gif')
      return control_flow_ops.cond(is_gif, _gif, _bmp, name='cond_gif')

    def _png():
      """Decodes a PNG image."""
      return convert_image_dtype(
          gen_image_ops.decode_png(contents, channels,
                                   dtype=dtypes.uint8
                                   if dtype == dtypes.uint8
                                   else dtypes.uint16), dtype)

    def check_png():
      """Checks if an image is PNG."""
      return control_flow_ops.cond(
          _is_png(contents), _png, check_gif, name='cond_png')

    def _jpeg():
      """Decodes a JPEG image."""
      jpeg_channels = 0 if channels is None else channels
      good_channels = math_ops.not_equal(
          jpeg_channels, 4, name='check_jpeg_channels')
      channels_msg = ('Channels must be in (None, 0, 1, 3) when decoding JPEG '
                      'images')
      assert_channels = control_flow_ops.Assert(good_channels, [channels_msg])
      with ops.control_dependencies([assert_channels]):
        return convert_image_dtype(
            gen_image_ops.decode_jpeg(contents, channels), dtype)

    # Decode normal JPEG images (start with \xff\xd8\xff\xe0)
    # as well as JPEG images with EXIF data (start with \xff\xd8\xff\xe1).
    return control_flow_ops.cond(
        is_jpeg(contents), _jpeg, check_png, name='cond_jpeg')


@tf_export('image.total_variation')
def total_variation(images, name=None):
  """Calculate and return the total variation for one or more images.

  The total variation is the sum of the absolute differences for neighboring
  pixel-values in the input images. This measures how much noise is in the
  images.
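
  For example, for a single-channel 2x2 image with pixel values
  `[[1, 2], [3, 4]]`, the absolute differences along the height sum to
  `|3-1| + |4-2| = 4` and the absolute differences along the width sum to
  `|2-1| + |4-3| = 2`, so the total variation is `6`.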
This can be used as a loss-function during optimization so as to suppress noise in images. If you have a batch of images, then you should calculate the scalar loss-value as the sum: `loss = tf.reduce_sum(tf.image.total_variation(images))` This implements the anisotropic 2-D version of the formula described here: https://en.wikipedia.org/wiki/Total_variation_denoising Args: images: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. name: A name for the operation (optional). Raises: ValueError: if images.shape is not a 3-D or 4-D vector. Returns: The total variation of `images`. If `images` was 4-D, return a 1-D float Tensor of shape `[batch]` with the total variation for each image in the batch. If `images` was 3-D, return a scalar float with the total variation for that image. """ with ops.name_scope(name, 'total_variation'): ndims = images.get_shape().ndims if ndims == 3: # The input is a single image with shape [height, width, channels]. # Calculate the difference of neighboring pixel-values. # The images are shifted one pixel along the height and width by slicing. pixel_dif1 = images[1:, :, :] - images[:-1, :, :] pixel_dif2 = images[:, 1:, :] - images[:, :-1, :] # Sum for all axis. (None is an alias for all axis.) sum_axis = None elif ndims == 4: # The input is a batch of images with shape: # [batch, height, width, channels]. # Calculate the difference of neighboring pixel-values. # The images are shifted one pixel along the height and width by slicing. pixel_dif1 = images[:, 1:, :, :] - images[:, :-1, :, :] pixel_dif2 = images[:, :, 1:, :] - images[:, :, :-1, :] # Only sum for the last 3 axis. # This results in a 1-D tensor with the total variation for each image. sum_axis = [1, 2, 3] else: raise ValueError('\'images\' must be either 3 or 4-dimensional.') # Calculate the total variation by taking the absolute value of the # pixel-differences and summing over the appropriate axis. tot_var = ( math_ops.reduce_sum(math_ops.abs(pixel_dif1), axis=sum_axis) + math_ops.reduce_sum(math_ops.abs(pixel_dif2), axis=sum_axis)) return tot_var @tf_export('image.sample_distorted_bounding_box', v1=[]) def sample_distorted_bounding_box_v2(image_size, bounding_boxes, seed=0, min_object_covered=0.1, aspect_ratio_range=None, area_range=None, max_attempts=None, use_image_if_no_bounding_boxes=None, name=None): """Generate a single randomly distorted bounding box for an image. Bounding box annotations are often supplied in addition to ground-truth labels in image recognition or object localization tasks. A common technique for training such a system is to randomly distort an image while preserving its content, i.e. *data augmentation*. This Op outputs a randomly distorted localization of an object, i.e. bounding box, given an `image_size`, `bounding_boxes` and a series of constraints. The output of this Op is a single bounding box that may be used to crop the original image. The output is returned as 3 tensors: `begin`, `size` and `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize what the bounding box looks like. Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and height of the underlying image. For example, ```python # Generate a single distorted bounding box. 
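  # `image` is assumed to be an already-decoded 3-D image tensor, and
  # `bounding_boxes` a float32 tensor of shape [1, N, 4] holding normalized
  # [y_min, x_min, y_max, x_max] boxes (see the Args section below).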
begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box( tf.shape(image), bounding_boxes=bounding_boxes, min_object_covered=0.1) # Draw the bounding box in an image summary. image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), bbox_for_draw) tf.summary.image('images_with_box', image_with_box) # Employ the bounding box to distort the image. distorted_image = tf.slice(image, begin, size) ``` Note that if no bounding box information is available, setting `use_image_if_no_bounding_boxes = true` will assume there is a single implicit bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is false and no bounding boxes are supplied, an error is raised. Args: image_size: A `Tensor`. Must be one of the following types: `uint8`, `int8`, `int16`, `int32`, `int64`. 1-D, containing `[height, width, channels]`. bounding_boxes: A `Tensor` of type `float32`. 3-D with shape `[batch, N, 4]` describing the N bounding boxes associated with the image. seed: An optional `int`. Defaults to `0`. If `seed` is set to non-zero, the random number generator is seeded by the given `seed`. Otherwise, it is seeded by a random seed. min_object_covered: A Tensor of type `float32`. Defaults to `0.1`. The cropped area of the image must contain at least this fraction of any bounding box supplied. The value of this parameter should be non-negative. In the case of 0, the cropped area does not need to overlap any of the bounding boxes supplied. aspect_ratio_range: An optional list of `floats`. Defaults to `[0.75, 1.33]`. The cropped area of the image must have an aspect `ratio = width / height` within this range. area_range: An optional list of `floats`. Defaults to `[0.05, 1]`. The cropped area of the image must contain a fraction of the supplied image within this range. max_attempts: An optional `int`. Defaults to `100`. Number of attempts at generating a cropped region of the image of the specified constraints. After `max_attempts` failures, return the entire image. use_image_if_no_bounding_boxes: An optional `bool`. Defaults to `False`. Controls behavior if no bounding boxes supplied. If true, assume an implicit bounding box covering the whole input. If false, raise an error. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (begin, size, bboxes). begin: A `Tensor`. Has the same type as `image_size`. 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to `tf.slice`. size: A `Tensor`. Has the same type as `image_size`. 1-D, containing `[target_height, target_width, -1]`. Provide as input to `tf.slice`. bboxes: A `Tensor` of type `float32`. 3-D with shape `[1, 1, 4]` containing the distorted bounding box. Provide as input to `tf.image.draw_bounding_boxes`. """ seed1, seed2 = random_seed.get_seed(seed) if seed else (0, 0) return sample_distorted_bounding_box( image_size, bounding_boxes, seed1, seed2, min_object_covered, aspect_ratio_range, area_range, max_attempts, use_image_if_no_bounding_boxes, name) @tf_export(v1=['image.sample_distorted_bounding_box']) @deprecation.deprecated(date=None, instructions='`seed2` arg is deprecated.' 'Use sample_distorted_bounding_box_v2 instead.') def sample_distorted_bounding_box(image_size, bounding_boxes, seed=None, seed2=None, min_object_covered=0.1, aspect_ratio_range=None, area_range=None, max_attempts=None, use_image_if_no_bounding_boxes=None, name=None): """Generate a single randomly distorted bounding box for an image. 
Bounding box annotations are often supplied in addition to ground-truth labels in image recognition or object localization tasks. A common technique for training such a system is to randomly distort an image while preserving its content, i.e. *data augmentation*. This Op outputs a randomly distorted localization of an object, i.e. bounding box, given an `image_size`, `bounding_boxes` and a series of constraints. The output of this Op is a single bounding box that may be used to crop the original image. The output is returned as 3 tensors: `begin`, `size` and `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize what the bounding box looks like. Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and height of the underlying image. For example, ```python # Generate a single distorted bounding box. begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box( tf.shape(image), bounding_boxes=bounding_boxes, min_object_covered=0.1) # Draw the bounding box in an image summary. image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), bbox_for_draw) tf.summary.image('images_with_box', image_with_box) # Employ the bounding box to distort the image. distorted_image = tf.slice(image, begin, size) ``` Note that if no bounding box information is available, setting `use_image_if_no_bounding_boxes = true` will assume there is a single implicit bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is false and no bounding boxes are supplied, an error is raised. Args: image_size: A `Tensor`. Must be one of the following types: `uint8`, `int8`, `int16`, `int32`, `int64`. 1-D, containing `[height, width, channels]`. bounding_boxes: A `Tensor` of type `float32`. 3-D with shape `[batch, N, 4]` describing the N bounding boxes associated with the image. seed: An optional `int`. Defaults to `0`. If either `seed` or `seed2` are set to non-zero, the random number generator is seeded by the given `seed`. Otherwise, it is seeded by a random seed. seed2: An optional `int`. Defaults to `0`. A second seed to avoid seed collision. min_object_covered: A Tensor of type `float32`. Defaults to `0.1`. The cropped area of the image must contain at least this fraction of any bounding box supplied. The value of this parameter should be non-negative. In the case of 0, the cropped area does not need to overlap any of the bounding boxes supplied. aspect_ratio_range: An optional list of `floats`. Defaults to `[0.75, 1.33]`. The cropped area of the image must have an aspect ratio = width / height within this range. area_range: An optional list of `floats`. Defaults to `[0.05, 1]`. The cropped area of the image must contain a fraction of the supplied image within this range. max_attempts: An optional `int`. Defaults to `100`. Number of attempts at generating a cropped region of the image of the specified constraints. After `max_attempts` failures, return the entire image. use_image_if_no_bounding_boxes: An optional `bool`. Defaults to `False`. Controls behavior if no bounding boxes supplied. If true, assume an implicit bounding box covering the whole input. If false, raise an error. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (begin, size, bboxes). begin: A `Tensor`. Has the same type as `image_size`. 1-D, containing `[offset_height, offset_width, 0]`. 
Provide as input to `tf.slice`. size: A `Tensor`. Has the same type as `image_size`. 1-D, containing `[target_height, target_width, -1]`. Provide as input to `tf.slice`. bboxes: A `Tensor` of type `float32`. 3-D with shape `[1, 1, 4]` containing the distorted bounding box. Provide as input to `tf.image.draw_bounding_boxes`. """ with ops.name_scope(name, 'sample_distorted_bounding_box'): return gen_image_ops.sample_distorted_bounding_box_v2( image_size, bounding_boxes, seed=seed, seed2=seed2, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes, name=name) @tf_export('image.non_max_suppression') def non_max_suppression(boxes, scores, max_output_size, iou_threshold=0.5, score_threshold=float('-inf'), name=None): """Greedily selects a subset of bounding boxes in descending order of score. Prunes away boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. Bounding boxes are supplied as `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the coordinates of any diagonal pair of box corners and the coordinates can be provided as normalized (i.e., lying in the interval `[0, 1]`) or absolute. Note that this algorithm is agnostic to where the origin is in the coordinate system. Note that this algorithm is invariant to orthogonal transformations and translations of the coordinate system; thus translating or reflections of the coordinate system result in the same boxes being selected by the algorithm. The output of this operation is a set of integers indexing into the input collection of bounding boxes representing the selected boxes. The bounding box coordinates corresponding to the selected indices can then be obtained using the `tf.gather` operation. For example: ```python selected_indices = tf.image.non_max_suppression( boxes, scores, max_output_size, iou_threshold) selected_boxes = tf.gather(boxes, selected_indices) ``` Args: boxes: A 2-D float `Tensor` of shape `[num_boxes, 4]`. scores: A 1-D float `Tensor` of shape `[num_boxes]` representing a single score corresponding to each box (each row of boxes). max_output_size: A scalar integer `Tensor` representing the maximum number of boxes to be selected by non max suppression. iou_threshold: A float representing the threshold for deciding whether boxes overlap too much with respect to IOU. score_threshold: A float representing the threshold for deciding when to remove boxes based on score. name: A name for the operation (optional). Returns: selected_indices: A 1-D integer `Tensor` of shape `[M]` representing the selected indices from the boxes tensor, where `M <= max_output_size`. """ with ops.name_scope(name, 'non_max_suppression'): iou_threshold = ops.convert_to_tensor(iou_threshold, name='iou_threshold') score_threshold = ops.convert_to_tensor( score_threshold, name='score_threshold') return gen_image_ops.non_max_suppression_v3(boxes, scores, max_output_size, iou_threshold, score_threshold) @tf_export('image.non_max_suppression_padded') def non_max_suppression_padded(boxes, scores, max_output_size, iou_threshold=0.5, score_threshold=float('-inf'), pad_to_max_output_size=False, name=None): """Greedily selects a subset of bounding boxes in descending order of score. Performs algorithmically equivalent operation to tf.image.non_max_suppression, with the addition of an optional parameter which zero-pads the output to be of size `max_output_size`. 
The output of this operation is a tuple containing the set of integers indexing into the input collection of bounding boxes representing the selected boxes and the number of valid indices in the index set. The bounding box coordinates corresponding to the selected indices can then be obtained using the `tf.slice` and `tf.gather` operations. For example: ```python selected_indices_padded, num_valid = tf.image.non_max_suppression_padded( boxes, scores, max_output_size, iou_threshold, score_threshold, pad_to_max_output_size=True) selected_indices = tf.slice( selected_indices_padded, tf.constant([0]), num_valid) selected_boxes = tf.gather(boxes, selected_indices) ``` Args: boxes: A 2-D float `Tensor` of shape `[num_boxes, 4]`. scores: A 1-D float `Tensor` of shape `[num_boxes]` representing a single score corresponding to each box (each row of boxes). max_output_size: A scalar integer `Tensor` representing the maximum number of boxes to be selected by non max suppression. iou_threshold: A float representing the threshold for deciding whether boxes overlap too much with respect to IOU. score_threshold: A float representing the threshold for deciding when to remove boxes based on score. pad_to_max_output_size: bool. If True, size of `selected_indices` output is padded to `max_output_size`. name: A name for the operation (optional). Returns: selected_indices: A 1-D integer `Tensor` of shape `[M]` representing the selected indices from the boxes tensor, where `M <= max_output_size`. valid_outputs: A scalar integer `Tensor` denoting how many elements in `selected_indices` are valid. Valid elements occur first, then padding. """ with ops.name_scope(name, 'non_max_suppression_padded'): iou_threshold = ops.convert_to_tensor(iou_threshold, name='iou_threshold') score_threshold = ops.convert_to_tensor( score_threshold, name='score_threshold') if compat.forward_compatible(2018, 8, 7) or pad_to_max_output_size: return gen_image_ops.non_max_suppression_v4( boxes, scores, max_output_size, iou_threshold, score_threshold, pad_to_max_output_size) else: return gen_image_ops.non_max_suppression_v3( boxes, scores, max_output_size, iou_threshold, score_threshold) @tf_export('image.non_max_suppression_overlaps') def non_max_suppression_with_overlaps(overlaps, scores, max_output_size, overlap_threshold=0.5, score_threshold=float('-inf'), name=None): """Greedily selects a subset of bounding boxes in descending order of score. Prunes away boxes that have high overlap with previously selected boxes. N-by-n overlap values are supplied as square matrix. The output of this operation is a set of integers indexing into the input collection of bounding boxes representing the selected boxes. The bounding box coordinates corresponding to the selected indices can then be obtained using the `tf.gather` operation. For example: ```python selected_indices = tf.image.non_max_suppression_overlaps( overlaps, scores, max_output_size, iou_threshold) selected_boxes = tf.gather(boxes, selected_indices) ``` Args: overlaps: A 2-D float `Tensor` of shape `[num_boxes, num_boxes]`. scores: A 1-D float `Tensor` of shape `[num_boxes]` representing a single score corresponding to each box (each row of boxes). max_output_size: A scalar integer `Tensor` representing the maximum number of boxes to be selected by non max suppression. overlap_threshold: A float representing the threshold for deciding whether boxes overlap too much with respect to the provided overlap values. 
score_threshold: A float representing the threshold for deciding when to remove boxes based on score. name: A name for the operation (optional). Returns: selected_indices: A 1-D integer `Tensor` of shape `[M]` representing the selected indices from the overlaps tensor, where `M <= max_output_size`. """ with ops.name_scope(name, 'non_max_suppression_overlaps'): overlap_threshold = ops.convert_to_tensor( overlap_threshold, name='overlap_threshold') # pylint: disable=protected-access return gen_image_ops.non_max_suppression_with_overlaps( overlaps, scores, max_output_size, overlap_threshold, score_threshold) # pylint: enable=protected-access _rgb_to_yiq_kernel = [[0.299, 0.59590059, 0.2115], [0.587, -0.27455667, -0.52273617], [0.114, -0.32134392, 0.31119955]] @tf_export('image.rgb_to_yiq') def rgb_to_yiq(images): """Converts one or more images from RGB to YIQ. Outputs a tensor of the same shape as the `images` tensor, containing the YIQ value of the pixels. The output is only well defined if the value in images are in [0,1]. Args: images: 2-D or higher rank. Image data to convert. Last dimension must be size 3. Returns: images: tensor with the same shape as `images`. """ images = ops.convert_to_tensor(images, name='images') kernel = ops.convert_to_tensor( _rgb_to_yiq_kernel, dtype=images.dtype, name='kernel') ndims = images.get_shape().ndims return math_ops.tensordot(images, kernel, axes=[[ndims - 1], [0]]) _yiq_to_rgb_kernel = [[1, 1, 1], [0.95598634, -0.27201283, -1.10674021], [0.6208248, -0.64720424, 1.70423049]] @tf_export('image.yiq_to_rgb') def yiq_to_rgb(images): """Converts one or more images from YIQ to RGB. Outputs a tensor of the same shape as the `images` tensor, containing the RGB value of the pixels. The output is only well defined if the Y value in images are in [0,1], I value are in [-0.5957,0.5957] and Q value are in [-0.5226,0.5226]. Args: images: 2-D or higher rank. Image data to convert. Last dimension must be size 3. Returns: images: tensor with the same shape as `images`. """ images = ops.convert_to_tensor(images, name='images') kernel = ops.convert_to_tensor( _yiq_to_rgb_kernel, dtype=images.dtype, name='kernel') ndims = images.get_shape().ndims return math_ops.tensordot(images, kernel, axes=[[ndims - 1], [0]]) _rgb_to_yuv_kernel = [[0.299, -0.14714119, 0.61497538], [0.587, -0.28886916, -0.51496512], [0.114, 0.43601035, -0.10001026]] @tf_export('image.rgb_to_yuv') def rgb_to_yuv(images): """Converts one or more images from RGB to YUV. Outputs a tensor of the same shape as the `images` tensor, containing the YUV value of the pixels. The output is only well defined if the value in images are in [0,1]. Args: images: 2-D or higher rank. Image data to convert. Last dimension must be size 3. Returns: images: tensor with the same shape as `images`. """ images = ops.convert_to_tensor(images, name='images') kernel = ops.convert_to_tensor( _rgb_to_yuv_kernel, dtype=images.dtype, name='kernel') ndims = images.get_shape().ndims return math_ops.tensordot(images, kernel, axes=[[ndims - 1], [0]]) _yuv_to_rgb_kernel = [[1, 1, 1], [0, -0.394642334, 2.03206185], [1.13988303, -0.58062185, 0]] @tf_export('image.yuv_to_rgb') def yuv_to_rgb(images): """Converts one or more images from YUV to RGB. Outputs a tensor of the same shape as the `images` tensor, containing the RGB value of the pixels. The output is only well defined if the Y value in images are in [0,1], U and V value are in [-0.5,0.5]. Args: images: 2-D or higher rank. Image data to convert. Last dimension must be size 3. 
Returns: images: tensor with the same shape as `images`. """ images = ops.convert_to_tensor(images, name='images') kernel = ops.convert_to_tensor( _yuv_to_rgb_kernel, dtype=images.dtype, name='kernel') ndims = images.get_shape().ndims return math_ops.tensordot(images, kernel, axes=[[ndims - 1], [0]]) def _verify_compatible_image_shapes(img1, img2): """Checks if two image tensors are compatible for applying SSIM or PSNR. This function checks if two sets of images have ranks at least 3, and if the last three dimensions match. Args: img1: Tensor containing the first image batch. img2: Tensor containing the second image batch. Returns: A tuple containing: the first tensor shape, the second tensor shape, and a list of control_flow_ops.Assert() ops implementing the checks. Raises: ValueError: When static shape check fails. """ shape1 = img1.get_shape().with_rank_at_least(3) shape2 = img2.get_shape().with_rank_at_least(3) shape1[-3:].assert_is_compatible_with(shape2[-3:]) if shape1.ndims is not None and shape2.ndims is not None: for dim1, dim2 in zip(reversed(shape1.dims[:-3]), reversed(shape2.dims[:-3])): if not (dim1 == 1 or dim2 == 1 or dim1.is_compatible_with(dim2)): raise ValueError( 'Two images are not compatible: %s and %s' % (shape1, shape2)) # Now assign shape tensors. shape1, shape2 = array_ops.shape_n([img1, img2]) # TODO(sjhwang): Check if shape1[:-3] and shape2[:-3] are broadcastable. checks = [] checks.append(control_flow_ops.Assert( math_ops.greater_equal(array_ops.size(shape1), 3), [shape1, shape2], summarize=10)) checks.append(control_flow_ops.Assert( math_ops.reduce_all(math_ops.equal(shape1[-3:], shape2[-3:])), [shape1, shape2], summarize=10)) return shape1, shape2, checks @tf_export('image.psnr') def psnr(a, b, max_val, name=None): """Returns the Peak Signal-to-Noise Ratio between a and b. This is intended to be used on signals (or images). Produces a PSNR value for each image in batch. The last three dimensions of input are expected to be [height, width, depth]. Example: ```python # Read images from file. im1 = tf.decode_png('path/to/im1.png') im2 = tf.decode_png('path/to/im2.png') # Compute PSNR over tf.uint8 Tensors. psnr1 = tf.image.psnr(im1, im2, max_val=255) # Compute PSNR over tf.float32 Tensors. im1 = tf.image.convert_image_dtype(im1, tf.float32) im2 = tf.image.convert_image_dtype(im2, tf.float32) psnr2 = tf.image.psnr(im1, im2, max_val=1.0) # psnr1 and psnr2 both have type tf.float32 and are almost equal. ``` Arguments: a: First set of images. b: Second set of images. max_val: The dynamic range of the images (i.e., the difference between the maximum the and minimum allowed values). name: Namespace to embed the computation in. Returns: The scalar PSNR between a and b. The returned tensor has type `tf.float32` and shape [batch_size, 1]. """ with ops.name_scope(name, 'PSNR', [a, b]): # Need to convert the images to float32. Scale max_val accordingly so that # PSNR is computed correctly. 
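    # The value computed below is PSNR = 20 * log10(max_val) - 10 * log10(MSE),
    # i.e. 10 * log10(max_val**2 / MSE), with the MSE taken over the last three
    # dimensions (height, width, depth).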
max_val = math_ops.cast(max_val, a.dtype) max_val = convert_image_dtype(max_val, dtypes.float32) a = convert_image_dtype(a, dtypes.float32) b = convert_image_dtype(b, dtypes.float32) mse = math_ops.reduce_mean(math_ops.squared_difference(a, b), [-3, -2, -1]) psnr_val = math_ops.subtract( 20 * math_ops.log(max_val) / math_ops.log(10.0), np.float32(10 / np.log(10)) * math_ops.log(mse), name='psnr') _, _, checks = _verify_compatible_image_shapes(a, b) with ops.control_dependencies(checks): return array_ops.identity(psnr_val) _SSIM_K1 = 0.01 _SSIM_K2 = 0.03 def _ssim_helper(x, y, reducer, max_val, compensation=1.0): r"""Helper function for computing SSIM. SSIM estimates covariances with weighted sums. The default parameters use a biased estimate of the covariance: Suppose `reducer` is a weighted sum, then the mean estimators are \mu_x = \sum_i w_i x_i, \mu_y = \sum_i w_i y_i, where w_i's are the weighted-sum weights, and covariance estimator is cov_{xy} = \sum_i w_i (x_i - \mu_x) (y_i - \mu_y) with assumption \sum_i w_i = 1. This covariance estimator is biased, since E[cov_{xy}] = (1 - \sum_i w_i ^ 2) Cov(X, Y). For SSIM measure with unbiased covariance estimators, pass as `compensation` argument (1 - \sum_i w_i ^ 2). Arguments: x: First set of images. y: Second set of images. reducer: Function that computes 'local' averages from set of images. For non-covolutional version, this is usually tf.reduce_mean(x, [1, 2]), and for convolutional version, this is usually tf.nn.avg_pool or tf.nn.conv2d with weighted-sum kernel. max_val: The dynamic range (i.e., the difference between the maximum possible allowed value and the minimum allowed value). compensation: Compensation factor. See above. Returns: A pair containing the luminance measure, and the contrast-structure measure. """ c1 = (_SSIM_K1 * max_val) ** 2 c2 = (_SSIM_K2 * max_val) ** 2 # SSIM luminance measure is # (2 * mu_x * mu_y + c1) / (mu_x ** 2 + mu_y ** 2 + c1). mean0 = reducer(x) mean1 = reducer(y) num0 = mean0 * mean1 * 2.0 den0 = math_ops.square(mean0) + math_ops.square(mean1) luminance = (num0 + c1) / (den0 + c1) # SSIM contrast-structure measure is # (2 * cov_{xy} + c2) / (cov_{xx} + cov_{yy} + c2). # Note that `reducer` is a weighted sum with weight w_k, \sum_i w_i = 1, then # cov_{xy} = \sum_i w_i (x_i - \mu_x) (y_i - \mu_y) # = \sum_i w_i x_i y_i - (\sum_i w_i x_i) (\sum_j w_j y_j). num1 = reducer(x * y) * 2.0 den1 = reducer(math_ops.square(x) + math_ops.square(y)) c2 *= compensation cs = (num1 - num0 + c2) / (den1 - den0 + c2) # SSIM score is the product of the luminance and contrast-structure measures. return luminance, cs def _fspecial_gauss(size, sigma): """Function to mimic the 'fspecial' gaussian MATLAB function.""" size = ops.convert_to_tensor(size, dtypes.int32) sigma = ops.convert_to_tensor(sigma) coords = math_ops.cast(math_ops.range(size), sigma.dtype) coords -= math_ops.cast(size - 1, sigma.dtype) / 2.0 g = math_ops.square(coords) g *= -0.5 / math_ops.square(sigma) g = array_ops.reshape(g, shape=[1, -1]) + array_ops.reshape(g, shape=[-1, 1]) g = array_ops.reshape(g, shape=[1, -1]) # For tf.nn.softmax(). g = nn_ops.softmax(g) return array_ops.reshape(g, shape=[size, size, 1, 1]) def _ssim_per_channel(img1, img2, max_val=1.0): """Computes SSIM index between img1 and img2 per color channel. This function matches the standard SSIM implementation from: Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P. (2004). Image quality assessment: from error visibility to structural similarity. 
IEEE transactions on image processing. Details: - 11x11 Gaussian filter of width 1.5 is used. - k1 = 0.01, k2 = 0.03 as in the original paper. Args: img1: First image batch. img2: Second image batch. max_val: The dynamic range of the images (i.e., the difference between the maximum the and minimum allowed values). Returns: A pair of tensors containing and channel-wise SSIM and contrast-structure values. The shape is [..., channels]. """ filter_size = constant_op.constant(11, dtype=dtypes.int32) filter_sigma = constant_op.constant(1.5, dtype=img1.dtype) shape1, shape2 = array_ops.shape_n([img1, img2]) checks = [ control_flow_ops.Assert(math_ops.reduce_all(math_ops.greater_equal( shape1[-3:-1], filter_size)), [shape1, filter_size], summarize=8), control_flow_ops.Assert(math_ops.reduce_all(math_ops.greater_equal( shape2[-3:-1], filter_size)), [shape2, filter_size], summarize=8)] # Enforce the check to run before computation. with ops.control_dependencies(checks): img1 = array_ops.identity(img1) # TODO(sjhwang): Try to cache kernels and compensation factor. kernel = _fspecial_gauss(filter_size, filter_sigma) kernel = array_ops.tile(kernel, multiples=[1, 1, shape1[-1], 1]) # The correct compensation factor is `1.0 - tf.reduce_sum(tf.square(kernel))`, # but to match MATLAB implementation of MS-SSIM, we use 1.0 instead. compensation = 1.0 # TODO(sjhwang): Try FFT. # TODO(sjhwang): Gaussian kernel is separable in space. Consider applying # 1-by-n and n-by-1 Gaussain filters instead of an n-by-n filter. def reducer(x): shape = array_ops.shape(x) x = array_ops.reshape(x, shape=array_ops.concat([[-1], shape[-3:]], 0)) y = nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1], padding='VALID') return array_ops.reshape(y, array_ops.concat([shape[:-3], array_ops.shape(y)[1:]], 0)) luminance, cs = _ssim_helper(img1, img2, reducer, max_val, compensation) # Average over the second and the third from the last: height, width. axes = constant_op.constant([-3, -2], dtype=dtypes.int32) ssim_val = math_ops.reduce_mean(luminance * cs, axes) cs = math_ops.reduce_mean(cs, axes) return ssim_val, cs @tf_export('image.ssim') def ssim(img1, img2, max_val): """Computes SSIM index between img1 and img2. This function is based on the standard SSIM implementation from: Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P. (2004). Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing. Note: The true SSIM is only defined on grayscale. This function does not perform any colorspace transform. (If input is already YUV, then it will compute YUV SSIM average.) Details: - 11x11 Gaussian filter of width 1.5 is used. - k1 = 0.01, k2 = 0.03 as in the original paper. The image sizes must be at least 11x11 because of the filter size. Example: ```python # Read images from file. im1 = tf.decode_png('path/to/im1.png') im2 = tf.decode_png('path/to/im2.png') # Compute SSIM over tf.uint8 Tensors. ssim1 = tf.image.ssim(im1, im2, max_val=255) # Compute SSIM over tf.float32 Tensors. im1 = tf.image.convert_image_dtype(im1, tf.float32) im2 = tf.image.convert_image_dtype(im2, tf.float32) ssim2 = tf.image.ssim(im1, im2, max_val=1.0) # ssim1 and ssim2 both have type tf.float32 and are almost equal. ``` Args: img1: First image batch. img2: Second image batch. max_val: The dynamic range of the images (i.e., the difference between the maximum the and minimum allowed values). Returns: A tensor containing an SSIM value for each image in batch. 
Returned SSIM values are in range (-1, 1], when pixel values are non-negative. Returns a tensor with shape: broadcast(img1.shape[:-3], img2.shape[:-3]). """ _, _, checks = _verify_compatible_image_shapes(img1, img2) with ops.control_dependencies(checks): img1 = array_ops.identity(img1) # Need to convert the images to float32. Scale max_val accordingly so that # SSIM is computed correctly. max_val = math_ops.cast(max_val, img1.dtype) max_val = convert_image_dtype(max_val, dtypes.float32) img1 = convert_image_dtype(img1, dtypes.float32) img2 = convert_image_dtype(img2, dtypes.float32) ssim_per_channel, _ = _ssim_per_channel(img1, img2, max_val) # Compute average over color channels. return math_ops.reduce_mean(ssim_per_channel, [-1]) # Default values obtained by Wang et al. _MSSSIM_WEIGHTS = (0.0448, 0.2856, 0.3001, 0.2363, 0.1333) @tf_export('image.ssim_multiscale') def ssim_multiscale(img1, img2, max_val, power_factors=_MSSSIM_WEIGHTS): """Computes the MS-SSIM between img1 and img2. This function assumes that `img1` and `img2` are image batches, i.e. the last three dimensions are [height, width, channels]. Note: The true SSIM is only defined on grayscale. This function does not perform any colorspace transform. (If input is already YUV, then it will compute YUV SSIM average.) Original paper: Wang, Zhou, Eero P. Simoncelli, and Alan C. Bovik. "Multiscale structural similarity for image quality assessment." Signals, Systems and Computers, 2004. Arguments: img1: First image batch. img2: Second image batch. Must have the same rank as img1. max_val: The dynamic range of the images (i.e., the difference between the maximum the and minimum allowed values). power_factors: Iterable of weights for each of the scales. The number of scales used is the length of the list. Index 0 is the unscaled resolution's weight and each increasing scale corresponds to the image being downsampled by 2. Defaults to (0.0448, 0.2856, 0.3001, 0.2363, 0.1333), which are the values obtained in the original paper. Returns: A tensor containing an MS-SSIM value for each image in batch. The values are in range [0, 1]. Returns a tensor with shape: broadcast(img1.shape[:-3], img2.shape[:-3]). """ # Shape checking. shape1 = img1.get_shape().with_rank_at_least(3) shape2 = img2.get_shape().with_rank_at_least(3) shape1[-3:].merge_with(shape2[-3:]) with ops.name_scope(None, 'MS-SSIM', [img1, img2]): shape1, shape2, checks = _verify_compatible_image_shapes(img1, img2) with ops.control_dependencies(checks): img1 = array_ops.identity(img1) # Need to convert the images to float32. Scale max_val accordingly so that # SSIM is computed correctly. max_val = math_ops.cast(max_val, img1.dtype) max_val = convert_image_dtype(max_val, dtypes.float32) img1 = convert_image_dtype(img1, dtypes.float32) img2 = convert_image_dtype(img2, dtypes.float32) imgs = [img1, img2] shapes = [shape1, shape2] # img1 and img2 are assumed to be a (multi-dimensional) batch of # 3-dimensional images (height, width, channels). `heads` contain the batch # dimensions, and `tails` contain the image dimensions. 
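    # For example, for a shape of [batch, height, width, channels], `heads`
    # would be [batch] and `tails` would be [height, width, channels].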
heads = [s[:-3] for s in shapes] tails = [s[-3:] for s in shapes] divisor = [1, 2, 2, 1] divisor_tensor = constant_op.constant(divisor[1:], dtype=dtypes.int32) def do_pad(images, remainder): padding = array_ops.expand_dims(remainder, -1) padding = array_ops.pad(padding, [[1, 0], [1, 0]]) return [array_ops.pad(x, padding, mode='SYMMETRIC') for x in images] mcs = [] for k in range(len(power_factors)): with ops.name_scope(None, 'Scale%d' % k, imgs): if k > 0: # Avg pool takes rank 4 tensors. Flatten leading dimensions. flat_imgs = [ array_ops.reshape(x, array_ops.concat([[-1], t], 0)) for x, t in zip(imgs, tails) ] remainder = tails[0] % divisor_tensor need_padding = math_ops.reduce_any(math_ops.not_equal(remainder, 0)) # pylint: disable=cell-var-from-loop padded = control_flow_ops.cond(need_padding, lambda: do_pad(flat_imgs, remainder), lambda: flat_imgs) # pylint: enable=cell-var-from-loop downscaled = [nn_ops.avg_pool(x, ksize=divisor, strides=divisor, padding='VALID') for x in padded] tails = [x[1:] for x in array_ops.shape_n(downscaled)] imgs = [ array_ops.reshape(x, array_ops.concat([h, t], 0)) for x, h, t in zip(downscaled, heads, tails) ] # Overwrite previous ssim value since we only need the last one. ssim_per_channel, cs = _ssim_per_channel(*imgs, max_val=max_val) mcs.append(nn_ops.relu(cs)) # Remove the cs score for the last scale. In the MS-SSIM calculation, # we use the l(p) at the highest scale. l(p) * cs(p) is ssim(p). mcs.pop() # Remove the cs score for the last scale. mcs_and_ssim = array_ops.stack(mcs + [nn_ops.relu(ssim_per_channel)], axis=-1) # Take weighted geometric mean across the scale axis. ms_ssim = math_ops.reduce_prod(math_ops.pow(mcs_and_ssim, power_factors), [-1]) return math_ops.reduce_mean(ms_ssim, [-1]) # Avg over color channels. @tf_export('image.image_gradients') def image_gradients(image): """Returns image gradients (dy, dx) for each color channel. Both output tensors have the same shape as the input: [batch_size, h, w, d]. The gradient values are organized so that [I(x+1, y) - I(x, y)] is in location (x, y). That means that dy will always have zeros in the last row, and dx will always have zeros in the last column. Arguments: image: Tensor with shape [batch_size, h, w, d]. Returns: Pair of tensors (dy, dx) holding the vertical and horizontal image gradients (1-step finite difference). Raises: ValueError: If `image` is not a 4D tensor. """ if image.get_shape().ndims != 4: raise ValueError('image_gradients expects a 4D tensor ' '[batch_size, h, w, d], not %s.', image.get_shape()) image_shape = array_ops.shape(image) batch_size, height, width, depth = array_ops.unstack(image_shape) dy = image[:, 1:, :, :] - image[:, :-1, :, :] dx = image[:, :, 1:, :] - image[:, :, :-1, :] # Return tensors with same size as original image by concatenating # zeros. Place the gradient [I(x+1,y) - I(x,y)] on the base pixel (x, y). shape = array_ops.stack([batch_size, 1, width, depth]) dy = array_ops.concat([dy, array_ops.zeros(shape, image.dtype)], 1) dy = array_ops.reshape(dy, image_shape) shape = array_ops.stack([batch_size, height, 1, depth]) dx = array_ops.concat([dx, array_ops.zeros(shape, image.dtype)], 2) dx = array_ops.reshape(dx, image_shape) return dy, dx @tf_export('image.sobel_edges') def sobel_edges(image): """Returns a tensor holding Sobel edge maps. Arguments: image: Image tensor with shape [batch_size, h, w, d] and type float32 or float64. The image(s) must be 2x2 or larger. Returns: Tensor holding edge maps for each channel. 
Returns a tensor with shape [batch_size, h, w, d, 2] where the last two dimensions hold [[dy[0], dx[0]], [dy[1], dx[1]], ..., [dy[d-1], dx[d-1]]] calculated using the Sobel filter. """ # Define vertical and horizontal Sobel filters. static_image_shape = image.get_shape() image_shape = array_ops.shape(image) kernels = [[[-1, -2, -1], [0, 0, 0], [1, 2, 1]], [[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]] num_kernels = len(kernels) kernels = np.transpose(np.asarray(kernels), (1, 2, 0)) kernels = np.expand_dims(kernels, -2) kernels_tf = constant_op.constant(kernels, dtype=image.dtype) kernels_tf = array_ops.tile(kernels_tf, [1, 1, image_shape[-1], 1], name='sobel_filters') # Use depth-wise convolution to calculate edge maps per channel. pad_sizes = [[0, 0], [1, 1], [1, 1], [0, 0]] padded = array_ops.pad(image, pad_sizes, mode='REFLECT') # Output tensor has shape [batch_size, h, w, d * num_kernels]. strides = [1, 1, 1, 1] output = nn.depthwise_conv2d(padded, kernels_tf, strides, 'VALID') # Reshape to [batch_size, h, w, d, num_kernels]. shape = array_ops.concat([image_shape, [num_kernels]], 0) output = array_ops.reshape(output, shape=shape) output.set_shape(static_image_shape.concatenate([num_kernels])) return output def resize_bicubic(images, size, align_corners=False, name=None, half_pixel_centers=False): return gen_image_ops.resize_bicubic( images=images, size=size, align_corners=align_corners, half_pixel_centers=half_pixel_centers, name=name) def resize_bilinear(images, size, align_corners=False, name=None, half_pixel_centers=False): return gen_image_ops.resize_bilinear( images=images, size=size, align_corners=align_corners, half_pixel_centers=half_pixel_centers, name=name) def resize_nearest_neighbor(images, size, align_corners=False, name=None, half_pixel_centers=False): return gen_image_ops.resize_nearest_neighbor( images=images, size=size, align_corners=align_corners, half_pixel_centers=half_pixel_centers, name=name) resize_area_deprecation = deprecation.deprecated( date=None, instructions=( 'Use `tf.image.resize(...method=ResizeMethod.AREA...)` instead.')) tf_export(v1=['image.resize_area'])( resize_area_deprecation(gen_image_ops.resize_area)) resize_bicubic_deprecation = deprecation.deprecated( date=None, instructions=( 'Use `tf.image.resize(...method=ResizeMethod.BICUBIC...)` instead.')) tf_export(v1=['image.resize_bicubic'])( resize_bicubic_deprecation(resize_bicubic)) resize_bilinear_deprecation = deprecation.deprecated( date=None, instructions=( 'Use `tf.image.resize(...method=ResizeMethod.BILINEAR...)` instead.')) tf_export(v1=['image.resize_bilinear'])( resize_bilinear_deprecation(resize_bilinear)) resize_nearest_neighbor_deprecation = deprecation.deprecated( date=None, instructions=( 'Use `tf.image.resize(...method=ResizeMethod.NEAREST_NEIGHBOR...)` ' 'instead.')) tf_export(v1=['image.resize_nearest_neighbor'])( resize_nearest_neighbor_deprecation(resize_nearest_neighbor)) @tf_export('image.crop_and_resize', v1=[]) def crop_and_resize_v2( image, boxes, box_indices, crop_size, method='bilinear', extrapolation_value=0, name=None): """Extracts crops from the input image tensor and resizes them. Extracts crops from the input image tensor and resizes them using bilinear sampling or nearest neighbor sampling (possibly with aspect ratio change) to a common output size specified by `crop_size`. This is more general than the `crop_to_bounding_box` op which extracts a fixed size slice from the input image and does not allow resizing or aspect ratio change. 
Returns a tensor with `crops` from the input `image` at positions defined at the bounding box locations in `boxes`. The cropped boxes are all resized (with bilinear or nearest neighbor interpolation) to a fixed `size = [crop_height, crop_width]`. The result is a 4-D tensor `[num_boxes, crop_height, crop_width, depth]`. The resizing is corner aligned. In particular, if `boxes = [[0, 0, 1, 1]]`, the method will give identical results to using `tf.image.resize_bilinear()` or `tf.image.resize_nearest_neighbor()`(depends on the `method` argument) with `align_corners=True`. Args: image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`. Both `image_height` and `image_width` need to be positive. boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor specifies the coordinates of a box in the `box_ind[i]` image and is specified in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the `[0, 1]` interval of normalized image height is mapped to `[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in which case the sampled crop is an up-down flipped version of the original image. The width dimension is treated similarly. Normalized coordinates outside the `[0, 1]` range are allowed, in which case we use `extrapolation_value` to extrapolate the input image values. box_indices: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. The value of `box_ind[i]` specifies the image that the `i`-th box refers to. crop_size: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All cropped image patches are resized to this size. The aspect ratio of the image content is not preserved. Both `crop_height` and `crop_width` need to be positive. method: An optional string specifying the sampling method for resizing. It can be either `"bilinear"` or `"nearest"` and default to `"bilinear"`. Currently two sampling methods are supported: Bilinear and Nearest Neighbor. extrapolation_value: An optional `float`. Defaults to `0`. Value used for extrapolation, when applicable. name: A name for the operation (optional). Returns: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. """ return gen_image_ops.crop_and_resize( image, boxes, box_indices, crop_size, method, extrapolation_value, name) @tf_export(v1=['image.crop_and_resize']) @deprecation.deprecated_args( None, 'box_ind is deprecated, use box_indices instead', 'box_ind') def crop_and_resize_v1( # pylint: disable=missing-docstring image, boxes, box_ind=None, crop_size=None, method='bilinear', extrapolation_value=0, name=None, box_indices=None): box_ind = deprecation.deprecated_argument_lookup( "box_indices", box_indices, "box_ind", box_ind) return gen_image_ops.crop_and_resize( image, boxes, box_ind, crop_size, method, extrapolation_value, name) crop_and_resize_v1.__doc__ = gen_image_ops.crop_and_resize.__doc__ @tf_export(v1=['image.extract_glimpse']) def extract_glimpse( input, # pylint: disable=redefined-builtin size, offsets, centered=True, normalized=True, uniform_noise=True, name=None): """Extracts a glimpse from the input tensor. Returns a set of windows called glimpses extracted at location `offsets` from the input tensor. If the windows only partially overlaps the inputs, the non overlapping areas will be filled with random noise. The result is a 4-D tensor of shape `[batch_size, glimpse_height, glimpse_width, channels]`. 
The channels and batch dimensions are the same as that of the input tensor. The height and width of the output windows are specified in the `size` parameter. The argument `normalized` and `centered` controls how the windows are built: * If the coordinates are normalized but not centered, 0.0 and 1.0 correspond to the minimum and maximum of each height and width dimension. * If the coordinates are both normalized and centered, they range from -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper left corner, the lower right corner is located at (1.0, 1.0) and the center is at (0, 0). * If the coordinates are not normalized they are interpreted as numbers of pixels. Args: input: A `Tensor` of type `float32`. A 4-D float tensor of shape `[batch_size, height, width, channels]`. size: A `Tensor` of type `int32`. A 1-D tensor of 2 elements containing the size of the glimpses to extract. The glimpse height must be specified first, following by the glimpse width. offsets: A `Tensor` of type `float32`. A 2-D integer tensor of shape `[batch_size, 2]` containing the y, x locations of the center of each window. centered: An optional `bool`. Defaults to `True`. indicates if the offset coordinates are centered relative to the image, in which case the (0, 0) offset is relative to the center of the input images. If false, the (0,0) offset corresponds to the upper left corner of the input images. normalized: An optional `bool`. Defaults to `True`. indicates if the offset coordinates are normalized. uniform_noise: An optional `bool`. Defaults to `True`. indicates if the noise should be generated using a uniform distribution or a Gaussian distribution. name: A name for the operation (optional). Returns: A `Tensor` of type `float32`. """ return gen_image_ops.extract_glimpse( input=input, size=size, offsets=offsets, centered=centered, normalized=normalized, uniform_noise=uniform_noise, name=name) @tf_export('image.extract_glimpse', v1=[]) def extract_glimpse_v2( input, # pylint: disable=redefined-builtin size, offsets, centered=True, normalized=True, noise='uniform', name=None): """Extracts a glimpse from the input tensor. Returns a set of windows called glimpses extracted at location `offsets` from the input tensor. If the windows only partially overlaps the inputs, the non overlapping areas will be filled with random noise. The result is a 4-D tensor of shape `[batch_size, glimpse_height, glimpse_width, channels]`. The channels and batch dimensions are the same as that of the input tensor. The height and width of the output windows are specified in the `size` parameter. The argument `normalized` and `centered` controls how the windows are built: * If the coordinates are normalized but not centered, 0.0 and 1.0 correspond to the minimum and maximum of each height and width dimension. * If the coordinates are both normalized and centered, they range from -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper left corner, the lower right corner is located at (1.0, 1.0) and the center is at (0, 0). * If the coordinates are not normalized they are interpreted as numbers of pixels. Args: input: A `Tensor` of type `float32`. A 4-D float tensor of shape `[batch_size, height, width, channels]`. size: A `Tensor` of type `int32`. A 1-D tensor of 2 elements containing the size of the glimpses to extract. The glimpse height must be specified first, following by the glimpse width. offsets: A `Tensor` of type `float32`. 
A 2-D integer tensor of shape `[batch_size, 2]` containing the y, x locations of the center of each window. centered: An optional `bool`. Defaults to `True`. indicates if the offset coordinates are centered relative to the image, in which case the (0, 0) offset is relative to the center of the input images. If false, the (0,0) offset corresponds to the upper left corner of the input images. normalized: An optional `bool`. Defaults to `True`. indicates if the offset coordinates are normalized. noise: An optional `string`. Defaults to `uniform`. indicates if the noise should be `uniform` (uniform distribution), `gaussian` (gaussian distribution), or `zero` (zero padding). name: A name for the operation (optional). Returns: A `Tensor` of type `float32`. """ return gen_image_ops.extract_glimpse( input=input, size=size, offsets=offsets, centered=centered, normalized=normalized, noise=noise, uniform_noise=False, name=name) @tf_export('image.combined_non_max_suppression') def combined_non_max_suppression(boxes, scores, max_output_size_per_class, max_total_size, iou_threshold=0.5, score_threshold=float('-inf'), pad_per_class=False, name=None): """Greedily selects a subset of bounding boxes in descending order of score. This operation performs non_max_suppression on the inputs per batch, across all classes. Prunes away boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. Bounding boxes are supplied as [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any diagonal pair of box corners and the coordinates can be provided as normalized (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm is agnostic to where the origin is in the coordinate system. Also note that this algorithm is invariant to orthogonal transformations and translations of the coordinate system; thus translating or reflections of the coordinate system result in the same boxes being selected by the algorithm. The output of this operation is the final boxes, scores and classes tensor returned after performing non_max_suppression. Args: boxes: A 4-D float `Tensor` of shape `[batch_size, num_boxes, q, 4]`. If `q` is 1 then same boxes are used for all classes otherwise, if `q` is equal to number of classes, class-specific boxes are used. scores: A 3-D float `Tensor` of shape `[batch_size, num_boxes, num_classes]` representing a single score corresponding to each box (each row of boxes). max_output_size_per_class: A scalar integer `Tensor` representing the maximum number of boxes to be selected by non max suppression per class max_total_size: A scalar representing maximum number of boxes retained over all classes. iou_threshold: A float representing the threshold for deciding whether boxes overlap too much with respect to IOU. score_threshold: A float representing the threshold for deciding when to remove boxes based on score. pad_per_class: If false, the output nmsed boxes, scores and classes are padded/clipped to `max_total_size`. If true, the output nmsed boxes, scores and classes are padded to be of length `max_size_per_class`*`num_classes`, unless it exceeds `max_total_size` in which case it is clipped to `max_total_size`. Defaults to false. name: A name for the operation (optional). Returns: 'nmsed_boxes': A [batch_size, max_detections, 4] float32 tensor containing the non-max suppressed boxes. 'nmsed_scores': A [batch_size, max_detections] float32 tensor containing the scores for the boxes. 
'nmsed_classes': A [batch_size, max_detections] float32 tensor containing the class for boxes. 'valid_detections': A [batch_size] int32 tensor indicating the number of valid detections per batch item. Only the top valid_detections[i] entries in nms_boxes[i], nms_scores[i] and nms_class[i] are valid. The rest of the entries are zero paddings. """ with ops.name_scope(name, 'combined_non_max_suppression'): iou_threshold = ops.convert_to_tensor( iou_threshold, dtype=dtypes.float32, name='iou_threshold') score_threshold = ops.convert_to_tensor( score_threshold, dtype=dtypes.float32, name='score_threshold') return gen_image_ops.combined_non_max_suppression( boxes, scores, max_output_size_per_class, max_total_size, iou_threshold, score_threshold, pad_per_class)
ageron/tensorflow
tensorflow/python/ops/image_ops_impl.py
Python
apache-2.0
139,423
[ "Gaussian" ]
777900b77a5aed26c57148de7a6860ad0fda1ab0f6dc81639e422e4768625d82
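A minimal usage sketch for the `tf.image.image_gradients` and `tf.image.sobel_edges` wrappers defined above, assuming TensorFlow with eager execution; the 4x4 single-channel input is made up.

import tensorflow as tf

# A made-up 4x4 single-channel image; any float32 4-D tensor works.
image = tf.reshape(tf.range(16, dtype=tf.float32), [1, 4, 4, 1])

# image_gradients returns (dy, dx) with the same shape as the input; by
# construction dy's last row and dx's last column are zero.
dy, dx = tf.image.image_gradients(image)

# sobel_edges appends a trailing axis of size 2 holding [dy, dx] per channel,
# so this result has shape [1, 4, 4, 1, 2].
edges = tf.image.sobel_edges(image)

print(dy.shape, dx.shape, edges.shape)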
"""pyzmq logging handlers. This mainly defines the PUBHandler object for publishing logging messages over a zmq.PUB socket. The PUBHandler can be used with the regular logging module, as in:: >>> import logging >>> handler = PUBHandler('tcp://127.0.0.1:12345') >>> handler.root_topic = 'foo' >>> logger = logging.getLogger('foobar') >>> logger.setLevel(logging.DEBUG) >>> logger.addHandler(handler) After this point, all messages logged by ``logger`` will be published on the PUB socket. Code adapted from StarCluster: http://github.com/jtriley/StarCluster/blob/master/starcluster/logger.py Authors ------- * Min RK """ #----------------------------------------------------------------------------- # Copyright (c) 2010 Brian Granger, Min Ragan-Kelley # # This file is part of pyzmq # # Distributed under the terms of the New BSD License. The full license is in # the file COPYING.BSD, distributed as part of this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- import logging from logging import INFO, DEBUG, WARN, ERROR, FATAL import zmq from zmq.utils.strtypes import bytes, unicode, cast_bytes #----------------------------------------------------------------------------- # Code #----------------------------------------------------------------------------- TOPIC_DELIM="::" # delimiter for splitting topics on the receiving end. class PUBHandler(logging.Handler): """A basic logging handler that emits log messages through a PUB socket. Takes a PUB socket already bound to interfaces or an interface to bind to. Example:: sock = context.socket(zmq.PUB) sock.bind('inproc://log') handler = PUBHandler(sock) Or:: handler = PUBHandler('inproc://loc') These are equivalent. 
Log messages handled by this handler are broadcast with ZMQ topics ``this.root_topic`` comes first, followed by the log level (DEBUG,INFO,etc.), followed by any additional subtopics specified in the message by: log.debug("subtopic.subsub::the real message") """ root_topic="" socket = None formatters = { logging.DEBUG: logging.Formatter( "%(levelname)s %(filename)s:%(lineno)d - %(message)s\n"), logging.INFO: logging.Formatter("%(message)s\n"), logging.WARN: logging.Formatter( "%(levelname)s %(filename)s:%(lineno)d - %(message)s\n"), logging.ERROR: logging.Formatter( "%(levelname)s %(filename)s:%(lineno)d - %(message)s - %(exc_info)s\n"), logging.CRITICAL: logging.Formatter( "%(levelname)s %(filename)s:%(lineno)d - %(message)s\n")} def __init__(self, interface_or_socket, context=None): logging.Handler.__init__(self) if isinstance(interface_or_socket, zmq.Socket): self.socket = interface_or_socket self.ctx = self.socket.context else: self.ctx = context or zmq.Context() self.socket = self.ctx.socket(zmq.PUB) self.socket.bind(interface_or_socket) def format(self,record): """Format a record.""" return self.formatters[record.levelno].format(record) def emit(self, record): """Emit a log message on my socket.""" try: topic, record.msg = record.msg.split(TOPIC_DELIM,1) except Exception: topic = "" try: bmsg = cast_bytes(self.format(record)) except Exception: self.handleError(record) return topic_list = [] if self.root_topic: topic_list.append(self.root_topic) topic_list.append(record.levelname) if topic: topic_list.append(topic) btopic = b'.'.join(cast_bytes(t) for t in topic_list) self.socket.send_multipart([btopic, bmsg]) class TopicLogger(logging.Logger): """A simple wrapper that takes an additional argument to log methods. All the regular methods exist, but instead of one msg argument, two arguments: topic, msg are passed. That is:: logger.debug('msg') Would become:: logger.debug('topic.sub', 'msg') """ def log(self, level, topic, msg, *args, **kwargs): """Log 'msg % args' with level and topic. To pass exception information, use the keyword argument exc_info with a True value:: logger.log(level, "zmq.fun", "We have a %s", "mysterious problem", exc_info=1) """ logging.Logger.log(self, level, '%s::%s'%(topic,msg), *args, **kwargs) # Generate the methods of TopicLogger, since they are just adding a # topic prefix to a message. for name in "debug warn warning error critical fatal".split(): meth = getattr(logging.Logger,name) setattr(TopicLogger, name, lambda self, level, topic, msg, *args, **kwargs: meth(self, level, topic+TOPIC_DELIM+msg,*args, **kwargs))
IsCoolEntertainment/debpkg_python-pyzmq
zmq/log/handlers.py
Python
lgpl-3.0
5,169
[ "Brian" ]
f828835cee9a12b688438b36f1847da75da32130fb3f578a96b86fc7af0a4988
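A minimal end-to-end sketch of PUBHandler, assuming an in-process `inproc://log` endpoint; the handler's own Context is reused because inproc sockets must share one, and the short sleep is a crude guard against the PUB/SUB join race.

import logging
import time

import zmq
from zmq.log.handlers import PUBHandler

handler = PUBHandler('inproc://log')   # binds a PUB socket on this endpoint
handler.root_topic = 'myapp'

logger = logging.getLogger('myapp')
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)

# inproc sockets must share a Context, so reuse the one the handler created.
sub = handler.ctx.socket(zmq.SUB)
sub.connect('inproc://log')
sub.setsockopt(zmq.SUBSCRIBE, b'')     # receive every topic
time.sleep(0.1)                        # give the PUB/SUB pair time to join

# "subtopic::message" moves 'db' into the ZMQ topic, after the level name.
logger.warning('db::connection lost')

topic, body = sub.recv_multipart()     # topic is b'myapp.WARNING.db'
print(topic, body)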
#!/usr/bin/env python ## category General ## desc Postprocesses a BAM file to rename pairs that have an extra /N value ''' Postprocesses a BAM file to rename pairs that have an extra /N value Some aligners output paired end reads with names ending in /1 or /2 to signify where the read came from in a paired end experiment. This can cause problems with downstream analysis packages that expect paired end reads to have the exact same name. ''' import sys import os import pysam from ngsutils.bam import bam_iter def bam_renamepair(infile, outfile, delim='/'): bam = pysam.Samfile(infile, "rb") out = pysam.Samfile(outfile, "wb", template=bam) for read in bam_iter(bam): read_renamepair(read, delim) out.write(read) bam.close() out.close() def read_renamepair(read, delim): if delim in read.qname: name, num = read.qname.rsplit(delim, 1) read.tags = read.tags + [('ZN', num)] read.qname = name def usage(): print __doc__ print """Usage: bamutils renamepair {opts} inbamfile outbamfile Options: -f Force overwriting an existing outfile -delim val The trailing delimiter to use (default '/') """ sys.exit(-1) if __name__ == "__main__": infile = None outfile = None delim = '/' last = None force = False for arg in sys.argv[1:]: if last == '-delim': delim = arg last = None elif arg == "-h": usage() elif arg == "-delim": last = arg elif arg == "-f": force = True elif not infile: if os.path.exists(os.path.expanduser(arg)): infile = os.path.expanduser(arg) else: sys.stderr.write("File: %s not found!" % arg) usage() elif not outfile: if force or not os.path.exists(os.path.expanduser(arg)): outfile = arg else: sys.stderr.write( "File: %s exists! Not overwriting without -f force." % arg ) usage() else: usage() if not infile or not outfile: usage() bam_renamepair(infile, outfile, delim)
ngsutils/ngsutils
ngsutils/bam/renamepair.py
Python
bsd-3-clause
2,243
[ "pysam" ]
3c21953fc9b61325db04fd7073f48d460e911beb9b463db234b7bdb4f8eaea17
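A small sketch of the renaming rule read_renamepair() applies, using a stand-in object so it runs without pysam or a BAM file; the read name is made up.

class FakeRead(object):
    """Stand-in for a pysam read: just a qname and a tags list."""
    def __init__(self, qname):
        self.qname = qname
        self.tags = []

def read_renamepair(read, delim='/'):
    # Same rule as the script: strip the trailing /N and keep it as a ZN tag.
    if delim in read.qname:
        name, num = read.qname.rsplit(delim, 1)
        read.tags = read.tags + [('ZN', num)]
        read.qname = name

read = FakeRead('HWI-EAS100R:6:73:941:1973/1')
read_renamepair(read)
print(read.qname, read.tags)   # HWI-EAS100R:6:73:941:1973 [('ZN', '1')]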
# This is the configuration file for your powerline-shell prompt # Every time you make a change to this file, run install.py to apply changes # # For instructions on how to use the powerline-shell.py script, see the README # Add, remove or rearrange these segments to customize what you see on the shell # prompt. Any segment you add must be present in the segments/ directory SEGMENTS = [ # Set the terminal window title to user@host:dir 'set_term_title', # Show current virtual environment (see http://www.virtualenv.org/) 'virtual_env', # Show the current user's username as in ordinary prompts 'username', # Show the machine's hostname. Mostly used when ssh-ing into other machines 'hostname', # Show a padlock when ssh-ing from another machine 'ssh', # Show the current directory. If the path is too long, the middle part is # replaced with ellipsis ('...') 'cwd', # Show a padlock if the current user has no write access to the current # directory 'read_only', # Show the current git branch and status 'git', # Show the current mercurial branch and status # 'hg', # Show the current svn branch and status 'svn', # Show the current fossil branch and status # 'fossil', # Show number of running jobs 'jobs', # Show the last command's exit code if it was non-zero 'exit_code', # Shows a '#' if the current user is root, '$' otherwise # Also, changes color if the last command exited with a non-zero error code 'root', ] # Change the colors used to draw individual segments in your prompt THEME = 'moe'
Menci/powerline-shell
config.py
Python
mit
1,575
[ "MOE" ]
553bc301425778c10888e4dbf009af1d06623bd58ab9d72cc5e8e8cf83a8f515
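A sketch of how the configuration above is typically trimmed down; the segment names come from the list above, while 'default' as a THEME value is an assumption about what the project ships. As the header comments note, install.py must be re-run for any change to take effect.

# A shorter prompt: keep only the segments that change most often.
SEGMENTS = [
    'virtual_env',
    'cwd',
    'git',
    'exit_code',
    'root',
]

# Pick a different theme; 'default' is assumed to be shipped in the project.
THEME = 'default'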
#!/usr/bin/env python #encoding=utf8 #Copyright [2014] [Wei Zhang] #Licensed under the Apache License, Version 2.0 (the "License"); #you may not use this file except in compliance with the License. #You may obtain a copy of the License at #http://www.apache.org/licenses/LICENSE-2.0 #Unless required by applicable law or agreed to in writing, software #distributed under the License is distributed on an "AS IS" BASIS, #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #See the License for the specific language governing permissions and #limitations under the License. ################################################################### # Date: 2014/3/18 # # Call model.py to do pairwise learning # ################################################################### import sys, csv, json, argparse sys.path.append("../") from data_io import write_submission #from model import BPR #from model1 import BPR from model2 import BPR settings = json.loads(open("../../SETTINGS.json").read()) def genTrainFile(behavior_num): data = csv.reader(open(settings["TRAIN_DATA_FILE"])) skip_header_data = [] for i, entry in enumerate(data): if i != 0: skip_header_data.append(entry) data = [map(int, feature) for feature in skip_header_data] train_data = [] for entry in data: action_type = entry[2] uid = entry[0] pid = entry[1] if action_type == settings["ACTION_BUY"]: train_data.append([uid, pid, 1]) if behavior_num == settings["BEHAVIOR_TRIPLE"] and action_type == settings["ACTION_CLICK"]: train_data.append([uid, pid, 0]) writer = csv.writer(open(settings['BPR_TRAIN_FILE'], 'w')) writer.writerows(train_data) def genTrainFile1(behavior_num): data = csv.reader(open(settings["TRAIN_DATA_FILE"])) skip_header_data = [] for i, entry in enumerate(data): if i != 0: skip_header_data.append(entry) data = [map(int, feature) for feature in skip_header_data] train_data = [] for entry in data: action_type = entry[2] uid = entry[0] pid = entry[1] if action_type == settings["ACTION_CLICK"]: train_data.append([uid, pid, 0]) writer = csv.writer(open(settings['BPR_TRAIN_FILE'], 'w')) writer.writerows(train_data) def genTrainFile2(): data = [entry for entry in csv.reader(open(settings["TRAIN_DATA_FILE"]))] data = [map(int, entry) for entry in data[1:]] train_data = [] for entry in data: uid, pid, action_type = entry[0:3] if action_type == settings["ACTION_CLICK"]: train_data.append([uid, pid, 1]) elif action_type == settings["ACTION_COLLECT"] or action_type == settings["ACTION_SHOPPING_CHART"]: train_data.append([uid, pid, 2]) elif action_type == settings["ACTION_BUY"]: train_data.append([uid, pid, 3]) writer = csv.writer(open(settings['BPR_TRAIN_FILE'], 'w')) writer.writerows(train_data) def main(): parser = argparse.ArgumentParser() parser.add_argument('-Csampling', type=str, action='store', dest='sample_method', help='specify which sampling method.\n' 'Currently including three sampling method:\t1.uniform\n\t' '2.adaptive pairwise sampling') parser.add_argument('-Cbehavior', type=str, action='store', dest='behavior_num', help='specify whether to utilize various behaviours of users') parser.add_argument('-Init', type=str, action='store', dest='init_choice', help='specify which method to initialize model parameters') parser.add_argument('-Retrain', type=str, action='store',dest='retrain_choice', help='specify which method to initialize model parameters') parser.add_argument('-topk', type=int, action='store', dest='topk', help='specify how many products to be recommended') if len(sys.argv) != 11: print 'Command 
e.g.: python train.py -Retrain True -Init zero(gaussian) '\ + '-Csampling uniform(adaptive) -Cbehavior triple(tuple) -topk 4' sys.exit(1) para = parser.parse_args() #genTrainFile(para.behavior_num) #genTrainFile1(para.behavior_num) genTrainFile2() #bpr = BPR() #bpr1 = BPR() bpr2 = BPR() if para.retrain_choice == "True": bpr2.model_init(settings["BPR_TRAIN_FILE"], para.init_choice) bpr2.train() recommend_result = bpr2.genRecommendResult(True, para.topk, settings["BPR_TRAIN_FILE"], para.init_choice) write_submission(recommend_result) else: recommend_result = bpr2.genRecommendResult(False, para.topk, settings["BPR_TRAIN_FILE"], para.init_choice) write_submission(recommend_result) if __name__ == "__main__": main()
anthonylife/TaobaoCompetition2014
src/bpr-1.0/train.py
Python
gpl-2.0
4,929
[ "Gaussian" ]
b1c8fa11a8e09e59f6359a32f98135158c1ecadfceeb681cb7f249b7d51c214f
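A standalone sketch of the behaviour-to-level mapping genTrainFile2() writes to BPR_TRAIN_FILE; the numeric action codes below are assumptions standing in for the values read from SETTINGS.json.

# Assumed stand-ins for the action codes read from SETTINGS.json.
ACTION_CLICK, ACTION_COLLECT, ACTION_SHOPPING_CHART, ACTION_BUY = 0, 1, 2, 3

def action_level(action_type):
    # click < collect/shopping-cart < buy, mirroring the 1/2/3 levels
    # genTrainFile2() writes out for each (uid, pid) pair.
    if action_type == ACTION_CLICK:
        return 1
    if action_type in (ACTION_COLLECT, ACTION_SHOPPING_CHART):
        return 2
    if action_type == ACTION_BUY:
        return 3
    return None   # any other action is dropped

rows = [(10, 501, ACTION_CLICK), (10, 502, ACTION_BUY), (11, 501, ACTION_COLLECT)]
train = [(uid, pid, action_level(a)) for uid, pid, a in rows
         if action_level(a) is not None]
print(train)   # [(10, 501, 1), (10, 502, 3), (11, 501, 2)]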
import unittest from pygdbmi import parser from pygdbmi import visitors import io class TestPPrintVisitor(unittest.TestCase): def setUp(self): pass def _test_pprint(self, input_, expected_output, strict): f = io.StringIO() ast = parser.parse(input_, strict) visitor = visitors.PrettyPrintVisitor(outfile=f, en_colors=False) visitor.visit(ast) result = f.getvalue() self.assertEqual(result, expected_output) def test_simple(self): self._test_pprint('^done\n(gdb)\n', '^done\n', True) self._test_pprint('^done,a="2"\n(gdb)\n', '^done,\n a = "2"\n', True) self._test_pprint('712^exit\n', '712^exit\n', False) def test_one(self): input_ = '=breakpoint-created,bkpt={number="2",type="breakpoint",' \ 'disp="keep",enabled="y",addr="<MULTIPLE>",times="0",' \ 'original-location="add"},locations=[{number="2.1",' \ 'enabled="y",addr="0x00000000004004e0",func="add(int, int)",' \ 'file="/home/foo/bob.cc",fullname="/home/foo/bob.cc",' \ 'line="21",thread-groups=["i1"]},{number="2.2",enabled="y",' \ 'addr="0x00000000004004f8",func="add(double, double)",' \ 'file="/home/foo/bob.cc",fullname="/home/foo/bob.cc",' \ 'line="27",thread-groups=["i1"]}]\n' \ '4^done,numchild="2",displayhint="array",children=[child={' \ 'name="var3.[0]",exp="[0]",numchild="1",type="my_class",' \ 'thread-id="1"},child={name="var3.[1]",exp="[1]",numchild=' \ '"1",type="my_class",thread-id="1"}],has_more="0"\n' expected_output = \ '=breakpoint-created,\n' \ ' bkpt = {\n' \ ' number = "2",\n' \ ' type = "breakpoint",\n' \ ' disp = "keep",\n' \ ' enabled = "y",\n' \ ' addr = "<MULTIPLE>",\n' \ ' times = "0",\n' \ ' original-location = "add"\n' \ ' },\n' \ ' locations = [\n' \ ' {\n' \ ' number = "2.1",\n' \ ' enabled = "y",\n' \ ' addr = "0x00000000004004e0",\n' \ ' func = "add(int, int)",\n' \ ' file = "/home/foo/bob.cc",\n' \ ' fullname = "/home/foo/bob.cc",\n' \ ' line = "21",\n' \ ' thread-groups = [\n' \ ' "i1"\n' \ ' ]\n' \ ' },\n' \ ' {\n' \ ' number = "2.2",\n' \ ' enabled = "y",\n' \ ' addr = "0x00000000004004f8",\n' \ ' func = "add(double, double)",\n' \ ' file = "/home/foo/bob.cc",\n' \ ' fullname = "/home/foo/bob.cc",\n' \ ' line = "27",\n' \ ' thread-groups = [\n' \ ' "i1"\n' \ ' ]\n' \ ' }\n' \ ' ]\n' \ '4^done,\n' \ ' numchild = "2",\n' \ ' displayhint = "array",\n' \ ' children = [\n' \ ' child = {\n' \ ' name = "var3.[0]",\n' \ ' exp = "[0]",\n' \ ' numchild = "1",\n' \ ' type = "my_class",\n' \ ' thread-id = "1"\n' \ ' },\n' \ ' child = {\n' \ ' name = "var3.[1]",\n' \ ' exp = "[1]",\n' \ ' numchild = "1",\n' \ ' type = "my_class",\n' \ ' thread-id = "1"\n' \ ' }\n' \ ' ],\n' \ ' has_more = "0"\n' self._test_pprint(input_, expected_output, False)
simark/pygdbmi
tests/pprint_test.py
Python
mit
3,875
[ "VisIt" ]
619f5f3d9849ed608310a89a4e40a427dd9514d77f6dc6e1c3ce61f624e5c2d3
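A minimal sketch of the parse-then-pretty-print flow the tests above exercise, assuming this pygdbmi fork is importable; the record string and call signatures are taken from test_simple.

import io

from pygdbmi import parser, visitors

record = '^done,a="2"\n(gdb)\n'           # the input used in test_simple

ast = parser.parse(record, True)          # second argument is `strict`
out = io.StringIO()
visitors.PrettyPrintVisitor(outfile=out, en_colors=False).visit(ast)

# Prints '^done,' followed by an indented 'a = "2"', per the expected output
# in the tests above.
print(out.getvalue())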
from __future__ import absolute_import, division import numpy as np import os import shutil import tempfile import matplotlib matplotlib.use('Agg', warn=False) from matplotlib.pyplot import Artist, savefig, clf, cm, get_cmap from matplotlib.testing.noseclasses import ImageComparisonFailure from matplotlib.testing.compare import compare_images from numpy import cos, sin, pi from shapely.geometry import Polygon, LineString, Point from six.moves import xrange from .util import unittest from geopandas import GeoSeries, GeoDataFrame, read_file # If set to True, generate images rather than perform tests (all tests will pass!) GENERATE_BASELINE = False BASELINE_DIR = os.path.join(os.path.dirname(__file__), 'baseline_images', 'test_plotting') TRAVIS = bool(os.environ.get('TRAVIS', False)) class TestImageComparisons(unittest.TestCase): def setUp(self): self.tempdir = tempfile.mkdtemp() return def tearDown(self): shutil.rmtree(self.tempdir) return def _compare_images(self, ax, filename, tol=10): """ Helper method to do the comparisons """ assert isinstance(ax, Artist) if GENERATE_BASELINE: savefig(os.path.join(BASELINE_DIR, filename)) savefig(os.path.join(self.tempdir, filename)) err = compare_images(os.path.join(BASELINE_DIR, filename), os.path.join(self.tempdir, filename), tol, in_decorator=True) if err: raise ImageComparisonFailure('images not close: %(actual)s ' 'vs. %(expected)s ' '(RMS %(rms).3f)' % err) def test_poly_plot(self): """ Test plotting a simple series of polygons """ clf() filename = 'poly_plot.png' t1 = Polygon([(0, 0), (1, 0), (1, 1)]) t2 = Polygon([(1, 0), (2, 0), (2, 1)]) polys = GeoSeries([t1, t2]) ax = polys.plot() self._compare_images(ax=ax, filename=filename) def test_point_plot(self): """ Test plotting a simple series of points """ clf() filename = 'points_plot.png' N = 10 points = GeoSeries(Point(i, i) for i in xrange(N)) ax = points.plot() self._compare_images(ax=ax, filename=filename) def test_line_plot(self): """ Test plotting a simple series of lines """ clf() filename = 'lines_plot.png' N = 10 lines = GeoSeries([LineString([(0, i), (9, i)]) for i in xrange(N)]) ax = lines.plot() self._compare_images(ax=ax, filename=filename) @unittest.skipIf(TRAVIS, 'Skip on Travis (fails even though it passes locally)') def test_plot_GeoDataFrame_with_kwargs(self): """ Test plotting a simple GeoDataFrame consisting of a series of polygons with increasing values using various extra kwargs. """ clf() filename = 'poly_plot_with_kwargs.png' ts = np.linspace(0, 2*pi, 10, endpoint=False) # Build GeoDataFrame from a series of triangles wrapping around in a ring # and a second column containing a list of increasing values. 
r1 = 1.0 # radius of inner ring boundary r2 = 1.5 # radius of outer ring boundary def make_triangle(t0, t1): return Polygon([(r1*cos(t0), r1*sin(t0)), (r2*cos(t0), r2*sin(t0)), (r1*cos(t1), r1*sin(t1))]) polys = GeoSeries([make_triangle(t0, t1) for t0, t1 in zip(ts, ts[1:])]) values = np.arange(len(polys)) df = GeoDataFrame({'geometry': polys, 'values': values}) # Plot the GeoDataFrame using various keyword arguments to see if they are honoured ax = df.plot(column='values', cmap=cm.RdBu, vmin=+2, vmax=None, figsize=(8, 4)) self._compare_images(ax=ax, filename=filename) class TestPointPlotting(unittest.TestCase): def setUp(self): self.N = 10 self.points = GeoSeries(Point(i, i) for i in range(self.N)) values = np.arange(self.N) self.df = GeoDataFrame({'geometry': self.points, 'values': values}) def test_default_colors(self): ## without specifying values -> max 9 different colors # GeoSeries ax = self.points.plot() cmap = get_cmap('Set1', 9) expected_colors = cmap(list(range(9))*2) _check_colors(ax.get_lines(), expected_colors) # GeoDataFrame -> uses 'jet' instead of 'Set1' ax = self.df.plot() cmap = get_cmap('jet', 9) expected_colors = cmap(list(range(9))*2) _check_colors(ax.get_lines(), expected_colors) ## with specifying values ax = self.df.plot(column='values') cmap = get_cmap('jet') expected_colors = cmap(np.arange(self.N)/(self.N-1)) _check_colors(ax.get_lines(), expected_colors) def test_colormap(self): ## without specifying values -> max 9 different colors # GeoSeries ax = self.points.plot(cmap='RdYlGn') cmap = get_cmap('RdYlGn', 9) expected_colors = cmap(list(range(9))*2) _check_colors(ax.get_lines(), expected_colors) # GeoDataFrame -> same as GeoSeries in this case ax = self.df.plot(cmap='RdYlGn') _check_colors(ax.get_lines(), expected_colors) ## with specifying values ax = self.df.plot(column='values', cmap='RdYlGn') cmap = get_cmap('RdYlGn') expected_colors = cmap(np.arange(self.N)/(self.N-1)) _check_colors(ax.get_lines(), expected_colors) def test_single_color(self): ax = self.points.plot(color='green') _check_colors(ax.get_lines(), ['green']*self.N) ax = self.df.plot(color='green') _check_colors(ax.get_lines(), ['green']*self.N) ax = self.df.plot(column='values', color='green') _check_colors(ax.get_lines(), ['green']*self.N) class TestLineStringPlotting(unittest.TestCase): def setUp(self): self.N = 10 values = np.arange(self.N) self.lines = GeoSeries([LineString([(0, i), (9, i)]) for i in xrange(self.N)]) self.df = GeoDataFrame({'geometry': self.lines, 'values': values}) def test_single_color(self): ax = self.lines.plot(color='green') _check_colors(ax.get_lines(), ['green']*self.N) ax = self.df.plot(color='green') _check_colors(ax.get_lines(), ['green']*self.N) ax = self.df.plot(column='values', color='green') _check_colors(ax.get_lines(), ['green']*self.N) class TestPolygonPlotting(unittest.TestCase): def setUp(self): t1 = Polygon([(0, 0), (1, 0), (1, 1)]) t2 = Polygon([(1, 0), (2, 0), (2, 1)]) self.polys = GeoSeries([t1, t2]) self.df = GeoDataFrame({'geometry': self.polys, 'values': [0, 1]}) return def test_single_color(self): ax = self.polys.plot(color='green') _check_colors(ax.patches, ['green']*2, alpha=0.5) ax = self.df.plot(color='green') _check_colors(ax.patches, ['green']*2, alpha=0.5) ax = self.df.plot(column='values', color='green') _check_colors(ax.patches, ['green']*2, alpha=0.5) def test_vmin_vmax(self): # when vmin == vmax, all polygons should be the same color ax = self.df.plot(column='values', categorical=True, vmin=0, vmax=0) cmap = get_cmap('Set1', 2) 
self.assertEqual(ax.patches[0].get_facecolor(), ax.patches[1].get_facecolor()) class TestPySALPlotting(unittest.TestCase): @classmethod def setUpClass(cls): try: import pysal as ps except ImportError: raise unittest.SkipTest("PySAL is not installed") pth = ps.examples.get_path("columbus.shp") cls.tracts = read_file(pth) def test_legend(self): ax = self.tracts.plot(column='CRIME', scheme='QUANTILES', k=3, cmap='OrRd', legend=True) labels = [t.get_text() for t in ax.get_legend().get_texts()] expected = [u'0.00 - 26.07', u'26.07 - 41.97', u'41.97 - 68.89'] self.assertEqual(labels, expected) def _check_colors(collection, expected_colors, alpha=None): from matplotlib.lines import Line2D import matplotlib.colors as colors conv = colors.colorConverter for patch, color in zip(collection, expected_colors): if isinstance(patch, Line2D): # points/lines result = patch.get_color() else: # polygons result = patch.get_facecolor() assert conv.to_rgba(result) == conv.to_rgba(color, alpha=alpha) if __name__ == '__main__': unittest.main()
urschrei/geopandas
tests/test_plotting.py
Python
bsd-3-clause
8,763
[ "COLUMBUS" ]
f32522457308b3ba445c6dbe1873b084e22d9cccf703ba1caf603c4eca74554b
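A small sketch of the plotting calls the tests above compare against baseline images, assuming the same headless Agg backend and reusing the triangles from the polygon tests.

import matplotlib
matplotlib.use('Agg')                    # headless backend, as in the tests
import matplotlib.pyplot as plt

from shapely.geometry import Polygon
from geopandas import GeoSeries, GeoDataFrame

t1 = Polygon([(0, 0), (1, 0), (1, 1)])
t2 = Polygon([(1, 0), (2, 0), (2, 1)])
polys = GeoSeries([t1, t2])

ax = polys.plot()                        # plain GeoSeries plot (test_poly_plot)

# GeoDataFrame coloured by a value column, as in the colormap tests.
df = GeoDataFrame({'geometry': polys, 'values': [0, 1]})
ax = df.plot(column='values', cmap='RdYlGn')

plt.savefig('poly_plot.png')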
# -*- coding:utf-8 -*- ## ## This file is part of Invenio. ## Copyright (C) 2010, 2011 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """bibindex_engine_tokenizer_tests - unit tests for bibindex_engine_tokenizer There should always be at least one test class for each class in b_e_t. """ import unittest from invenio.testutils import make_test_suite, run_test_suite import bibindex_engine_tokenizer as tokenizer_lib class TestFuzzyNameTokenizerScanning(unittest.TestCase): """Test BibIndex name tokenization""" def setUp(self): self.tokenizer = tokenizer_lib.BibIndexFuzzyNameTokenizer() self.scan = self.tokenizer.scan def test_bifnt_scan_single(self): """BibIndexFuzzyNameTokenizer - scanning single names like 'Dido'""" teststr = "Dido" output = self.scan(teststr) anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles'], 'lastnames': ['Dido'], 'nonlastnames': [], 'titles': []} self.assertEqual(output, anticipated) def test_bifnt_scan_simple_western_forward(self): """BibIndexFuzzyNameTokenizer - scanning simple Western-style: first last""" teststr = "Ringo Starr" output = self.scan(teststr) anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles'], 'lastnames': ['Starr'], 'nonlastnames': ['Ringo'], 'titles': []} self.assertEqual(output, anticipated) def test_bifnt_scan_simple_western_reverse(self): """BibIndexFuzzyNameTokenizer - scanning simple Western-style: last, first""" teststr = "Starr, Ringo" output = self.scan(teststr) anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles'], 'lastnames': ['Starr'], 'nonlastnames': ['Ringo'], 'titles': []} self.assertEqual(output, anticipated) def test_bifnt_scan_multiname_forward(self): """BibIndexFuzzyNameTokenizer - scanning multiword: first middle last""" teststr = "Michael Edward Peskin" output = self.scan(teststr) anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles'], 'lastnames': ['Peskin'], 'nonlastnames': ['Michael', 'Edward'], 'titles': []} self.assertEqual(output, anticipated) def test_bifnt_scan_multiname_dotcrammed(self): """BibIndexFuzzyNameTokenizer - scanning multiword: f.m. last""" teststr = "M.E. Peskin" output = self.scan(teststr) anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles'], 'lastnames': ['Peskin'], 'nonlastnames': ['M', 'E'], 'titles': []} self.assertEqual(output, anticipated) def test_bifnt_scan_multiname_dotcrammed_reversed(self): """BibIndexFuzzyNameTokenizer - scanning multiword: last, f.m.""" teststr = "Peskin, M.E." 
output = self.scan(teststr) anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles'], 'lastnames': ['Peskin'], 'nonlastnames': ['M', 'E'], 'titles': []} self.assertEqual(output, anticipated) def test_bifnt_scan_multiname_dashcrammed(self): """BibIndexFuzzyNameTokenizer - scanning multiword: first-middle last""" teststr = "Jean-Luc Picard" output = self.scan(teststr) anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles'], 'lastnames': ['Picard'], 'nonlastnames': ['Jean', 'Luc'], 'titles': []} self.assertEqual(output, anticipated) def test_bifnt_scan_multiname_dashcrammed_reversed(self): """BibIndexFuzzyNameTokenizer - scanning multiword: last, first-middle""" teststr = "Picard, Jean-Luc" output = self.scan(teststr) anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles'], 'lastnames': ['Picard'], 'nonlastnames': ['Jean', 'Luc'], 'titles': []} self.assertEqual(output, anticipated) def test_bifnt_scan_compound_lastname_dashes(self): """BibIndexFuzzyNameTokenizer - scanning multiword: first middle last-last""" teststr = "Cantina Octavia Jones-Smith" output = self.scan(teststr) anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles'], 'lastnames': ['Jones', 'Smith'], 'nonlastnames': ['Cantina', 'Octavia'], 'titles': []} self.assertEqual(output, anticipated) def test_bifnt_scan_compound_lastname_dashes_reverse(self): """BibIndexFuzzyNameTokenizer - scanning multiword: last-last, first middle""" teststr = "Jones-Smith, Cantina Octavia" output = self.scan(teststr) anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles'], 'lastnames': ['Jones', 'Smith'], 'nonlastnames': ['Cantina', 'Octavia'], 'titles': []} self.assertEqual(output, anticipated) def test_bifnt_scan_compound_lastname_reverse(self): """BibIndexFuzzyNameTokenizer - scanning compound last: last last, first""" teststr = "Alvarez Gaume, Joachim" output = self.scan(teststr) anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles'], 'lastnames': ['Alvarez', 'Gaume'], 'nonlastnames': ['Joachim'], 'titles': []} self.assertEqual(output, anticipated) def test_bifnt_scan_titled(self): """BibIndexFuzzyNameTokenizer - scanning title-bearing: last, first, title""" teststr = "Epstein, Brian, The Fifth Beatle" output = self.scan(teststr) anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles'], 'lastnames': ['Epstein'], 'nonlastnames': ['Brian'], 'titles': ['The Fifth Beatle']} self.assertEqual(output, anticipated) def test_bifnt_scan_wildly_interesting(self): """BibIndexFuzzyNameTokenizer - scanning last last last, first first, title, title""" teststr = "Ibanez y Gracia, Maria Luisa, II., ed." 
output = self.scan(teststr) anticipated = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles'], 'lastnames': ['Ibanez', 'y', 'Gracia'], 'nonlastnames': ['Maria', 'Luisa'], 'titles': ['II.', 'ed.']} self.assertEqual(output, anticipated) class TestFuzzyNameTokenizerTokens(unittest.TestCase): """Test BibIndex name variant token generation from scanned and tagged sets""" def setUp(self): self.tokenizer = tokenizer_lib.BibIndexFuzzyNameTokenizer() self.get_index_tokens = self.tokenizer.parse_scanned def test_bifnt_tokenize_single(self): """BibIndexFuzzyNameTokenizer - tokens for single-word name Ronaldo """ tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles'], 'lastnames': ['Ronaldo'], 'nonlastnames': [], 'titles': []} output = self.get_index_tokens(tagged_data) anticipated = ['Ronaldo'] self.assertEqual(output, anticipated) def test_bifnt_tokenize_simple_forward(self): """BibIndexFuzzyNameTokenizer - tokens for first last Ringo Starr """ tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles'], 'lastnames': ['Starr'], 'nonlastnames': ['Ringo'], 'titles': []} output = self.get_index_tokens(tagged_data) anticipated = ['R Starr', 'Ringo Starr', 'Starr, R', 'Starr, Ringo'] self.assertEqual(output, anticipated) def test_bifnt_tokenize_simple_reverse(self): """BibIndexFuzzyNameTokenizer - tokens for last, first Starr, Ringo """ tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles'], 'lastnames': ['Starr'], 'nonlastnames': ['Ringo'], 'titles': []} output = self.get_index_tokens(tagged_data) anticipated = ['R Starr', 'Ringo Starr', 'Starr, R', 'Starr, Ringo'] self.assertEqual(output, anticipated) def test_bifnt_tokenize_twoname_forward(self): """BibIndexFuzzyNameTokenizer - tokens for first middle last Michael Edward Peskin """ tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles'], 'lastnames': ['Peskin'], 'nonlastnames': ['Michael', 'Edward'], 'titles': []} output = self.get_index_tokens(tagged_data) anticipated = ['E Peskin', 'Edward Peskin', 'M E Peskin', 'M Edward Peskin', 'M Peskin', 'Michael E Peskin', 'Michael Edward Peskin', 'Michael Peskin', 'Peskin, E', 'Peskin, Edward', 'Peskin, M', 'Peskin, M E', 'Peskin, M Edward', 'Peskin, Michael', 'Peskin, Michael E', 'Peskin, Michael Edward'] self.assertEqual(output, anticipated) def test_bifnt_tokenize_compound_last(self): """BibIndexFuzzyNameTokenizer - tokens for last last, first Alvarez Gaume, Joachim """ tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles'], 'lastnames': ['Alvarez', 'Gaume'], 'nonlastnames': ['Joachim'], 'titles': []} output = self.get_index_tokens(tagged_data) anticipated = ['Alvarez Gaume, J', 'Alvarez Gaume, Joachim', 'Alvarez, J', 'Alvarez, Joachim', 'Gaume, J', 'Gaume, Joachim', 'J Alvarez', 'J Alvarez Gaume', 'J Gaume', 'Joachim Alvarez', 'Joachim Alvarez Gaume', 'Joachim Gaume'] self.assertEqual(output, anticipated) def test_bifnt_tokenize_titled(self): """BibIndexFuzzyNameTokenizer - tokens for last, first, title Epstein, Brian, The Fifth Beatle """ tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles'], 'lastnames': ['Epstein'], 'nonlastnames': ['Brian'], 'titles': ['The Fifth Beatle']} output = self.get_index_tokens(tagged_data) anticipated = ['B Epstein', 'B Epstein, The Fifth Beatle', 'Brian Epstein', 'Brian Epstein, The Fifth Beatle', 'Epstein, B', 'Epstein, B, The Fifth Beatle', 'Epstein, Brian', 'Epstein, Brian, The Fifth Beatle'] self.assertEqual(output, anticipated) def 
test_bifnt_tokenize_wildly_interesting(self): """BibIndexFuzzyNameTokenizer - tokens for last last last, first first, title, title Ibanez y Gracia, Maria Luisa, II, (ed.) """ tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles'], 'lastnames': ['Ibanez', 'y', 'Gracia'], 'nonlastnames': ['Maria', 'Luisa'], 'titles': ['II', '(ed.)']} output = self.get_index_tokens(tagged_data) anticipated = ['Gracia, L', 'Gracia, Luisa', 'Gracia, M', 'Gracia, M L', 'Gracia, M Luisa', 'Gracia, Maria', 'Gracia, Maria L', 'Gracia, Maria Luisa', 'Ibanez y Gracia, L', 'Ibanez y Gracia, L, II', 'Ibanez y Gracia, Luisa', 'Ibanez y Gracia, Luisa, II', 'Ibanez y Gracia, M', 'Ibanez y Gracia, M L', 'Ibanez y Gracia, M L, II', 'Ibanez y Gracia, M Luisa', 'Ibanez y Gracia, M Luisa, II', 'Ibanez y Gracia, M, II', 'Ibanez y Gracia, Maria', 'Ibanez y Gracia, Maria L', 'Ibanez y Gracia, Maria L, II', 'Ibanez y Gracia, Maria Luisa', 'Ibanez y Gracia, Maria Luisa, II', 'Ibanez y Gracia, Maria, II', 'Ibanez, L', 'Ibanez, Luisa', 'Ibanez, M', 'Ibanez, M L', 'Ibanez, M Luisa', 'Ibanez, Maria', 'Ibanez, Maria L', 'Ibanez, Maria Luisa', 'L Gracia', 'L Ibanez', 'L Ibanez y Gracia', 'L Ibanez y Gracia, II', 'Luisa Gracia', 'Luisa Ibanez', 'Luisa Ibanez y Gracia', 'Luisa Ibanez y Gracia, II', 'M Gracia', 'M Ibanez', 'M Ibanez y Gracia', 'M Ibanez y Gracia, II', 'M L Gracia', 'M L Ibanez', 'M L Ibanez y Gracia', 'M L Ibanez y Gracia, II', 'M Luisa Gracia', 'M Luisa Ibanez', 'M Luisa Ibanez y Gracia', 'M Luisa Ibanez y Gracia, II', 'Maria Gracia', 'Maria Ibanez', 'Maria Ibanez y Gracia', 'Maria Ibanez y Gracia, II', 'Maria L Gracia', 'Maria L Ibanez', 'Maria L Ibanez y Gracia', 'Maria L Ibanez y Gracia, II', 'Maria Luisa Gracia', 'Maria Luisa Ibanez', 'Maria Luisa Ibanez y Gracia', 'Maria Luisa Ibanez y Gracia, II'] self.assertEqual(output, anticipated) def test_bifnt_tokenize_multimiddle_forward(self): """BibIndexFuzzyNameTokenizer - tokens for first middle middle last W K H Panofsky """ tagged_data = {'TOKEN_TAG_LIST': ['lastnames', 'nonlastnames', 'titles'], 'lastnames': ['Panofsky'], 'nonlastnames': ['W', 'K', 'H'], 'titles': []} output = self.get_index_tokens(tagged_data) anticipated = ['H Panofsky', 'K H Panofsky', 'K Panofsky', 'Panofsky, H', 'Panofsky, K', 'Panofsky, K H', 'Panofsky, W', 'Panofsky, W H', 'Panofsky, W K', 'Panofsky, W K H', 'W H Panofsky', 'W K H Panofsky', 'W K Panofsky', 'W Panofsky'] self.assertEqual(output, anticipated) def test_tokenize(self): """BibIndexFuzzyNameTokenizer - check tokenize() Ringo Starr """ teststr = "Ringo Starr" output = self.tokenizer.tokenize(teststr) anticipated = ['R Starr', 'Ringo Starr', 'Starr, R', 'Starr, Ringo'] self.assertEqual(output, anticipated) class TestExactNameTokenizer(unittest.TestCase): """Test exact author name tokenizer.""" def setUp(self): """setup""" self.tokenizer = tokenizer_lib.BibIndexExactNameTokenizer() def test_exact_author_name_tokenizer_bare(self): """BibIndexExactNameTokenizer - bare name""" self.assertEqual(self.tokenizer.tokenize('John Doe'), ['John Doe']) def test_exact_author_name_tokenizer_dots(self): """BibIndexExactNameTokenizer - name with dots""" self.assertEqual(self.tokenizer.tokenize('J. Doe'), ['J Doe']) self.assertEqual(self.tokenizer.tokenize('J.R. Doe'), ['J R Doe']) self.assertEqual(self.tokenizer.tokenize('J. R. 
Doe'), ['J R Doe']) def test_exact_author_name_tokenizer_hyphens(self): """BibIndexExactNameTokenizer - name with hyphens""" self.assertEqual(self.tokenizer.tokenize('Doe, Jean-Pierre'), ['Doe, Jean Pierre']) TEST_SUITE = make_test_suite(TestFuzzyNameTokenizerScanning, TestFuzzyNameTokenizerTokens, TestExactNameTokenizer,) if __name__ == '__main__': #unittest.main() run_test_suite(TEST_SUITE)
kaplun/Invenio-OpenAIRE
modules/bibindex/lib/bibindex_engine_tokenizer_tests.py
Python
gpl-2.0
15,529
[ "Brian" ]
6862962ff0f1242bf38394871c27eeb65062db417bec7dbe5405b1629ce5beee
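A short sketch of the two tokenizers the tests above exercise, assuming an Invenio checkout where bibindex_engine_tokenizer is importable; the expected outputs are copied from the tests.

import bibindex_engine_tokenizer as tokenizer_lib

fuzzy = tokenizer_lib.BibIndexFuzzyNameTokenizer()
print(fuzzy.tokenize("Ringo Starr"))
# ['R Starr', 'Ringo Starr', 'Starr, R', 'Starr, Ringo']

exact = tokenizer_lib.BibIndexExactNameTokenizer()
print(exact.tokenize('J.R. Doe'))
# ['J R Doe']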
#!/usr/bin/python # -- Content-Encoding: UTF-8 -- """ Python modules repository :author: Thomas Calmant :license: Apache Software License 2.0 .. Copyright 2014 isandlaTech Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ # Standard library import ast import imp import logging import os # ######### added by: Bassem D. import json # ######### # Pelix from pelix.ipopo.decorators import ComponentFactory, Provides, Property, \ Invalidate, Validate from pelix.utilities import is_string # Repository beans import cohorte import cohorte.repositories from cohorte.repositories.beans import Artifact, Version # ------------------------------------------------------------------------------ # Documentation strings format __docformat__ = "restructuredtext en" # Version __version_info__ = (1, 0, 1) __version__ = ".".join(str(x) for x in __version_info__) # ------------------------------------------------------------------------------ _logger = logging.getLogger(__name__) # ------------------------------------------------------------------------------ class Module(Artifact): """ Represents a bundle """ def __init__(self, name, version, imports, filename): """ Sets up the bundle details :param name: Name of the module :param version: Version of the module (as a string) :param imports: List of names of imported modules :param filename: Path to the .py file :raise ValueError: Invalid argument """ Artifact.__init__(self, "python", name, version, filename) # Store information self.all_imports = imports def imports(self, artifact): """ Tests if this module might import the given artifact :param artifact: Another artifact :return: True if this module imports the given one """ if artifact.language != self.language: # No inter-language imports return False return artifact.name in self.all_imports # ------------------------------------------------------------------------------ class AstVisitor(ast.NodeVisitor): """ AST visitor to extract imports and version """ # pylint: disable=invalid-name def __init__(self, module_name, is_package): """ Sets up the visitor :param module_name: The module name :param is_package: Whether the name is a package name """ ast.NodeVisitor.__init__(self) self.imports = set() self.version = None self.module_parts = module_name.split(".") # Drop module name, keeping only packages' names if not is_package: self.module_parts = self.module_parts[:-1] self.module_name = module_name def generic_visit(self, node): """ Custom default visit method that avoids to visit further that the module level. """ if type(node) is ast.Module: ast.NodeVisitor.generic_visit(self, node) def resolve_relative_import_from(self, node): """ Converts a relative import (import .module) into an absolute one :param node: An ImportFrom AST node :return: The absolute module name """ if node.level > 0: # Relative import if node.level == 1: parent = '.'.join(self.module_parts) else: parent = '.'.join(self.module_parts[:-node.level + 1]) if node.module: # from .module import ... return '.'.join((parent, node.module)) else: # from . import ... 
return parent else: # Absolute import return node.module def visit_Import(self, node): """ Found an "import" """ for alias in node.names: self.imports.add(alias.name) def visit_ImportFrom(self, node): """ Found a "from ... import ..." """ imported = self.resolve_relative_import_from(node) self.imports.add(imported) def visit_Assign(self, node): """ Found an assignment """ field = getattr(node.targets[0], 'id', None) if not self.version \ and field in ('__version__', '__version_info__'): try: version_parsed = ast.literal_eval(node.value) if isinstance(version_parsed, (tuple, list)): self.version = ".".join(str(version_parsed)) else: self.version = str(version_parsed) except ValueError: # Ignore errors pass def _extract_module_info(filename, module_name, is_package): """ Extract the version and the imports from the given Python file :param filename: Path to the file to parse :param module_name: The fully-qualified module name :param is_package: Whether the name is a package name :return: A (version, [imports]) tuple :raise ValueError: Unreadable file """ try: with open(filename) as filep: source = filep.read() except (OSError, IOError) as ex: raise ValueError("Error reading {0}: {1}".format(filename, ex)) visitor = AstVisitor(module_name, is_package) try: module = ast.parse(source, filename, 'exec') except (ValueError, SyntaxError, TypeError) as ex: raise ValueError("Error parsing {0}: {1}".format(filename, ex)) visitor.visit(module) return visitor.version, visitor.imports # ------------------------------------------------------------------------------ @ComponentFactory("cohorte-repository-artifacts-python-factory") @Provides(cohorte.repositories.SERVICE_REPOSITORY_ARTIFACTS) @Property('_language', cohorte.repositories.PROP_REPOSITORY_LANGUAGE, "python") class PythonModuleRepository(object): """ Represents a repository """ def __init__(self): """ Sets up the repository """ self._language = "python" # Name -> [Modules] self._modules = {} # Directory name -> Package name self._directory_package = {} # File -> Module self._files = {} def __contains__(self, item): """ Tests if the given item is in the repository :param item: Item to be tested :return: True if the item is in the repository """ if isinstance(item, Artifact): # Test artifact language if item.language != "python": return False # Test if the name is in the modules return item.name in self._modules elif item in self._modules: # Item matches a module name return True else: # Test the file name for name in (item, os.path.realpath(item)): if name in self._files: return True # No match return False def __len__(self): """ Length of a repository <=> number of individual artifacts """ return sum((len(modules) for modules in self._modules.values())) def __add_module(self, module, registry=None): """ Adds a module to the registry :param module: A Module object :param registry: Registry where to store the module """ if registry is None: registry = self._modules # Add the module to the registry modules_list = registry.setdefault(module.name, []) if module not in modules_list: modules_list.append(module) modules_list.sort(reverse=True) # Associate the file name with the module self._files[module.file] = module @staticmethod def __compute_name(root, filename): """ Computes the module name of the given file by looking for '__init__.py' files in its parent directories :param filename: Path of the module file :return: The Python name of the module, and a boolean indicating whether the name is a package name :raise ValueError: Invalid directory name 
""" # Subtract the root part filename = os.path.relpath(filename, root) # Drop extension filename = os.path.splitext(filename)[0] name_parts = filename.split(os.path.sep) is_package = name_parts[len(name_parts)-1] == "__init__" if is_package: name_parts = name_parts[:-1] return ".".join(name_parts), is_package @staticmethod def __test_import(name): """ Tries to import the given module, using imp.find_module(). :param name: A module name :return: True if the module can be imported """ try: # find_module() uses a path-like name, not a dotted one path_name = name.replace('.', os.sep) result = imp.find_module(path_name) except ImportError: # Module not found return False else: # Module found: close the file opened by find_module(), if any if result[0] is not None: result[0].close() return True def add_file(self, root, filename): """ Adds a Python file to the repository :param root: Path to the python package base of the added file :param filename: A Python full-path file name :raise ValueError: Unreadable file """ # Compute the real name of the Python file realfile = os.path.realpath(filename) if realfile in self._files: # Already read it: ignore return if os.path.basename(filename).startswith('.'): # Hidden file: ignore return # Compute the complete module name name, is_package = self.__compute_name(root, filename) # Parse the file version, imports = _extract_module_info(realfile, name, is_package) # Store the module self.__add_module(Module(name, version, imports, realfile)) @staticmethod def __is_module(dirname): """ Class method testing whether a directory, given its name, contains a valid python package. :param dirname: The directory' name :return: True if the directory contains a valid python package. False otherwise. """ init_file = os.path.join(dirname, "__init__.py") return os.path.exists(init_file) def add_directory(self, dirname): """ Recursively adds all .py modules found in the given directory into the repository :param dirname: A path to a directory """ for root, dirnames, filenames in os.walk(dirname, followlinks=True): # Check if the current directory, ie. root, is either the base # directory or a valid python package. # Otherwise, do not walk through sub-directories. 
if not os.path.samefile(dirname, root) \ and not self.__is_module(root): continue for filename in filenames: if os.path.splitext(filename)[1] == '.py': fullname = os.path.join(root, filename) try: self.add_file(dirname, fullname) except ValueError as ex: _logger.warning("Error analyzing %s: %s", fullname, ex) def clear(self): """ Clears the repository content """ self._modules.clear() self._files.clear() self._directory_package.clear() def get_artifact(self, name=None, version=None, filename=None, registry=None): """ Retrieves a module from the repository :param name: The module name (mutually exclusive with filename) :param version: The module version (None or '0.0.0' for any), ignored if filename is used :param filename: The module file name (mutually exclusive with name) :param registry: Registry where to look for the module :return: The first matching module :raise ValueError: If the module can't be found """ if registry is None: registry = self._modules if filename: # Use the file name (direct search) module = self._files.get(filename) if module: # Found it return module for bundle_file in self._files: # Search by file base name if os.path.basename(bundle_file) == filename: return self._files[bundle_file] if not name: # Not found by file name, and no name to look for raise ValueError("Module file not found: {0}".format(filename)) if isinstance(name, Module): # Got a module module = name if module in registry: return module else: # Use the module name and version name = module.name version = module.version matching = registry.get(name, None) if not matching: raise ValueError('Module {0} not found.'.format(name)) for module in matching: if module.version.matches(version): return module raise ValueError('Module {0} not found for version {1}' .format(name, version)) def get_language(self): """ Retrieves the language of the artifacts stored in this repository """ return self._language def resolve_installation(self, artifacts, system_artifacts=None): """ Returns all the artifacts that must be installed in order to have the given modules resolved. :param artifacts: A list of bundles to be modules :param system_artifacts: Modules considered as available :return: A tuple: (modules, dependencies, missing artifacts, []) """ # Name -> Module for this resolution local_modules = {} # Module -> [Modules] dependencies = {} # Missing elements missing_modules = set() # Consider system modules already installed if system_artifacts: for module in system_artifacts: if is_string(module): if module in self._modules: module = self._modules[module] else: module = Module(str(module), None, None, None) if isinstance(module, Module): # Only accept modules self.__add_module(module, local_modules) # Resolution loop to_install = [self.get_artifact(name) for name in artifacts] i = 0 while i < len(to_install): # Loop control module = to_install[i] i += 1 # Add the current module self.__add_module(module, local_modules) dependencies[module] = [] # Resolve import ... 
for imported in module.all_imports: # Find the module registry = None provider = None for registry in (local_modules, self._modules): try: provider = self.get_artifact(imported, None, None, registry) # Found one break except ValueError: # Try next pass else: # No provider found, try to import the file if not self.__test_import(imported): # Totally unknown module missing_modules.add(imported) # Resolve next import continue # Store the module we found dependencies[module].append(provider) if registry is self._modules: # The provider was found in the global registry, store it self.__add_module(provider, local_modules) # Store the dependency dependencies[module].append(provider) # The new module will be resolved later if provider not in to_install: # We'll have to resolve it to_install.append(provider) return to_install, dependencies, missing_modules, [] def walk(self): """ # Walk through the known artifacts """ for modules in self._modules.values(): for module in modules: yield module # ######### added by: Bassem D. def load_cache(self): """ Loads the cache from system file to memory """ use_cache = os.environ.get('COHORTE_USE_CACHE') if use_cache and use_cache.lower() == "true": try: with open('cache.js') as input_file: cache = json.load(input_file) if cache: _logger.info("loading repository from cache...") # load modules for module in cache["modules"]: name = module["name"] version = Version(module["version"]) filename = module["filename"] module_bean = Module(name, version, [], filename) self.__add_module(module_bean, self._modules) for directory in cache["directories"]: self._directory_package[directory["dir_name"]] \ = directory["pkg_name"] return True except (IOError, ValueError): # Error reading/parsing cache file return False # No cache return False def save_cache(self): """ Saves the cache from memory to system file """ use_cache = os.environ.get('COHORTE_USE_CACHE') if use_cache and use_cache.lower() == "true": # dump modules _logger.info("Dumping cache info...") # Name -> [Modules] cache_modules = [ {"name": module.name, "version": str(module.version), "language": module.language, "filename": module.file} for name, modules in self._modules.items() for module in modules] # Directory name -> Package name cache_directories = [ {"dir_name": dir_name, "pkg_name": self._directory_package[dir_name]} for dir_name in self._directory_package] # Write cache cache = {"modules": cache_modules, "directories": cache_directories} with open('cache.js', 'w') as outfile: json.dump(cache, outfile, indent=4) # ######### @Validate def validate(self, context): """ Component validated """ # ######### added by: Bassem D. # check if there is a cache file, load it if so # visit repo files and check if the modification date is changed # if so, load the file and update the cached entry # if there were no cache file, we create it at the end of the parsing status = self.load_cache() if not status: _logger.info("Loading repository from file system...") # ######### # Load repositories in another thread # Home/Base repository for key in (cohorte.PROP_BASE, cohorte.PROP_HOME): repository = os.path.join(context.get_property(key), "repo") self.add_directory(repository) # Python path directories python_path = os.getenv("PYTHONPATH", None) if python_path: for path in python_path.split(os.pathsep): self.add_directory(path) # ######### added by: Bassem D. self.save_cache() # ######### @Invalidate def invalidate(self, context): """ Component invalidated """ self.clear()
ahmadshahwan/cohorte-runtime
python/cohorte/repositories/python/modules.py
Python
apache-2.0
21,225
[ "VisIt" ]
681014a76cb8ef4c60e2cb11c8a42c13cc7deef8250bc08532d3e46a5cfa63ec
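The __compute_name() helper above turns a file path into a dotted module name and detects packages via __init__. Below is a standalone sketch of that conversion with made-up POSIX paths, purely to show the expected outputs; it is not part of the original file.

import os

def compute_name(root, filename):
    # Mirrors __compute_name() above: strip the root, drop the extension,
    # then join the remaining path parts with dots.
    filename = os.path.relpath(filename, root)
    filename = os.path.splitext(filename)[0]
    parts = filename.split(os.path.sep)
    is_package = parts[-1] == "__init__"
    if is_package:
        parts = parts[:-1]
    return ".".join(parts), is_package

print(compute_name("/repo", "/repo/cohorte/repositories/python/__init__.py"))
# -> ('cohorte.repositories.python', True)
print(compute_name("/repo", "/repo/cohorte/repositories/python/modules.py"))
# -> ('cohorte.repositories.python.modules', False)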
from ase.io import write
from gpaw import restart

basename = 'CO'

# load gpw binary file and get calculator
atoms, calc = restart(basename + '.gpw')

# write atomic positions to xyz-file
write(basename + '.xyz', atoms)

# loop over all wave functions and write them to .plt files
nbands = calc.get_number_of_bands()
for band in range(nbands):
    wf = calc.get_pseudo_wave_function(band=band)
    fname = basename + '_' + '%d' % (band) + '.plt'
    print 'writing wf', band, 'to file', fname
    write(fname, atoms, data=wf)
qsnake/gpaw
doc/tutorials/plotting/CO2plt.py
Python
gpl-3.0
512
[ "ASE", "GPAW" ]
4f5d7a1f1e4ff32ac4fb48f5dc27a85a53196bddf2cce2279fe0ee5d22ca69fb
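The plotting script above restarts from CO.gpw and expects the wave functions to be stored in it. The sketch below shows one way that file could be produced, assuming recent ASE/GPAW APIs and made-up calculation parameters; the key point is mode='all' in calc.write(), which keeps the pseudo wave functions that get_pseudo_wave_function() needs after restart().

# Minimal sketch (assumed ASE/GPAW calls and parameters) of producing CO.gpw
from ase.build import molecule
from gpaw import GPAW

atoms = molecule('CO')
atoms.center(vacuum=3.0)

calc = GPAW(h=0.2, txt='CO.txt')
atoms.calc = calc
atoms.get_potential_energy()

# mode='all' also stores the wave functions, which restart() needs later
calc.write('CO.gpw', mode='all')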
######################################################################### ## This program is part of 'MOOSE', the ## Messaging Object Oriented Simulation Environment. ## Copyright (C) 2013 Upinder S. Bhalla. and NCBS ## It is made available under the terms of the ## GNU Lesser General Public License version 2.1 ## See the file COPYING.LIB for the full notice. ######################################################################### import math import pylab import numpy import moose def makeModel(): # create container for model model = moose.Neutral( 'model' ) compartment = moose.CubeMesh( '/model/compartment' ) compartment.volume = 1e-20 # the mesh is created automatically by the compartment mesh = moose.element( '/model/compartment/mesh' ) # create molecules and reactions a = moose.Pool( '/model/compartment/a' ) b = moose.Pool( '/model/compartment/b' ) c = moose.Pool( '/model/compartment/c' ) enz1 = moose.Enz( '/model/compartment/b/enz1' ) enz2 = moose.Enz( '/model/compartment/c/enz2' ) cplx1 = moose.Pool( '/model/compartment/b/enz1/cplx' ) cplx2 = moose.Pool( '/model/compartment/c/enz2/cplx' ) reac = moose.Reac( '/model/compartment/reac' ) # connect them up for reactions moose.connect( enz1, 'sub', a, 'reac' ) moose.connect( enz1, 'prd', b, 'reac' ) moose.connect( enz1, 'enz', b, 'reac' ) moose.connect( enz1, 'cplx', cplx1, 'reac' ) moose.connect( enz2, 'sub', b, 'reac' ) moose.connect( enz2, 'prd', a, 'reac' ) moose.connect( enz2, 'enz', c, 'reac' ) moose.connect( enz2, 'cplx', cplx2, 'reac' ) moose.connect( reac, 'sub', a, 'reac' ) moose.connect( reac, 'prd', b, 'reac' ) # connect them up to the compartment for volumes #for x in ( a, b, c, cplx1, cplx2 ): # moose.connect( x, 'mesh', mesh, 'mesh' ) # Assign parameters a.concInit = 1 b.concInit = 0 c.concInit = 0.01 enz1.kcat = 0.4 enz1.Km = 4 enz2.kcat = 0.6 enz2.Km = 0.01 reac.Kf = 0.001 reac.Kb = 0.01 # Create the output tables graphs = moose.Neutral( '/model/graphs' ) outputA = moose.Table2( '/model/graphs/concA' ) outputB = moose.Table2( '/model/graphs/concB' ) # connect up the tables moose.connect( outputA, 'requestOut', a, 'getConc' ); moose.connect( outputB, 'requestOut', b, 'getConc' ); ''' # Schedule the whole lot moose.setClock( 4, 0.01 ) # for the computational objects moose.setClock( 8, 1.0 ) # for the plots # The wildcard uses # for single level, and ## for recursive. moose.useClock( 4, '/model/compartment/##', 'process' ) moose.useClock( 8, '/model/graphs/#', 'process' ) ''' def displayPlots(): for x in moose.wildcardFind( '/model/graphs/conc#' ): t = numpy.arange( 0, x.vector.size, 1 ) #sec pylab.plot( t, x.vector, label=x.name ) def main(): """ This example illustrates how to run a model at different volumes. The key line is just to set the volume of the compartment:: compt.volume = vol If everything else is set up correctly, then this change propagates through to all reactions molecules. For a deterministic reaction one would not see any change in output concentrations. For a stochastic reaction illustrated here, one sees the level of 'noise' changing, even though the concentrations are similar up to a point. This example creates a bistable model having two enzymes and a reaction. One of the enzymes is autocatalytic. This model is set up within the script rather than using an external file. The model is set up to run using the GSSA (Gillespie Stocahstic systems algorithim) method in MOOSE. 
To run the example, run the script ``python scaleVolumes.py`` and hit ``enter`` every cycle to see the outcome of stochastic calculations at ever smaller volumes, keeping concentrations the same. """ makeModel() moose.seed( 11111 ) gsolve = moose.Gsolve( '/model/compartment/gsolve' ) stoich = moose.Stoich( '/model/compartment/stoich' ) compt = moose.element( '/model/compartment' ); stoich.compartment = compt stoich.ksolve = gsolve stoich.path = "/model/compartment/##" #moose.setClock( 5, 1.0 ) # clock for the solver #moose.useClock( 5, '/model/compartment/gsolve', 'process' ) a = moose.element( '/model/compartment/a' ) for vol in ( 1e-19, 1e-20, 1e-21, 3e-22, 1e-22, 3e-23, 1e-23 ): # Set the volume compt.volume = vol print 'vol = ', vol, ', a.concInit = ', a.concInit, ', a.nInit = ', a.nInit moose.reinit() moose.start( 100.0 ) # Run the model for 100 seconds. a = moose.element( '/model/compartment/a' ) b = moose.element( '/model/compartment/b' ) # move most molecules over to b b.conc = b.conc + a.conc * 0.9 a.conc = a.conc * 0.1 moose.start( 100.0 ) # Run the model for 100 seconds. # move most molecules back to a a.conc = a.conc + b.conc * 0.99 b.conc = b.conc * 0.01 moose.start( 100.0 ) # Run the model for 100 seconds. # Iterate through all plots, dump their contents to data.plot. displayPlots() pylab.show( block=False ) print 'vol = ', vol, 'hit enter to go to next plot' raw_input() quit() # Run the 'main' if this script is executed standalone. if __name__ == '__main__': main()
dilawar/moose-full
moose-examples/snippets/scaleVolumes.py
Python
gpl-2.0
5,431
[ "MOOSE" ]
61dd241c02a63e6db5e6c58334912f458d2f51477ce798753f552fb7150ad9d1
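The scaleVolumes docstring above argues that stochastic noise grows as the compartment shrinks because fewer molecules are present. The back-of-envelope sketch below estimates the particle counts at the volumes swept in the script, assuming MOOSE's usual units of millimolar concentrations (1 mM = 1 mol/m^3) and volumes in m^3; it is illustrative only.

# Rough particle-count estimate for the volumes swept above
N_A = 6.022e23   # Avogadro's number, 1/mol
conc_init = 1.0  # mM, i.e. 1 mol/m^3

for vol in (1e-19, 1e-20, 1e-21, 3e-22, 1e-22, 3e-23, 1e-23):
    n = conc_init * N_A * vol
    print('vol = %.0e m^3  ->  ~%.0f molecules of a' % (vol, n))
# At 1e-19 m^3 there are roughly 60000 molecules; at 1e-23 m^3 only ~6,
# which is why the Gillespie runs become visibly noisier at small volumes.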
# Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. """ Predict volumes of crystal structures. """ import os import warnings import numpy as np from monty.serialization import loadfn from pymatgen.analysis.bond_valence import BVAnalyzer from pymatgen.analysis.structure_matcher import StructureMatcher from pymatgen.core import Structure MODULE_DIR = os.path.dirname(os.path.abspath(__file__)) bond_params = loadfn(os.path.join(MODULE_DIR, "DLS_bond_params.yaml")) def _is_ox(structure): comp = structure.composition for k in comp.keys(): try: k.oxi_state except AttributeError: return False return True class RLSVolumePredictor: """ Reference lattice scaling (RLS) scheme that predicts the volume of a structure based on a known crystal structure. """ def __init__(self, check_isostructural=True, radii_type="ionic-atomic", use_bv=True): """ Args: check_isostructural: Whether to test that the two structures are isostructural. This algo works best for isostructural compounds. Defaults to True. radii_type (str): Types of radii to use. You can specify "ionic" (only uses ionic radii), "atomic" (only uses atomic radii) or "ionic-atomic" (uses either ionic or atomic radii, with a preference for ionic where possible). use_bv (bool): Whether to use BVAnalyzer to determine oxidation states if not present. """ self.check_isostructural = check_isostructural self.radii_type = radii_type self.use_bv = use_bv def predict(self, structure, ref_structure): """ Given a structure, returns the predicted volume. Args: structure (Structure): structure w/unknown volume ref_structure (Structure): A reference structure with a similar structure but different species. Returns: a float value of the predicted volume """ if self.check_isostructural: m = StructureMatcher() mapping = m.get_best_electronegativity_anonymous_mapping(structure, ref_structure) if mapping is None: raise ValueError("Input structures do not match!") if "ionic" in self.radii_type: try: # Use BV analyzer to determine oxidation states only if the # oxidation states are not already specified in the structure # and use_bv is true. if (not _is_ox(structure)) and self.use_bv: a = BVAnalyzer() structure = a.get_oxi_state_decorated_structure(structure) if (not _is_ox(ref_structure)) and self.use_bv: a = BVAnalyzer() ref_structure = a.get_oxi_state_decorated_structure(ref_structure) comp = structure.composition ref_comp = ref_structure.composition # Check if all the associated ionic radii are available. if any(k.ionic_radius is None for k in list(comp.keys())) or any( k.ionic_radius is None for k in list(ref_comp.keys()) ): raise ValueError("Not all the ionic radii are available!") numerator = 0 denominator = 0 # Here, the 1/3 factor on the composition accounts for atomic # packing. We want the number per unit length. for k, v in comp.items(): numerator += k.ionic_radius * v ** (1 / 3) for k, v in ref_comp.items(): denominator += k.ionic_radius * v ** (1 / 3) return ref_structure.volume * (numerator / denominator) ** 3 except Exception: warnings.warn("Exception occurred. Will attempt atomic radii.") # If error occurs during use of ionic radii scheme, pass # and see if we can resolve it using atomic radii. if "atomic" in self.radii_type: comp = structure.composition ref_comp = ref_structure.composition # Here, the 1/3 factor on the composition accounts for atomic # packing. We want the number per unit length. 
numerator = 0 denominator = 0 for k, v in comp.items(): numerator += k.atomic_radius * v ** (1 / 3) for k, v in ref_comp.items(): denominator += k.atomic_radius * v ** (1 / 3) return ref_structure.volume * (numerator / denominator) ** 3 raise ValueError("Cannot find volume scaling based on radii choices specified!") def get_predicted_structure(self, structure, ref_structure): """ Given a structure, returns back the structure scaled to predicted volume. Args: structure (Structure): structure w/unknown volume ref_structure (Structure): A reference structure with a similar structure but different species. Returns: a Structure object with predicted volume """ new_structure = structure.copy() new_structure.scale_lattice(self.predict(structure, ref_structure)) return new_structure class DLSVolumePredictor: """ Data-mined lattice scaling (DLS) scheme that relies on data-mined bond lengths to predict the crystal volume of a given structure. As of 2/12/19, we suggest this method be used in conjunction with min_scaling and max_scaling to prevent instances of very large, unphysical predicted volumes found in a small subset of structures. """ def __init__(self, cutoff=4.0, min_scaling=0.5, max_scaling=1.5): """ Args: cutoff (float): cutoff radius added to site radius for finding site pairs. Necessary to increase only if your initial structure guess is extremely bad (atoms way too far apart). In all other instances, increasing cutoff gives same answer but takes more time. min_scaling (float): if not None, this will ensure that the new volume is at least this fraction of the original (preventing too-small volumes) max_scaling (float): if not None, this will ensure that the new volume is at most this fraction of the original (preventing too-large volumes) """ self.cutoff = cutoff self.min_scaling = min_scaling self.max_scaling = max_scaling def predict(self, structure, icsd_vol=False): """ Given a structure, returns the predicted volume. Args: structure (Structure) : a crystal structure with an unknown volume. icsd_vol (bool) : True if the input structure's volume comes from ICSD. Returns: a float value of the predicted volume. """ # Get standard deviation of electronnegativity in the structure. std_x = np.std([site.specie.X for site in structure]) # Sites that have atomic radii sub_sites = [] # Record the "DLS estimated radius" from bond_params. bp_dict = {} for sp in list(structure.composition.keys()): if sp.atomic_radius: sub_sites.extend([site for site in structure if site.specie == sp]) else: warnings.warn(f"VolumePredictor: no atomic radius data for {sp}") if sp.symbol not in bond_params: warnings.warn(f"VolumePredictor: bond parameters not found, used atomic radii for {sp}") else: r, k = bond_params[sp.symbol]["r"], bond_params[sp.symbol]["k"] bp_dict[sp] = float(r) + float(k) * std_x # Structure object that include only sites with known atomic radii. 
reduced_structure = Structure.from_sites(sub_sites) smallest_ratio = None for site1 in reduced_structure: sp1 = site1.specie neighbors = reduced_structure.get_neighbors(site1, sp1.atomic_radius + self.cutoff) for nn in neighbors: sp2 = nn.specie if sp1 in bp_dict and sp2 in bp_dict: expected_dist = bp_dict[sp1] + bp_dict[sp2] else: expected_dist = sp1.atomic_radius + sp2.atomic_radius if not smallest_ratio or nn.nn_distance / expected_dist < smallest_ratio: smallest_ratio = nn.nn_distance / expected_dist if not smallest_ratio: raise ValueError("Could not find any bonds within the given cutoff in this structure.") volume_factor = (1 / smallest_ratio) ** 3 # icsd volume fudge factor if icsd_vol: volume_factor *= 1.05 if self.min_scaling: volume_factor = max(self.min_scaling, volume_factor) if self.max_scaling: volume_factor = min(self.max_scaling, volume_factor) return structure.volume * volume_factor def get_predicted_structure(self, structure, icsd_vol=False): """ Given a structure, returns back the structure scaled to predicted volume. Args: structure (Structure): structure w/unknown volume Returns: a Structure object with predicted volume """ new_structure = structure.copy() new_structure.scale_lattice(self.predict(structure, icsd_vol=icsd_vol)) return new_structure
materialsproject/pymatgen
pymatgen/analysis/structure_prediction/volume_predictor.py
Python
mit
9,788
[ "CRYSTAL", "pymatgen" ]
caf629ff0b95ef94eeec321844b5bc1497555e3d49e83b8b3eeabdbd947846af
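A short usage sketch for the DLSVolumePredictor defined above, using the import path recorded for this file; the toy two-atom cubic cell and its lattice constant are made up for illustration.

from pymatgen.core import Lattice, Structure
from pymatgen.analysis.structure_prediction.volume_predictor import DLSVolumePredictor

# A deliberately badly scaled two-atom cubic Na/Cl cell (illustrative values)
structure = Structure(Lattice.cubic(5.0), ["Na", "Cl"],
                      [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])

predictor = DLSVolumePredictor()
print("predicted volume:", predictor.predict(structure))

scaled = predictor.get_predicted_structure(structure)
print("scaled lattice parameter:", scaled.lattice.a)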
# -*- coding: utf-8 -*- import functools import inspyred import copy import pyvotune from pyvotune.log import logger from pyvotune.util.id_generator import get_id log = logger() def crossover(cross): @functools.wraps(cross) def validating_crossover(random, mom, dad, args): max_crossover_attempts = args.setdefault('max_crossover_attempts', 25) for i in range(max_crossover_attempts): children = cross(random, mom, dad, args) children = [c for c in children if c.validate()] if children: return children #log.debug(u"Crossing over failed between {0} and {1}".format( #mom, dad)) return [mom, dad] return inspyred.ec.variators.crossovers.crossover( validating_crossover) @crossover def n_point_crossover(random, mom, dad, args): crossover_rate = args.setdefault('crossover_rate', 1.0) num_crossover_points = args.setdefault('num_crossover_points', 1) children = [] if random.random() < crossover_rate: mom_genes = mom.group_genes(remove_noops=False) dad_genes = dad.group_genes(remove_noops=False) bro = copy.copy(dad_genes) sis = copy.copy(mom_genes) num_cuts = min(len(mom_genes) - 1, num_crossover_points) cut_points = random.sample(range(1, len(mom_genes)), num_cuts) cut_points.sort() normal = True for i, (m, d) in enumerate(zip(mom_genes, dad_genes)): if i in cut_points: normal = not normal if not normal: bro[i] = m sis[i] = d children.append(pyvotune.Genome( get_id(), initial_state=dad.initial_state, init_parts=bro, parent=dad)) children.append(pyvotune.Genome( get_id(), initial_state=mom.initial_state, init_parts=sis, parent=mom)) else: children.append(mom) children.append(dad) return children @crossover def uniform_crossover(random, mom, dad, args): ux_bias = args.setdefault('ux_bias', 0.5) crossover_rate = args.setdefault('crossover_rate', 1.0) children = [] if random.random() < crossover_rate: mom_genes = mom.group_genes(remove_noops=False) dad_genes = dad.group_genes(remove_noops=False) bro = copy.copy(dad_genes) sis = copy.copy(mom_genes) for i, (m, d) in enumerate(zip(mom_genes, dad_genes)): if random.random() < ux_bias: bro[i] = m sis[i] = d children.append(pyvotune.Genome( get_id(), initial_state=dad.initial_state, init_parts=bro, parent=dad)) children.append(pyvotune.Genome( get_id(), initial_state=mom.initial_state, init_parts=sis, parent=mom)) else: children.append(mom) children.append(dad) return children #""" #================= #:mod:`crossovers` #================= #.. Copyright 2012 Inspired Intelligence Initiative #.. This program is free software: you can redistribute it and/or modify #it under the terms of the GNU General Public License as published by #the Free Software Foundation, either version 3 of the License, or #(at your option) any later version. #.. This program is distributed in the hope that it will be useful, #but WITHOUT ANY WARRANTY; without even the implied warranty of #MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #GNU General Public License for more details. #.. You should have received a copy of the GNU General Public License #along with this program. If not, see <http://www.gnu.org/licenses/>. #.. module:: crossovers #.. moduleauthor:: Aaron Garrett <aaron.lee.garrett@gmail.com> #""" #import copy #import functools #import math #try: #import cPickle as pickle #except ImportError: #import pickle #def crossover(cross): #"""Return an inspyred crossover function based on the given function. #This function generator takes a function that operates on only #two parent candidates to produce an iterable sequence of offspring #(typically two). 
The generator handles the pairing of selected #parents and collecting of all offspring. #The generated function chooses every odd candidate as a 'mom' and #every even as a 'dad' (discounting the last candidate if there is #an odd number). For each mom-dad pair, offspring are produced via #the `cross` function. #The given function ``cross`` must have the following signature:: #offspring = cross(random, mom, dad, args) #This function is most commonly used as a function decorator with #the following usage:: #@crossover #def cross(random, mom, dad, args): ## Implementation of paired crossing #pass #The generated function also contains an attribute named #``single_crossover`` which holds the original crossover function. #In this way, the original single-set-of-parents function can be #retrieved if necessary. #""" #@functools.wraps(cross) #def ecspy_crossover(random, candidates, args): #if len(candidates) % 2 == 1: #candidates = candidates[:-1] #moms = candidates[::2] #dads = candidates[1::2] #children = [] #for i, (mom, dad) in enumerate(zip(moms, dads)): #cross.index = i #offspring = cross(random, mom, dad, args) #for o in offspring: #children.append(o) #return children #ecspy_crossover.single_crossover = cross #return ecspy_crossover #@crossover #def n_point_crossover(random, mom, dad, args): #"""Return the offspring of n-point crossover on the candidates. #This function performs n-point crossover (NPX). It selects *n* #random points without replacement at which to 'cut' the candidate #solutions and recombine them. #.. Arguments: #random -- the random number generator object #mom -- the first parent candidate #dad -- the second parent candidate #args -- a dictionary of keyword arguments #Optional keyword arguments in args: #- *crossover_rate* -- the rate at which crossover is performed #(default 1.0) #- *num_crossover_points* -- the number of crossover points used (default 1) #""" #crossover_rate = args.setdefault('crossover_rate', 1.0) #num_crossover_points = args.setdefault('num_crossover_points', 1) #children = [] #if random.random() < crossover_rate: #num_cuts = min(len(mom) - 1, num_crossover_points) #cut_points = random.sample(range(1, len(mom)), num_cuts) #cut_points.sort() #bro = copy.copy(dad) #sis = copy.copy(mom) #normal = True #for i, (m, d) in enumerate(zip(mom, dad)): #if i in cut_points: #normal = not normal #if not normal: #bro[i] = m #sis[i] = d #children.append(bro) #children.append(sis) #else: #children.append(mom) #children.append(dad) #return children #@crossover #def uniform_crossover(random, mom, dad, args): #"""Return the offspring of uniform crossover on the candidates. #This function performs uniform crossover (UX). For each element #of the parents, a biased coin is flipped to determine whether #the first offspring gets the 'mom' or the 'dad' element. An #optional keyword argument in args, ``ux_bias``, determines the bias. #.. 
Arguments: #random -- the random number generator object #mom -- the first parent candidate #dad -- the second parent candidate #args -- a dictionary of keyword arguments #Optional keyword arguments in args: #- *crossover_rate* -- the rate at which crossover is performed #(default 1.0) #- *ux_bias* -- the bias toward the first candidate in the crossover #(default 0.5) #""" #ux_bias = args.setdefault('ux_bias', 0.5) #crossover_rate = args.setdefault('crossover_rate', 1.0) #children = [] #if random.random() < crossover_rate: #bro = copy.copy(dad) #sis = copy.copy(mom) #for i, (m, d) in enumerate(zip(mom, dad)): #if random.random() < ux_bias: #bro[i] = m #sis[i] = d #children.append(bro) #children.append(sis) #else: #children.append(mom) #children.append(dad) #return children #@crossover #def partially_matched_crossover(random, mom, dad, args): #"""Return the offspring of partially matched crossover on the candidates. #This function performs partially matched crossover (PMX). This type of #crossover assumes that candidates are composed of discrete values that #are permutations of a given set (typically integers). It produces offspring #that are themselves permutations of the set. #.. Arguments: #random -- the random number generator object #mom -- the first parent candidate #dad -- the second parent candidate #args -- a dictionary of keyword arguments #Optional keyword arguments in args: #- *crossover_rate* -- the rate at which crossover is performed #(default 1.0) #""" #crossover_rate = args.setdefault('crossover_rate', 1.0) #if random.random() < crossover_rate: #size = len(mom) #points = random.sample(range(size), 2) #x, y = min(points), max(points) #bro = copy.copy(dad) #bro[x:y + 1] = mom[x:y + 1] #sis = copy.copy(mom) #sis[x:y + 1] = dad[x:y + 1] #for parent, child in zip([dad, mom], [bro, sis]): #for i in range(x, y + 1): #if parent[i] not in child[x:y + 1]: #spot = i #while x <= spot <= y: #spot = parent.index(child[spot]) #child[spot] = parent[i] #return [bro, sis] #else: #return [mom, dad] #@crossover #def arithmetic_crossover(random, mom, dad, args): #"""Return the offspring of arithmetic crossover on the candidates. #This function performs arithmetic crossover (AX), which is similar to a #generalized weighted averaging of the candidate elements. The allele #of each parent is weighted by the *ax_alpha* keyword argument, and #the allele of the complement parent is weighted by 1 - *ax_alpha*. #This averaging is only done on the alleles listed in the *ax_points* #keyword argument. If this argument is ``None``, then all alleles #are used. This means that if this function is used with all default #values, then offspring are simple averages of their parents. #This function also makes use of the bounder function as specified #in the EC's ``evolve`` method. #.. 
Arguments: #random -- the random number generator object #mom -- the first parent candidate #dad -- the second parent candidate #args -- a dictionary of keyword arguments #Optional keyword arguments in args: #- *crossover_rate* -- the rate at which crossover is performed #(default 1.0) #- *ax_alpha* -- the weight for the averaging (default 0.5) #- *ax_points* -- a list of points specifying the alleles to #recombine (default None) #""" #ax_alpha = args.setdefault('ax_alpha', 0.5) #ax_points = args.setdefault('ax_points', None) #crossover_rate = args.setdefault('crossover_rate', 1.0) #bounder = args['_ec'].bounder #children = [] #if random.random() < crossover_rate: #bro = copy.copy(dad) #sis = copy.copy(mom) #if ax_points is None: #ax_points = list(range(min(len(bro), len(sis)))) #for i in ax_points: #bro[i] = ax_alpha * mom[i] + (1 - ax_alpha) * dad[i] #sis[i] = ax_alpha * dad[i] + (1 - ax_alpha) * mom[i] #bro = bounder(bro, args) #sis = bounder(sis, args) #children.append(bro) #children.append(sis) #else: #children.append(mom) #children.append(dad) #return children #@crossover #def blend_crossover(random, mom, dad, args): #"""Return the offspring of blend crossover on the candidates. #This function performs blend crossover (BLX), which is similar to #arithmetic crossover with a bit of mutation. It creates offspring #whose values are chosen randomly from a range bounded by the #parent alleles but that is also extended by some amount proportional #to the *blx_alpha* keyword argument. It is this extension of the #range that provides the additional exploration. This averaging is #only done on the alleles listed in the *blx_points* keyword argument. #If this argument is ``None``, then all alleles are used. This function #also makes use of the bounder function as specified in the EC's #``evolve`` method. #.. Arguments: #random -- the random number generator object #mom -- the first parent candidate #dad -- the second parent candidate #args -- a dictionary of keyword arguments #Optional keyword arguments in args: #- *crossover_rate* -- the rate at which crossover is performed #(default 1.0) #- *blx_alpha* -- the blending rate (default 0.1) #- *blx_points* -- a list of points specifying the alleles to #recombine (default None) #""" #blx_alpha = args.setdefault('blx_alpha', 0.1) #blx_points = args.setdefault('blx_points', None) #crossover_rate = args.setdefault('crossover_rate', 1.0) #bounder = args['_ec'].bounder #children = [] #if random.random() < crossover_rate: #bro = copy.copy(dad) #sis = copy.copy(mom) #if blx_points is None: #blx_points = list(range(min(len(bro), len(sis)))) #for i in blx_points: #smallest, largest = min(mom[i], dad[i]), max(mom[i], dad[i]) #delta = blx_alpha * (largest - smallest) #bro[i] = smallest - delta + random.random() * ( #largest - smallest + 2 * delta) #sis[i] = smallest - delta + random.random() * ( #largest - smallest + 2 * delta) #bro = bounder(bro, args) #sis = bounder(sis, args) #children.append(bro) #children.append(sis) #else: #children.append(mom) #children.append(dad) #return children #def heuristic_crossover(random, candidates, args): #"""Return the offspring of heuristic crossover on the candidates. #It performs heuristic crossover (HX), which is similar to the #update rule used in particle swarm optimization. This function #also makes use of the bounder function as specified in the EC's #``evolve`` method. #.. note:: #This function assumes that candidates can be pickled (for hashing #as keys to a dictionary). #.. 
Arguments: #random -- the random number generator object #candidates -- the candidate solutions #args -- a dictionary of keyword arguments #Optional keyword arguments in args: #- *crossover_rate* -- the rate at which crossover is performed #(default 1.0) #""" #crossover_rate = args.setdefault('crossover_rate', 1.0) #bounder = args['_ec'].bounder #if len(candidates) % 2 == 1: #candidates = candidates[:-1] ## Since we don't have fitness information in the candidates, we need ## to make a dictionary containing the candidate and its corresponding ## individual in the population. #population = list(args['_ec'].population) #lookup = dict( #zip([pickle.dumps(p.candidate, 1) for p in population], population)) #moms = candidates[::2] #dads = candidates[1::2] #children = [] #for mom, dad in zip(moms, dads): #if random.random() < crossover_rate: #bro = copy.copy(dad) #sis = copy.copy(mom) #mom_is_better = lookup[pickle.dumps( #mom, 1)] > lookup[pickle.dumps(dad, 1)] #for i, (m, d) in enumerate(zip(mom, dad)): #negpos = 1 if mom_is_better else -1 #val = d if mom_is_better else m #bro[i] = val + random.random() * negpos * (m - d) #sis[i] = val + random.random() * negpos * (m - d) #bro = bounder(bro, args) #sis = bounder(sis, args) #children.append(bro) #children.append(sis) #else: #children.append(mom) #children.append(dad) #return children #@crossover #def simulated_binary_crossover(random, mom, dad, args): #"""Return the offspring of simulated binary crossover on the candidates. #This function performs simulated binary crossover (SBX), following the #implementation in NSGA-II #`(Deb et al., ICANNGA 1999) <http://vision.ucsd.edu/~sagarwal/icannga.pdf>`_. #.. Arguments: #random -- the random number generator object #mom -- the first parent candidate #dad -- the second parent candidate #args -- a dictionary of keyword arguments #Optional keyword arguments in args: #- *crossover_rate* -- the rate at which crossover is performed #(default 1.0) #- *sbx_distribution_index* -- the non-negative distribution index #(default 10) #A small value of the `sbx_distribution_index` optional argument allows #solutions far away from parents to be created as child solutions, #while a large value restricts only near-parent solutions to be created as #child solutions. #""" #crossover_rate = args.setdefault('crossover_rate', 1.0) #if random.random() < crossover_rate: #di = args.setdefault('sbx_distribution_index', 10) #bounder = args['_ec'].bounder #bro = copy.copy(dad) #sis = copy.copy(mom) #for i, (m, d, lb, ub) in enumerate(zip(mom, dad, bounder.lower_bound, bounder.upper_bound)): #try: #if m > d: #m, d = d, m #beta = 1.0 + 2 * min(m - lb, ub - d) / float(d - m) #alpha = 2.0 - 1.0 / beta ** (di + 1.0) #u = random.random() #if u <= (1.0 / alpha): #beta_q = (u * alpha) ** (1.0 / float(di + 1.0)) #else: #beta_q = ( #1.0 / (2.0 - u * alpha)) ** (1.0 / float(di + 1.0)) #bro_val = 0.5 * ((m + d) - beta_q * (d - m)) #bro_val = max(min(bro_val, ub), lb) #sis_val = 0.5 * ((m + d) + beta_q * (d - m)) #sis_val = max(min(sis_val, ub), lb) #if random.random() > 0.5: #bro_val, sis_val = sis_val, bro_val #bro[i] = bro_val #sis[i] = sis_val #except ZeroDivisionError: ## The offspring already have legitimate values for every element, ## so no need to take any special action here. #pass #return [bro, sis] #else: #return [mom, dad] #@crossover #def laplace_crossover(random, mom, dad, args): #"""Return the offspring of Laplace crossover on the candidates. 
#This function performs Laplace crosssover (LX), following the #implementation specified in (Deep and Thakur, "A new crossover #operator for real coded genetic algorithms," Applied Mathematics #and Computation, Volume 188, Issue 1, May 2007, pp. 895--911). #This function also makes use of the bounder function as specified #in the EC's ``evolve`` method. #.. Arguments: #random -- the random number generator object #mom -- the first parent candidate #dad -- the second parent candidate #args -- a dictionary of keyword arguments #Optional keyword arguments in args: #- *crossover_rate* -- the rate at which crossover is performed #(default 1.0) #- *lx_location* -- the location parameter (default 0) #- *lx_scale* -- the scale parameter (default 0.5) #In some sense, the *lx_location* and *lx_scale* parameters can be thought #of as analogs in a Laplace distribution to the mean and standard #deviation of a Gaussian distribution. If *lx_scale* is near zero, offspring #will be produced near the parents. If *lx_scale* is farther from zero, #offspring will be produced far from the parents. #""" #crossover_rate = args.setdefault('crossover_rate', 1.0) #if random.random() < crossover_rate: #bounder = args['_ec'].bounder #a = args.setdefault('lx_location', 0) #b = args.setdefault('lx_scale', 0.5) #bro = copy.copy(dad) #sis = copy.copy(mom) #for i, (m, d) in enumerate(zip(mom, dad)): #u = random.random() #if random.random() <= 0.5: #beta = a - b * math.log(u) #else: #beta = a + b * math.log(u) #bro[i] = m + beta * abs(m - d) #sis[i] = d + beta * abs(m - d) #bro = bounder(bro, args) #sis = bounder(sis, args) #return [bro, sis] #else: #return [mom, dad]
aelaguiz/pyvotune
pyvotune/variators/crossovers.py
Python
mit
21,399
[ "Gaussian" ]
dd2cd092d250ff5f00c255cd037ee361db86a3ed7debb479c00e59e482bf9ceb
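The validating n_point_crossover above wraps the same cut-point swap used by inspyred's list-based operator. Below is a standalone sketch of that core logic on plain lists, with no pyvotune or inspyred dependency, to make the swap pattern explicit.

import random

def n_point_cross(mom, dad, num_points=1, rng=random):
    # Pick cut points, then alternate which parent contributes genes
    num_cuts = min(len(mom) - 1, num_points)
    cut_points = sorted(rng.sample(range(1, len(mom)), num_cuts))
    bro, sis = list(dad), list(mom)
    normal = True
    for i, (m, d) in enumerate(zip(mom, dad)):
        if i in cut_points:
            normal = not normal
        if not normal:
            bro[i] = m
            sis[i] = d
    return bro, sis

print(n_point_cross([0, 0, 0, 0, 0], [1, 1, 1, 1, 1], num_points=2))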
# Copyright (C) 2010-2019 The ESPResSo project # # This file is part of ESPResSo. # # ESPResSo is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ESPResSo is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import unittest as ut import unittest_decorators as utx import numpy as np import itertools import espressomd import espressomd.shapes import espressomd.lb AGRID = 1.5 VISC = 4.2 DENS = 1.3 FRIC = 1.4 TAU = 0.2 BOX_L = 18.0 TIME_STEP = TAU LB_PARAMETERS = { 'agrid': AGRID, 'visc': VISC, 'dens': DENS, 'tau': TAU } V_BOUNDARY = 0.6 def velocity_profile(x): return V_BOUNDARY / (BOX_L - 2. * AGRID) * (x - AGRID) class LBInterpolation: """ Couette flow profile along x in z-direction. Check that velocity at shear plane next to the resting boundary is zero. """ lbf = None system = espressomd.System(box_l=[BOX_L] * 3) system.cell_system.skin = 0.4 * AGRID system.time_step = TIME_STEP def set_boundaries(self, velocity): """Place boundaries *not* exactly on a LB node.""" wall_shape1 = espressomd.shapes.Wall( normal=[1, 0, 0], dist=0.6 * AGRID) wall_shape2 = espressomd.shapes.Wall( normal=[-1, 0, 0], dist=-(BOX_L - 0.6 * AGRID)) self.system.lbboundaries.add( espressomd.lbboundaries.LBBoundary(shape=wall_shape1)) self.system.lbboundaries.add( espressomd.lbboundaries.LBBoundary(shape=wall_shape2, velocity=velocity)) def test_interpolated_velocity(self): """ Check that the interpolated LB fluid velocity is zero between boundary node and first fluid node. """ self.set_boundaries([0.0, 0.0, V_BOUNDARY]) self.system.integrator.run(250) # Shear plane for boundary 1 # for pos in itertools.product((AGRID,), np.arange(0.5 * AGRID, BOX_L, AGRID), np.arange(0.5 * AGRID, BOX_L, AGRID)): # np.testing.assert_almost_equal(self.lbf.get_interpolated_velocity(pos)[2], 0.0) # Bulk for pos in itertools.product( np.arange(1.5 * AGRID, BOX_L - 1.5 * AGRID, 0.5 * AGRID), np.arange(0.5 * AGRID, BOX_L, AGRID), np.arange(0.5 * AGRID, BOX_L, AGRID)): np.testing.assert_almost_equal( self.lbf.get_interpolated_velocity(pos)[2], velocity_profile(pos[0]), decimal=4) # Shear plane for boundary 2 # for pos in itertools.product((9 * AGRID,), np.arange(0.5 * AGRID, BOX_L, AGRID), np.arange(0.5 * AGRID, BOX_L, AGRID)): # np.testing.assert_almost_equal(self.lbf.get_interpolated_velocity(pos)[2], # 1.0, decimal=4) def test_mach_limit_check(self): """ Assert that the mach number check fires an exception. 
""" max_vel = 0.31 * AGRID / TAU with self.assertRaises(Exception): self.set_boundaries([0.0, 0.0, max_vel]) self.system.integrator.run(1) @utx.skipIfMissingFeatures(['LB_BOUNDARIES']) class LBInterpolationCPU(ut.TestCase, LBInterpolation): def setUp(self): self.system.lbboundaries.clear() self.system.actors.clear() self.lbf = espressomd.lb.LBFluid(**LB_PARAMETERS) self.system.actors.add(self.lbf) @utx.skipIfMissingGPU() @utx.skipIfMissingFeatures(['LB_BOUNDARIES_GPU']) class LBInterpolationGPU(ut.TestCase, LBInterpolation): def setUp(self): self.system.lbboundaries.clear() self.system.actors.clear() self.lbf = espressomd.lb.LBFluidGPU(**LB_PARAMETERS) self.system.actors.add(self.lbf) if __name__ == "__main__": ut.main()
psci2195/espresso-ffans
testsuite/python/lb_interpolation.py
Python
gpl-3.0
4,144
[ "ESPResSo" ]
30949c2c2d7ee925eb6f3a9be845a915bc1f79350ebc912d8c00add31172620f
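The test above builds its expectation from velocity_profile(), a linear Couette profile between the two walls. A quick numeric check with the same constants shows it vanishing at the shear plane next to the resting wall and reaching V_BOUNDARY at the moving one.

# Same constants as in the test: AGRID=1.5, BOX_L=18.0, V_BOUNDARY=0.6
AGRID, BOX_L, V_BOUNDARY = 1.5, 18.0, 0.6

def velocity_profile(x):
    return V_BOUNDARY / (BOX_L - 2. * AGRID) * (x - AGRID)

print(velocity_profile(AGRID))           # 0.0 at the resting-wall shear plane
print(velocity_profile(BOX_L - AGRID))   # 0.6 at the moving-wall shear plane
print(velocity_profile(0.5 * BOX_L))     # 0.3 at mid-channel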
# Too much Tinker
# From http://cseatglance.blogspot.com/2015/08/a-simple-gui-game-with-python-tkinter.html?spref=tw
# Typer: Ginny C Ghezzo
# What I learned:
from tkinter import *
import random
import tkinter.messagebox

root = Tk()
Label(root, text="Find Out the Hidden Position").pack()

def findcurpos(event):
    print("You Clicked at ", event.x, event.y)
    if event.x in range(random.randint(0, 130)):
        if event.y in range(80, 100):
            tkinter.messagebox.showinfo("You win", "you win!!")
            print("you win")

myframe = Frame(root, bg='beige', width=130, height=130)
myframe.bind("<Button-1>", findcurpos)
myframe.pack()
root.mainloop()
gghezzo/prettypython
PythonEveryDay2015/TooMuchTinker.py
Python
mit
636
[ "TINKER" ]
227f79154b938b83e44860e58fdc9306b889ffdd8f806aa125ed03394598d247
import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, json_output


@click.command('show_job')
@click.argument("job_id", type=str)
@click.option(
    "--full_details",
    help="when ``True``, the complete list of details for the given job.",
    is_flag=True
)
@pass_context
@custom_exception
@json_output
def cli(ctx, job_id, full_details=False):
    """Get details of a given job of the current user.

    Output:

        A description of the given job.
        For example::

            {'create_time': '2014-03-01T16:17:29.828624',
             'exit_code': 0,
             'id': 'a799d38679e985db',
             'inputs': {'input': {'id': 'ebfb8f50c6abde6d', 'src': 'hda'}},
             'model_class': 'Job',
             'outputs': {'output': {'id': 'a799d38679e985db', 'src': 'hda'}},
             'params': {'chromInfo': '"/opt/galaxy-central/tool-data/shared/ucsc/chrom/?.len"',
                        'dbkey': '"?"',
                        'seq_col': '"2"',
                        'title_col': '["1"]'},
             'state': 'ok',
             'tool_id': 'tab2fasta',
             'update_time': '2014-03-01T16:17:31.930728'}
    """
    return ctx.gi.jobs.show_job(job_id, full_details=full_details)
galaxy-iuc/parsec
parsec/commands/jobs/show_job.py
Python
apache-2.0
1,268
[ "Galaxy" ]
0d6dc2d5b316e91c27a7be339e835d4d88899059743ffccfd54ade6a38c3e71a
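The command above is a thin wrapper around BioBlend's jobs client (ctx.gi is a GalaxyInstance); from the shell it is normally reached as "parsec jobs show_job JOB_ID --full_details", assuming the usual parsec command grouping. An equivalent direct BioBlend call is sketched below; the server URL, API key and job id are placeholders.

from bioblend.galaxy import GalaxyInstance

# Placeholder credentials; supply a real Galaxy URL and API key
gi = GalaxyInstance(url="https://usegalaxy.example.org", key="YOUR_API_KEY")
job = gi.jobs.show_job("a799d38679e985db", full_details=True)
print(job["state"], job["tool_id"])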
# coding=utf-8 """audio_read reads in a whole audio file with resampling.""" # Equivalent to: # import librosa # def audio_read(filename, sr=11025, channels=1): # """Read a soundfile, return (d, sr).""" # d, sr = librosa.load(filename, sr=sr, mono=(channels == 1)) # return d, sr # The code below is adapted from: # https://github.com/bmcfee/librosa/blob/master/librosa/core/audio.py # This is its original copyright notice: # Copyright (c) 2014, Brian McFee, Matt McVicar, Dawen Liang, Colin Raffel, Douglas Repetto, Dan Ellis. # # Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. from __future__ import division import os import re import subprocess import threading import time import numpy as np # For wavread fallback. import scipy.io.wavfile as wav try: import queue except ImportError: # noinspection PyUnresolvedReferences import Queue as queue # If ffmpeg is unavailable, you can set HAVE_FFMPEG to False which will cause # soundfile reads to go via scipy.io.wavfile. However, this means that only # *.wav files are supported *and* they must already be resampled to the # system sampling rate (e.g. 11025 Hz). HAVE_FFMPEG = True def wavread(filename): """Read in audio data from a wav file. Return d, sr.""" # Read in wav file. samplerate, wave_data = wav.read(filename) # Normalize short ints to floats in range [-1..1). data = np.asfarray(wave_data) / 32768.0 return data, samplerate def audio_read(filename, sr=None, channels=None): """Read a soundfile, return (d, sr).""" if HAVE_FFMPEG: return audio_read_ffmpeg(filename, sr, channels) else: data, samplerate = wavread(filename) if channels == 1 and len(data.shape) == 2 and data.shape[-1] != 1: # Convert stereo to mono. data = np.mean(data, axis=-1) if sr and sr != samplerate: raise ValueError("Wav file has samplerate %f but %f requested." % ( samplerate, sr)) return data, samplerate def audio_read_ffmpeg(filename, sr=None, channels=None): """Read a soundfile, return (d, sr).""" # Hacked version of librosa.load and audioread/ff. offset = 0.0 duration = None dtype = np.float32 y = [] with FFmpegAudioFile(os.path.realpath(filename), sample_rate=sr, channels=channels) as input_file: sr = input_file.sample_rate channels = input_file.channels s_start = int(np.floor(sr * offset) * channels) if duration is None: s_end = np.inf else: s_end = s_start + int(np.ceil(sr * duration) * channels) num_read = 0 for frame in input_file: frame = buf_to_float(frame, dtype=dtype) num_read_prev = num_read num_read += len(frame) if num_read < s_start: # offset is after the current frame, keep reading. continue if s_end < num_read_prev: # we're off the end. stop reading break if s_end < num_read: # the end is in this frame. crop. 
frame = frame[:s_end - num_read_prev] if num_read_prev <= s_start < num_read: # beginning is in this frame frame = frame[(s_start - num_read_prev):] # tack on the current frame y.append(frame) if not len(y): # Zero-length read y = np.zeros(0, dtype=dtype) else: y = np.concatenate(y) if channels > 1: y = y.reshape((-1, 2)).T # Final cleanup for dtype and contiguity y = np.ascontiguousarray(y, dtype=dtype) return (y, sr) def buf_to_float(x, n_bytes=2, dtype=np.float32): """Convert an integer buffer to floating point values. This is primarily useful when loading integer-valued wav data into numpy arrays. .. seealso:: :func:`librosa.util.buf_to_float` :parameters: - x : np.ndarray [dtype=int] The integer-valued data buffer - n_bytes : int [1, 2, 4] The number of bytes per sample in ``x`` - dtype : numeric type The target output type (default: 32-bit float) :return: - x_float : np.ndarray [dtype=float] The input data buffer cast to floating point """ # Invert the scale of the data scale = 1. / float(1 << ((8 * n_bytes) - 1)) # Construct the format string fmt = '<i{:d}'.format(n_bytes) # Rescale and format the data buffer return scale * np.frombuffer(x, fmt).astype(dtype) # The code below is adapted from: # https://github.com/sampsyo/audioread/blob/master/audioread/ffdec.py # Below is its original copyright notice: # This file is part of audioread. # Copyright 2014, Adrian Sampson. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. class QueueReaderThread(threading.Thread): """A thread that consumes data from a filehandle and sends the data over a Queue. """ def __init__(self, fh, blocksize=1024, discard=False): super(QueueReaderThread, self).__init__() self.fh = fh self.blocksize = blocksize self.daemon = True self.discard = discard self.queue = None if discard else queue.Queue() def run(self): while True: data = self.fh.read(self.blocksize) if not self.discard: self.queue.put(data) if not data: # Stream closed (EOF). break class FFmpegAudioFile(object): """An audio file decoded by the ffmpeg command-line utility.""" def __init__(self, filename, channels=None, sample_rate=None, block_size=4096): if not os.path.isfile(filename): raise ValueError(filename + " not found.") popen_args = ['ffmpeg', '-i', filename, '-f', 's16le'] self.channels = channels self.sample_rate = sample_rate if channels: popen_args.extend(['-ac', str(channels)]) if sample_rate: popen_args.extend(['-ar', str(sample_rate)]) popen_args.append('-') self.proc = subprocess.Popen( popen_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) # Start another thread to consume the standard output of the # process, which contains raw audio data. self.stdout_reader = QueueReaderThread(self.proc.stdout, block_size) self.stdout_reader.start() # Read relevant information from stderr. try: self._get_info() except ValueError: raise ValueError("Error reading header info from " + filename) # Start a separate thread to read the rest of the data from # stderr. 
This (a) avoids filling up the OS buffer and (b) # collects the error output for diagnosis. self.stderr_reader = QueueReaderThread(self.proc.stderr) self.stderr_reader.start() def read_data(self, timeout=10.0): """Read blocks of raw PCM data from the file.""" # Read from stdout in a separate thread and consume data from # the queue. start_time = time.time() while True: # Wait for data to be available or a timeout. data = None try: data = self.stdout_reader.queue.get(timeout=timeout) if data: yield data else: # End of file. break except queue.Empty: # Queue read timed out. end_time = time.time() if not data: if end_time - start_time >= timeout: # Nothing interesting has happened for a while -- # FFmpeg is probably hanging. raise ValueError('ffmpeg output: {}'.format( ''.join(self.stderr_reader.queue.queue) )) else: start_time = end_time # Keep waiting. continue def _get_info(self): """Reads the tool's output from its stderr stream, extracts the relevant information, and parses it. """ out_parts = [] while True: line = self.proc.stderr.readline() if not line: # EOF and data not found. raise ValueError("stream info not found") # In Python 3, result of reading from stderr is bytes. if isinstance(line, bytes): line = line.decode('utf8', 'ignore') line = line.strip().lower() if 'no such file' in line: raise IOError('file not found') elif 'invalid data found' in line: raise ValueError() elif 'duration:' in line: out_parts.append(line) elif 'audio:' in line: out_parts.append(line) self._parse_info(''.join(out_parts)) break def _parse_info(self, s): """Given relevant data from the ffmpeg output, set audio parameter fields on this object. """ # Sample rate. match = re.search(r'(\d+) hz', s) if match: self.sample_rate_orig = int(match.group(1)) else: self.sample_rate_orig = 0 if self.sample_rate is None: self.sample_rate = self.sample_rate_orig # Channel count. match = re.search(r'hz, ([^,]+),', s) if match: mode = match.group(1) if mode == 'stereo': self.channels_orig = 2 else: match = re.match(r'(\d+) ', mode) if match: self.channels_orig = int(match.group(1)) else: self.channels_orig = 1 else: self.channels_orig = 0 if self.channels is None: self.channels = self.channels_orig # Duration. match = re.search( r'duration: (\d+):(\d+):(\d+).(\d)', s ) if match: durparts = list(map(int, match.groups())) duration = ( durparts[0] * 60 * 60 + durparts[1] * 60 + durparts[2] + float(durparts[3]) / 10 ) self.duration = duration else: # No duration found. self.duration = 0 def close(self): """Close the ffmpeg process used to perform the decoding.""" # Kill the process if it is still running. if hasattr(self, 'proc') and self.proc.returncode is None: self.proc.kill() self.proc.wait() def __del__(self): self.close() # Iteration. def __iter__(self): return self.read_data() # Context manager. def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.close() return False
dpwe/audfprint
audio_read.py
Python
mit
12,385
[ "Brian" ]
a09f1477115560aaf4c9fd85fc19e6d39ace8ad790f2c300759f17ae335f1cfa
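A usage sketch for the audio_read() helper defined above, assuming the file is importable as the module audio_read and that ffmpeg is on PATH (otherwise set HAVE_FFMPEG to False and supply a pre-resampled .wav); the file name is a placeholder.

from audio_read import audio_read

# Decode and resample to mono 11025 Hz, as the fingerprinting code expects
samples, sr = audio_read("track.mp3", sr=11025, channels=1)
print("read %d samples at %d Hz" % (len(samples), sr))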
#!/usr/bin/env python # # $Id: check_keywords.py 9317 2011-06-10 02:09:04Z nathan_george $ # # Proprietary and confidential. # Copyright $Date:: 2011#$ Perfect Search Corporation. # All rights reserved. # import sys, os, re, sandbox, svnwrap, metadata, optparse, xmail from ioutil import * parser = optparse.OptionParser('Usage: %prog [options] [path]\n\nSee whether svn keyword expansion is enabled on source code files. Optionally, email report.') xmail.addMailOptions(parser) HELP_PAT = re.compile(r'(--?|/)(\?|(h(elp)?))$', re.IGNORECASE) EXT_PAT = metadata.INTERESTING_EXT_PAT NON_RECURSING_FOLDERS = ['.svn','boost','.metadata','build','Archive','Dist','Install'] KEYWORDS_PROP = "svn:keywords" def checkFile(root, name, relativePath, warn=True): path = os.path.join(root, name) if os.path.getsize(path) == 0: return 0 answer = svnwrap.propget(path, KEYWORDS_PROP).strip() if not answer: if warn: print(' %s: Warning: svn:keywords property not set.' % os.path.join(relativePath, name)) return 1 else: pass #print('%s svn:keywords = %s' % (name, answer)) return 0 class KeywordCheckVisitor: def __init__(self, warn): self.warn = warn self.badFiles = [] def visit(self, folder, item, relativePath): #print('visited %s' % item) err = checkFile(folder, item, relativePath, self.warn) if err: self.badFiles.append(folder + item) def check(path, warn=True): if not os.path.isdir(path): sys.stderr.write('%s is not a valid folder.\n' % path) return 1 path = norm_folder(path) print('Checking svn:keywords in %s...\n' % path) visitor = KeywordCheckVisitor(warn) checkedFiles, checkedFolders = metadata.visit(path, visitor) print('Checked %d files in %d folders; found %d errors.' % (checkedFiles, checkedFolders, len(visitor.badFiles))) return visitor.badFiles def main(warn, folder, options=None): badFiles = None exitCode = 0 if not folder: folder = sandbox.current.get_code_root() oldStdout = None sendEmail = xmail.hasDest(options) if sendEmail: oldStdout = sys.stdout sys.stdout = FakeFile() try: badFiles = check(folder, warn) if sendEmail: msg = sys.stdout.txt #print(msg) sys.stdout = oldStdout oldStdout = None xmail.sendmail(msg, sender='Keyword Scanner <code.scan@example.com>', subject='svn:keywords scan on %s' % metadata.get_friendly_name_for_path(folder), options=options) finally: if oldStdout: sys.stdout = oldStdout return badFiles if __name__ == '__main__': ( options, args ) = parser.parse_args() folder = None if args: folder = args[0] badFiles = main(True, folder, options) exitCode = 0 if badFiles: exitCode = len(badFiles) sys.exit(exitCode)
perfectsearch/sandman
code/buildscripts/codescan/check_keywords.py
Python
mit
2,960
[ "VisIt" ]
4680b0d9c1f0115c5e37bfed7bb7dba749fc57ce78fad4b78f97f7fbb24e56da
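The scanner above warns for files whose svn:keywords property is unset (it reads the property through svnwrap.propget). svnwrap does not expose a setter in this file, so the sketch below falls back to the plain svn client via subprocess to set the property on a flagged file; the path and keyword list are placeholders, not part of the original script.

import subprocess

path = "code/example.py"  # placeholder path reported by the scanner
subprocess.check_call(["svn", "propset", "svn:keywords", "Id Date Rev", path])
print(subprocess.check_output(["svn", "propget", "svn:keywords", path]).decode().strip())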
#!/usr/bin/env python """ Reconstruction script that implements math algorithm of B0 mass reconstruction Uses different models for fitting signal and background events Usage: python reconstruction.py -i [INPUT_FILENAME] [-t [TREE_NAME]] [-n [MAX_EVENTS]] [-b] [-f] [-l] [-q] [-r] [-v] Run python reconstruction.py --help for more details """ import sys import argparse import time import math import ROOT ROOT.PyConfig.IgnoreCommandLineOptions = True # to prevent TApplication from capturing command line options and breaking argparse. It must be placed right after module import from ROOT import TFile, RooRealVar, RooArgSet, RooDataSet from utility.common import reconstruct, show_plot from utility.UnreconstructableEventError import UnreconstructableEventError from utility.SignalModel import SignalModel from utility.BackgroundModel import BackgroundModel # few constants NBINS = 100 # Number of bins in the histogram XMIN = 4.5 # Left bound of the histogram XMAX = 6.5 # Right bound of the histogram PEAK_MIN = 4.7 # Minimum value of the peak PEAK_MAX = 5.5 # Maximum value of the peak def process(file_name, tree_name, max_events, n_bins, x_min, x_max, fit, background, peak_x_min, peak_x_max, draw_legend, plot_q_square, plot_momentum_resolution, mc_tree_name, verbose): """ A function that forms the main logic of the script Args: file_name (str): the name of the file to process tree_name (str): the name of the tree to process n_bins (int): the number of bins to be used in the histogram x_min (float): the left bound of the histogram x_max (float): the right bound of the histogram fit (bool): the flag that determines whether the data will be fitted background (bool): the flag that determines whether signal or background b_mass_data is processed peak_x_min (float): the left bound of the peak peak_x_max (float): the right bound of the peak draw_legend (bool): the flag that determines whether the histogram legend will be drawn plot_q_square (bool): the flag that determines whether the q^2 distribution will be plotted plot_momentum_resolution (bool): the flag that determines whether the tau and neutrino momentum resolution distributions will be plotted max_events (int): the maximum number of events that will be processed verbose (bool): the flag that switches inreased verbosity """ start_time = time.time() last_timestamp = time.time() # Opening the file and getting the branch input_file = TFile(file_name, 'read') event_tree = input_file.Get(tree_name) # Event counters processed_events = 0 # Number of processed events reconstructable_events = 0 # Events with valid tau+ and tau- decay vertex # Variables for RooFit b_mass = RooRealVar('mB', 'm_{B}', x_min, x_max) b_mass_data = RooDataSet('mB', 'm_{B} data', RooArgSet(b_mass)) # Storage for reconstructed B mass values if plot_q_square: q_square = RooRealVar('q2', 'q^{2}', 12.5, 22.5) q_square_data = RooDataSet('q2_data', 'q^{2} data', RooArgSet(q_square)) # q^2 values container if plot_momentum_resolution: error_p_tauplus_x = RooRealVar('error_p_tauplus_x', '#epsilon_{p_{#tau^{+}x}}', -2., 2.) error_p_tauplus_x_data = RooDataSet('error_p_tauplus_x_data', '#epsilon_{p_{#tau^{+}x}} data', RooArgSet(error_p_tauplus_x)) error_p_tauplus_y = RooRealVar('error_p_tauplus_y', '#epsilon_{p_{#tau^{+}y}}', -2., 2.) error_p_tauplus_y_data = RooDataSet('error_p_tauplus_y_data', '#epsilon_{p_{#tau^{+}y}} data', RooArgSet(error_p_tauplus_y)) error_p_tauplus_z = RooRealVar('error_p_tauplus_z', '#epsilon_{p_{#tau^{+}z}}', -2., 2.) 
error_p_tauplus_z_data = RooDataSet('error_p_tauplus_z_data', '#epsilon_{p_{#tau^{+}z}} data', RooArgSet(error_p_tauplus_z)) error_p_tauminus_x = RooRealVar('error_p_tauminus_x', '#epsilon_{p_{#tau^{-}x}}', -2., 2.) error_p_tauminus_x_data = RooDataSet('error_p_tauminus_x_data', '#epsilon_{p_{#tau^{-}x}} data', RooArgSet(error_p_tauminus_x)) error_p_tauminus_y = RooRealVar('error_p_tauminus_y', '#epsilon_{p_{#tau^{-}y}}', -2., 2.) error_p_tauminus_y_data = RooDataSet('error_p_tauminus_y_data', '#epsilon_{p_{#tau^{-}y}} data', RooArgSet(error_p_tauminus_y)) error_p_tauminus_z = RooRealVar('error_p_tauminus_z', '#epsilon_{p_{#tau^{-}z}}', -2., 2.) error_p_tauminus_z_data = RooDataSet('error_p_tauminus_z_data', '#epsilon_{p_{#tau^{-}z}} data', RooArgSet(error_p_tauminus_z)) error_p_nu_tauplus_x = RooRealVar('error_p_nu_tauplus_x', '#epsilon_{p_{#nu#tau^{+}x}}', -5., 5.) error_p_nu_tauplus_x_data = RooDataSet('error_p_nu_tauplus_x_data', '#epsilon_{p_{#nu#tau^{+}x}} data', RooArgSet(error_p_nu_tauplus_x)) error_p_nu_tauplus_y = RooRealVar('error_p_nu_tauplus_y', '#epsilon_{p_{#nu#tau^{+}y}}', -5., 5.) error_p_nu_tauplus_y_data = RooDataSet('error_p_nu_tauplus_y_data', '#epsilon_{p_{#nu#tau^{+}y}} data', RooArgSet(error_p_nu_tauplus_y)) error_p_nu_tauplus_z = RooRealVar('error_p_nu_tauplus_z', '#epsilon_{p_{#nu#tau^{+}z}}', -5., 5.) error_p_nu_tauplus_z_data = RooDataSet('error_p_nu_tauplus_z_data', '#epsilon_{p_{#nu#tau^{+}z}} data', RooArgSet(error_p_nu_tauplus_z)) error_p_nu_tauminus_x = RooRealVar('error_p_nu_tauminus_x', '#epsilon_{p_{#nu#tau^{-}x}}', -5., 5.) error_p_nu_tauminus_x_data = RooDataSet('error_p_nu_tauminus_x_data', '#epsilon_{p_{#nu#tau^{-}x}} data', RooArgSet(error_p_nu_tauminus_x)) error_p_nu_tauminus_y = RooRealVar('error_p_nu_tauminus_y', '#epsilon_{p_{#nu#tau^{-}y}}', -5., 5.) error_p_nu_tauminus_y_data = RooDataSet('error_p_nu_tauminus_y_data', '#epsilon_{p_{#nu#tau^{-}y}} data', RooArgSet(error_p_nu_tauminus_y)) error_p_nu_tauminus_z = RooRealVar('error_p_nu_tauminus_z', '#epsilon_{p_{#nu#tau^{-}z}}', -5., 5.) error_p_nu_tauminus_z_data = RooDataSet('error_p_nu_tauminus_z_data', '#epsilon_{p_{#nu#tau^{-}z}} data', RooArgSet(error_p_nu_tauminus_z)) # Loop through the events # for counter, (event, mc_event) in enumerate(zip(event_tree, mc_event_tree)): # this finest construction doesn't work for some reason for counter in xrange(event_tree.GetEntries()): # so we have to use an old one if counter < max_events: event_tree.GetEntry(counter) if plot_momentum_resolution: mc_event_tree.GetEntry(counter) processed_events += 1 if (counter + 1) % 100 == 0: # print status message every 100 events print('Processing event {} ({:.1f} events / s)'.format(counter + 1, 100. 
/ (time.time() - last_timestamp))) last_timestamp = time.time() try: rec_ev = reconstruct(event_tree, verbose) reconstructable_events += 1 b_mass.setVal(rec_ev.m_b) b_mass_data.add(RooArgSet(b_mass)) if plot_q_square: q_square.setVal(rec_ev.q_square()) q_square_data.add(RooArgSet(q_square)) if plot_momentum_resolution: error_p_tauplus_x.setVal((rec_ev.p_tauplus.px - mc_event_tree.tauplus_px) / mc_event_tree.tauplus_px) error_p_tauplus_x_data.add(RooArgSet(error_p_tauplus_x)) error_p_tauplus_y.setVal((rec_ev.p_tauplus.py - mc_event_tree.tauplus_py) / mc_event_tree.tauplus_py) error_p_tauplus_y_data.add(RooArgSet(error_p_tauplus_y)) error_p_tauplus_z.setVal((rec_ev.p_tauplus.pz - mc_event_tree.tauplus_pz) / mc_event_tree.tauplus_pz) error_p_tauplus_z_data.add(RooArgSet(error_p_tauplus_z)) error_p_tauminus_x.setVal((rec_ev.p_tauminus.px - mc_event_tree.tauminus_px) / mc_event_tree.tauminus_px) error_p_tauminus_x_data.add(RooArgSet(error_p_tauminus_x)) error_p_tauminus_y.setVal((rec_ev.p_tauminus.py - mc_event_tree.tauminus_py) / mc_event_tree.tauminus_py) error_p_tauminus_y_data.add(RooArgSet(error_p_tauminus_y)) error_p_tauminus_z.setVal((rec_ev.p_tauminus.pz - mc_event_tree.tauminus_pz) / mc_event_tree.tauminus_pz) error_p_tauminus_z_data.add(RooArgSet(error_p_tauminus_z)) error_p_nu_tauplus_x.setVal((rec_ev.p_nu_tauplus.px - mc_event_tree.nu_tauplus_px) / mc_event_tree.nu_tauplus_px) error_p_nu_tauplus_x_data.add(RooArgSet(error_p_nu_tauplus_x)) error_p_nu_tauplus_y.setVal((rec_ev.p_nu_tauplus.py - mc_event_tree.nu_tauplus_py) / mc_event_tree.nu_tauplus_py) error_p_nu_tauplus_y_data.add(RooArgSet(error_p_nu_tauplus_y)) error_p_nu_tauplus_z.setVal((rec_ev.p_nu_tauplus.pz - mc_event_tree.nu_tauplus_pz) / mc_event_tree.nu_tauplus_pz) error_p_nu_tauplus_z_data.add(RooArgSet(error_p_nu_tauplus_z)) error_p_nu_tauminus_x.setVal((rec_ev.p_nu_tauminus.px - mc_event_tree.nu_tauminus_px) / mc_event_tree.nu_tauminus_px) error_p_nu_tauminus_x_data.add(RooArgSet(error_p_nu_tauminus_x)) error_p_nu_tauminus_y.setVal((rec_ev.p_nu_tauminus.py - mc_event_tree.nu_tauminus_py) / mc_event_tree.nu_tauminus_py) error_p_nu_tauminus_y_data.add(RooArgSet(error_p_nu_tauminus_y)) error_p_nu_tauminus_z.setVal((rec_ev.p_nu_tauminus.pz - mc_event_tree.nu_tauminus_pz) / mc_event_tree.nu_tauminus_pz) error_p_nu_tauminus_z_data.add(RooArgSet(error_p_nu_tauminus_z)) except UnreconstructableEventError: pass end_time = time.time() # printing some useful statistics print('{} events have been processed'.format(processed_events)) print('Elapsed time: {:.1f} s ({:.1f} events / s)'.format(end_time - start_time, float(processed_events) / (end_time - start_time))) print('Reconstruction efficiency: {} / {} = {:.3f}'.format(reconstructable_events, processed_events, float(reconstructable_events) / processed_events)) if fit: if background: model = BackgroundModel(name = 'background_model', title = 'Background Model', x = b_mass, mean = RooRealVar('mean', '#mu', 5.279, peak_x_min, peak_x_max), width_cb = RooRealVar('width_cb', '#sigma_{CB}', 0.2, 0.02, 1.), width_gauss = RooRealVar('width_gauss', '#sigma_{Gauss}', 0.2, 0.02, 1.), alpha = RooRealVar('alpha_cb', '#alpha_{CB}', -1., -10., -0.1), n = RooRealVar('n_cb', 'n_{CB}', 1., 0., 10.), gauss_fraction = RooRealVar('background_model_gauss_fraction', 'Fraction of Gaussian in Background Model', 0.3, 0.01, 1.) 
) else: model = SignalModel(name = 'signal_model', title = 'Signal Model', x = b_mass, mean = RooRealVar('mean', '#mu', 5.279, peak_x_min, peak_x_max), width = RooRealVar('width_narrow_gauss', '#sigma', 0.03, 0.01, 0.1), width_wide = RooRealVar('width_wide_gauss', '#sigma_{wide}', 0.3, 0.1, 1.), alpha = RooRealVar('alpha', '#alpha', -1., -10., -0.1), n = RooRealVar('n', 'n', 2., 0.1, 10.), narrow_gauss_fraction = RooRealVar('signal_model_narrow_gauss_fraction', 'Fraction of Narrow Gaussian in Signal Model', 0.3, 0.01, 1.), cb_fraction = RooRealVar('signal_model_cb_fraction', 'Fraction of Crystal Ball Shape in Signal Model', 0.3, 0.01, 1.) ) show_plot(b_mass, b_mass_data, 'GeV/#it{c}^{2}', n_bins, fit, model.pdf, extended = False, components_to_plot = model.components, draw_legend = draw_legend) else: show_plot(b_mass, b_mass_data, 'GeV/#it{c}^{2}', n_bins) if plot_q_square: show_plot(q_square, q_square_data, 'GeV^{2}/#it{c}^{2}', n_bins) if plot_momentum_resolution: show_plot(error_p_tauplus_x, error_p_tauplus_x_data, None, n_bins) show_plot(error_p_tauplus_y, error_p_tauplus_y_data, None, n_bins) show_plot(error_p_tauplus_z, error_p_tauplus_z_data, None, n_bins) show_plot(error_p_tauminus_x, error_p_tauminus_x_data, None, n_bins) show_plot(error_p_tauminus_y, error_p_tauminus_y_data, None, n_bins) show_plot(error_p_tauminus_z, error_p_tauminus_z_data, None, n_bins) show_plot(error_p_nu_tauplus_x, error_p_nu_tauplus_x_data, None, n_bins) show_plot(error_p_nu_tauplus_y, error_p_nu_tauplus_y_data, None, n_bins) show_plot(error_p_nu_tauplus_z, error_p_nu_tauplus_z_data, None, n_bins) show_plot(error_p_nu_tauminus_x, error_p_nu_tauminus_x_data, None, n_bins) show_plot(error_p_nu_tauminus_y, error_p_nu_tauminus_y_data, None, n_bins) show_plot(error_p_nu_tauminus_z, error_p_nu_tauminus_z_data, None, n_bins) def main(argv): """The main function. Parses the command line arguments passed to the script and then runs the process function""" parser = argparse.ArgumentParser() parser.add_argument('-i', '--input-file', required = True, help = 'name of the file to process') parser.add_argument('-t', '--tree', type = str, default = 'Events', help = 'name of the event tree') parser.add_argument('-n', '--nevents', type = int, help = 'maximum number of events to process') parser.add_argument('-f', '--fit', action = 'store_true', help = 'fit the histogram') parser.add_argument('-b', '--background', action = 'store_true', help = 'use fit model for background events') parser.add_argument('-l', '--with-legend', action = 'store_true', help = 'draw legend') parser.add_argument('-q', '--q-square', action = 'store_true', help = 'plot q^2 distribution') parser.add_argument('-r', '--momentum-resolution', action = 'store_true', help = 'plot tau and neutrino momentum resolution distribution') parser.add_argument('-m', '--mctree', type = str, default = 'MCTruth', help = 'name of the tree with Monte-Carlo truth events') parser.add_argument('-v', '--verbose', action = 'store_true', help = 'run with increased verbosity') args = parser.parse_args() max_events = args.nevents if args.nevents else sys.maxint process(args.input_file, args.tree, max_events, NBINS, XMIN, XMAX, args.fit, args.background, PEAK_MIN, PEAK_MAX, args.with_legend, args.q_square, args.momentum_resolution, args.mctree, args.verbose) if __name__ == '__main__': main(sys.argv)
semkiv/fcc-reconstruction
reconstruction.py
Python
gpl-3.0
15,100
[ "CRYSTAL", "Gaussian" ]
c54d4ee276499447bbe90f55d98f40dbe06a5a7de4a166686dfde9b2f7c3d71e
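A minimal RooFit sketch of the fill-then-fit pattern the reconstruction script above relies on, assuming a working PyROOT installation; the single Gaussian here is only a stand-in for the script's SignalModel/BackgroundModel classes, and every name and value is illustrative rather than taken from the repository.

import ROOT
from ROOT import RooRealVar, RooArgSet, RooDataSet, RooGaussian

# Observable and an unbinned dataset to collect reconstructed masses
m_b = RooRealVar('mB', 'm_{B}', 4.5, 6.5)                      # GeV/c^2
data = RooDataSet('mB_data', 'm_{B} data', RooArgSet(m_b))

for value in (5.21, 5.28, 5.31, 5.27, 5.25):                   # toy reconstructed masses
    m_b.setVal(value)
    data.add(RooArgSet(m_b))

# Fit a single Gaussian peak to the collected values
mean = RooRealVar('mean', '#mu', 5.279, 4.7, 5.5)
sigma = RooRealVar('sigma', '#sigma', 0.05, 0.01, 0.5)
pdf = RooGaussian('gauss', 'Gaussian peak', m_b, mean, sigma)
pdf.fitTo(data, ROOT.RooFit.PrintLevel(-1))                    # unbinned maximum-likelihood fit

print('fitted mean = {:.3f} +/- {:.3f}'.format(mean.getVal(), mean.getError()))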
# Photovoltaics surface # # Ladybug: A Plugin for Environmental Analysis (GPL) started by Mostapha Sadeghipour Roudsari # # This file is part of Ladybug. # # Copyright (c) 2013-2015, Djordje Spasic and Jason Sensibaugh <djordjedspasic@gmail.com and sensij@yahoo.com> # Ladybug is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published # by the Free Software Foundation; either version 3 of the License, # or (at your option) any later version. # # Ladybug is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ladybug; If not, see <http://www.gnu.org/licenses/>. # # @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+> """ Use this component to calculate amount of electrical energy that can be produced by a surface if a certain percentage of it is covered with Photovoltaics. Component based on NREL PVWatts v1 fixed tilt calculator for crystalline silicon (c-Si) photovoltaics. - Sources: http://www.nrel.gov/docs/fy14osti/60272.pdf https://pvpmc.sandia.gov - Provided by Ladybug 0.0.61 input: _epwFile: Input .epw file path by using the "File Path" parameter, or Ladybug's "Open EPW And STAT Weather Files" component. _PVsurface: - Input planar Surface (not a polysurface) on which the PV modules will be applied. If you have a polysurface, explode it (using "Deconstruct Brep" component) and then feed its Faces(F) output to _PVsurface. Surface normal should be faced towards the sun. - Or input surface Area, in square meters (example: "100"). - Or input PV system size (nameplate DC power rating), in kiloWatts at standard test conditions (example: "4 kw"). PVsurfacePercent_: The percentage of surface which will be used for PV modules (range 0-100). - Some countries and states, have local codes which limit the portion of the roof, which can be covered by crystalline silicon modules. For example, this may include having setbacks(distances) of approximatelly 90cm from side and top edges of a roof, as a fire safety regulation. - If not supplied, default value of 100 (all surface area will be covered in PV modules) is used. - In percent (%). PVsurfaceTiltAngle_: The angle from horizontal of the inclination of the PVsurface. Example: 0 = horizontal, 90 = vertical. (range 0-180) - If not supplied, but surface inputted into "_PVsurface", PVsurfaceTiltAngle will be calculated from an angle PVsurface closes with XY plane. If not supplied, but surface NOT inputted into "_PVsurface" (instead, a surface area or system size inputed), location's latitude will be used as default value. PVsurfaceAzimuthAngle_: The orientation angle (clockwise from the true north) of the PVsurface normal vector. (range 0-360) - If not supplied, but surface inputted into "_PVsurface", PVsurfaceAzimuthAngle will be calculated from an angle PVsurface closes with its north. If not supplied, but surface NOT inputted into "_PVsurface" (instead, a surface area or system size inputed), default value of 180 (south-facing) for locations in the northern hemisphere or 0 (north-facing) for locations in the southern hemisphere, will be used. DCtoACderateFactor_: Factor which accounts for various locations and instances in a PV system where power is lost from DC system nameplate to AC power. It ranges from 0 to 1. 
It can be calculated with Ladybug's "DC to AC derate factor" component. - If not supplied, default value of 0.85 will be used. moduleActiveAreaPercent_: Percentage of the module's area excluding module framing and gaps between cells. - If not supplied, default value of 90(%) will be used. - In percent (%). moduleType_: Module type and mounting configuration: - 0 = Glass/cell/glass, Close (flush) roof mount (pv array mounted parallel and relatively close to the plane of the roof (between two and six inches)) 1 = Glass/cell/polymer sheet, Insulated back (pv curtain wall, pv skylights) 2 = Glass/cell/polymer sheet, Open rack (ground mount array, flat/sloped roof array that is tilted, pole-mount solar panels, solar carports, solar canopies) 3 = Glass/cell/glass, Open rack (the same as upper "2" type, just with a glass on the back part of the module). - If not supplied, default type: "Glass/cell/glass, Close (flush) roof mount" (0) is used. moduleEfficiency_: The ratio of electrical energy output from the PV module to input solar energy from the sun. Current typical module efficiencies for crystalline silicon modules range from 14-20% - If not defined, default value of 15(%) will be used. - In percent (%). north_: Input a vector to be used as a true North direction, or a number between 0 and 360 that represents the clockwise degrees off from the Y-axis. - If not supplied, default North direction will be set to the Y-axis (0 degrees). albedo_: or Reflection coefficient - the average ratio of the global incident solar radiation reflected from the area surrounding the PV surface. It ranges from 0 for very dark (theoretically reflects no solar radiation) to 1 (theoretically reflects all of the incident solar radiation) for white surfaces. - It depends on the time of the year/day, surface type, temperature, vegetation, presence of water, ice and snow etc. Most PV calculation softwares use annual average value of 0.2 for cities, suburbs and countryside locations. - If not supplied, default value of 0.20 will be used, corrected for the presence of snow. - Unitless. annualHourlyData_: An optional list of hourly data from Ladybug's "Import epw" component (e.g. dryBulbTemperature), which will be used for "conditionalStatement_". conditionalStatement_: This input allows users to calculate the Photovoltaics surface component results only for those annualHourlyData_ values which fit specific conditions or criteria. To use this input correctly, hourly data, such as dryBulbTemperature or windSpeed, must be plugged into the "annualHourlyData_" input. The conditional statement input here should be a valid condition statement in Python, such as "a>25" or "b<3" (without the quotation marks). conditionalStatement_ accepts "and" and "or" operators. To visualize the hourly data, English letters should be used as variables, and each letter alphabetically corresponds to each of the lists (in their respective order): "a" always represents the 1st list, "b" always represents the 2nd list, etc. - For example, if you have an hourly dryBulbTemperature connected as the first list, and windSpeed connected as the second list (both to the annualHourlyData_ input), and you want to plot the data for the time period when temperature is between 18C and 23C, and windSpeed is larger than 3m/s, the conditionalStatement_ should be written as "18<a<23 and b>3" (without the quotation marks). _runIt: ... output: readMe!: ... 
ACenergyPerHour: AC power output for each hour during a year, in kWh ACenergyPerMonth: Total AC power output for each month, in kWh ACenergyPerYear: Total AC power output for a whole year, in kWh averageDailyACenergyPerMonth: An average AC power output per day in each month, in kWh/day averageDailyACenergyPerYear: An average AC power output per day in a whole year, in kWh/day DCenergyPerHour: DC power output of the PV array for each hour during a year, in kWh totalRadiationPerHour: Total Incident POA (Plane of array) irradiance for each hour during a year, in kWh/m2 moduleTemperaturePerHour: Module's back surface temperature for each hour during year, in C cellTemperaturePerHour: Cell temperature for each hour during year, in C nameplateDCpowerRating: DC rating or system size of the PV system. In kW PVcoverArea: An area of the inputted _PVsurface which will be covered with Photovoltaics. In m2 PVcoverActiveArea: coverArea with excluded module framing and gaps between cells. In m2 """ ghenv.Component.Name = "Ladybug_Photovoltaics Surface" ghenv.Component.NickName = "PhotovoltaicsSurface" ghenv.Component.Message = 'VER 0.0.61\nNOV_05_2015' ghenv.Component.Category = "Ladybug" ghenv.Component.SubCategory = "3 | EnvironmentalAnalysis" #compatibleLBVersion = VER 0.0.61\nNOV_03_2015 try: ghenv.Component.AdditionalHelpFromDocStrings = "4" except: pass import Grasshopper.Kernel as gh import rhinoscriptsyntax as rs import scriptcontext as sc import Rhino import math import re def getEpwData(epwFile, albedo): if epwFile: try: # location data locationName, latitude, longitude, timeZone, elevation, locationString = lb_preparation.epwLocation(epwFile) # weather data weatherData = lb_preparation.epwDataReader(epwFile, locationName) dryBulbTemperature, dewPointTemperature, relativeHumidity, windSpeed, windDirection, directNormalRadiation, diffuseHorizontalRadiation, globalHorizontalRadiation, directNormalIlluminance, diffuseHorizontalIlluminance, globalHorizontalIlluminance, totalSkyCover, liquidPrecipitationDepth, barometricPressure, modelYear = weatherData Ta = dryBulbTemperature[7:] ws = windSpeed[7:] DNI = directNormalRadiation[7:] DHI = diffuseHorizontalRadiation[7:] if (albedo == None) or (albedo < 0) or (albedo > 1): albedoL = lb_photovoltaics.calculateAlbedo(Ta) # default else: albedoL = [albedo for i in range(8760)] yearsHOY = modelYear[7:] monthsHOY = [1 for i in range(744)] + [2 for i in range(672)] + [3 for i in range(744)] + [4 for i in range(720)] + [5 for i in range(744)] + [6 for i in range(720)] + [7 for i in range(744)] + [8 for i in range(744)] + [9 for i in range(720)] + [10 for i in range(744)] + [11 for i in range(720)] + [12 for i in range(744)] numberOfDaysMonth = [31,28,31,30,31,30,31,31,30,31,30,31] daysHOY = [] day = 1 for i,item in enumerate(numberOfDaysMonth): for k in range(item): for g in range(24): daysHOY.append(day) day += 1 day = 1 hoursHOY = [] hour = 1 for i in range(365): for k in range(24): hoursHOY.append(hour) hour += 1 hour = 1 HOYs = range(1,8761) validEpwData = True printMsg = "ok" return locationName, float(latitude), float(longitude), float(timeZone), float(elevation), Ta, ws, DNI, DHI, yearsHOY, monthsHOY, daysHOY, hoursHOY, HOYs, albedoL, validEpwData, printMsg except Exception, e: # something is wrong with "_epwFile" input locationName = latitude = longitude = timeZone = elevation = Ta = ws = DNI = DHI = yearsHOY = monthsHOY = daysHOY = hoursHOY = HOYs = albedoL = None validEpwData = False printMsg = "Something is wrong with \"_epwFile\" input." 
else: locationName = latitude = longitude = timeZone = elevation = Ta = ws = DNI = DHI = yearsHOY = monthsHOY = daysHOY = hoursHOY = HOYs = albedoL = None validEpwData = False printMsg = "Please supply .epw file path to \"_epwFile\" input" return locationName, latitude, longitude, timeZone, elevation, Ta, ws, DNI, DHI, yearsHOY, monthsHOY, daysHOY, hoursHOY, HOYs, albedoL, validEpwData, printMsg def PVsurfaceInputData(PVsurface, PVsurfacePercent, unitAreaConversionFactor, DCtoACderateFactor, moduleActiveAreaPercent, moduleType, moduleEfficiency): if (PVsurface == None): PVsurfaceInputType = nameplateDCpowerRating = srfArea = activeArea = PVsurfacePercent = DCtoACderateFactor = moduleActiveAreaPercent = moduleType = moduleEfficiency = None validPVsurfaceData = False printMsg = "Please input Surface (not polysurface) to \"_PVsurface\".\nOr input surface Area in square meters (example: \"100\").\nOr input Nameplate DC power rating in kiloWatts (example: \"4 kw\")." return PVsurfaceInputType, nameplateDCpowerRating, srfArea, activeArea, PVsurfacePercent, DCtoACderateFactor, moduleActiveAreaPercent, moduleType, moduleEfficiency, validPVsurfaceData, printMsg if (PVsurfacePercent == None) or (PVsurfacePercent < 0) or (PVsurfacePercent > 100): PVsurfacePercent = 100 # default value 100% if (DCtoACderateFactor == None) or (DCtoACderateFactor < 0) or (DCtoACderateFactor > 1): DCtoACderateFactor = 0.85 # default value (corresponds to 11.42% of PVWatts v5 Total Losses) if (moduleActiveAreaPercent == None) or (moduleActiveAreaPercent < 0) or (moduleActiveAreaPercent > 100): moduleActiveAreaPercent = 90 # default value in % if (moduleType == None) or (moduleType < 0) or (moduleType > 3): moduleType = 0 # Glass/cell/glass, Close (flush) roof mount if (moduleEfficiency == None) or (moduleEfficiency < 0) or (moduleEfficiency > 100): moduleEfficiency = 15 # for crystalline silicon # check PVsurface input obj = rs.coercegeometry(PVsurface) # input is surface if isinstance(obj,Rhino.Geometry.Brep): PVsurfaceInputType = "brep" facesCount = obj.Faces.Count if facesCount > 1: # inputted polysurface PVsurfaceInputType = nameplateDCpowerRating = srfArea = activeArea = PVsurfacePercent = DCtoACderateFactor = moduleActiveAreaPercent = moduleType = moduleEfficiency = None validPVsurfaceData = False printMsg = "The brep you supplied to \"_PVsurface\" is a polysurface. 
Please supply a surface" return PVsurfaceInputType, nameplateDCpowerRating, srfArea, activeArea, PVsurfacePercent, DCtoACderateFactor, moduleActiveAreaPercent, moduleType, moduleEfficiency, validPVsurfaceData, printMsg else: # inputted brep with a single surface srfArea = Rhino.Geometry.AreaMassProperties.Compute(obj).Area * (PVsurfacePercent/100) # area in document units srfArea = srfArea * unitAreaConversionFactor # area in m2 activeArea = srfArea * (moduleActiveAreaPercent/100) # in m2 nameplateDCpowerRating = activeArea * (1 * (moduleEfficiency/100)) # in kW validPVsurfaceData = True printMsg = "ok" return PVsurfaceInputType, nameplateDCpowerRating, srfArea, activeArea, PVsurfacePercent, DCtoACderateFactor, moduleActiveAreaPercent, moduleType, moduleEfficiency, validPVsurfaceData, printMsg else: PVsurfaceInputType = "number" try: # input is number (pv surface area in m2) srfArea = float(PVsurface) * (PVsurfacePercent/100) # area in document units srfArea = srfArea * unitAreaConversionFactor # area in m2 activeArea = srfArea * (moduleActiveAreaPercent/100) # in m2 nameplateDCpowerRating = activeArea * (1 * (moduleEfficiency/100)) # in kW validPVsurfaceData = True printMsg = "ok" return PVsurfaceInputType, nameplateDCpowerRating, srfArea, activeArea, PVsurfacePercent, DCtoACderateFactor, moduleActiveAreaPercent, moduleType, moduleEfficiency, validPVsurfaceData, printMsg except Exception, e: pass # input is string (nameplateDCpowerRating in kW) lowerString = PVsurface.lower() if "kw" in lowerString: try: nameplateDCpowerRating = float(lowerString.replace("kw","")) * (PVsurfacePercent/100) # in kW activeArea = nameplateDCpowerRating / (1 * (moduleEfficiency/100)) # in m2 srfArea = activeArea * (100/moduleActiveAreaPercent) # in m2 validPVsurfaceData = True printMsg = "ok" return PVsurfaceInputType, nameplateDCpowerRating, srfArea, activeArea, PVsurfacePercent, DCtoACderateFactor, moduleActiveAreaPercent, moduleType, moduleEfficiency, validPVsurfaceData, printMsg except: pass PVsurfaceInputType = nameplateDCpowerRating = srfArea = activeArea = PVsurfacePercent = DCtoACderateFactor = moduleActiveAreaPercent = moduleType = moduleEfficiency = None validPVsurfaceData = False printMsg = "Something is wrong with your \"_PVsurface\" input data.\nPlease check the \"_PVsurface\" input's description to understand which are the correct input types that it supports." return PVsurfaceInputType, nameplateDCpowerRating, srfArea, activeArea, PVsurfacePercent, DCtoACderateFactor, moduleActiveAreaPercent, moduleType, moduleEfficiency, validPVsurfaceData, printMsg def checkAnnualHourlyInputData(annualHourlyData): if annualHourlyData == []: annualHourlyDataLists = [] annualHourlyDataListsEpwNames = [] validAnnualHourlyData = True printMsg = "ok" return validAnnualHourlyData, annualHourlyDataLists, annualHourlyDataListsEpwNames, printMsg elif len(annualHourlyData) % 8767 != 0: annualHourlyDataLists = annualHourlyDataListsEpwNames = None validAnnualHourlyData = False printMsg = "Your annualHourlyData_ input is not correct. 
Please input complete 8767 items long list(s) from \"Ladybug_Import epw\" component" return annualHourlyDataLists, validAnnualHourlyData, annualHourlyDataListsEpwNames, printMsg else: annualHourlyDataLists = [] annualHourlyDataListsEpwNames = [] startIndex = 0 endIndex = 8767 for i in range(int(len(annualHourlyData)/8767)): untrimmedList = annualHourlyData[startIndex:endIndex] trimmedList = untrimmedList[7:] annualHourlyDataListsName = untrimmedList[2] annualHourlyDataLists.append(trimmedList) annualHourlyDataListsEpwNames.append(annualHourlyDataListsName) startIndex += 8767 endIndex += 8767 validAnnualHourlyData = True printMsg = "ok" return validAnnualHourlyData, annualHourlyDataLists, annualHourlyDataListsEpwNames, printMsg def checkConditionalStatement(conditionalStatement, annualHourlyDataLists, annualHourlyDataListsEpwNames, weatherPerHourDataSubLists, addZero): if conditionalStatement == None and len(annualHourlyDataLists) > 0: # conditionalStatement_ not inputted, annualHourlyData_ inputted validConditionalStatement = False weatherPerHourDataConditionalStatementSubLists = conditionalStatementForFinalPrint = None printMsg = "Please supply \"conditionalStatement_\" for inputted \"annualHourlyData_\" data." return validConditionalStatement, weatherPerHourDataConditionalStatementSubLists, conditionalStatementForFinalPrint, printMsg elif conditionalStatement == None and len(annualHourlyDataLists) == 0: # conditionalStatement_ not inputted, annualHourlyData_ not inputted conditionalStatement = "True" else: # conditionalStatement_ inputted, annualHourlyData_ not if annualHourlyDataLists == []: validConditionalStatement = False weatherPerHourDataConditionalStatementSubLists = conditionalStatementForFinalPrint = None printMsg = "Please supply \"annualHourlyData_\" data for inputted \"conditionalStatement_\"." return validConditionalStatement, weatherPerHourDataConditionalStatementSubLists, conditionalStatementForFinalPrint, printMsg else: # both conditionalStatement_ and annualHourlyData_ inputted conditionalStatement = conditionalStatement.lower() conditionalStatement = re.sub(r"\b([a-z])\b", r"\1[i]", conditionalStatement) annualHourlyDataListsNames = map(chr, range(97, 123)) # finalPrint conditonal statements for "printOutput" function if conditionalStatement != "True": # conditionalStatement_ not inputted # replace conditionalStatement annualHourlyDataListsNames[i] names with annualHourlyDataListsEpwNames: conditionalStatementForFinalPrint = conditionalStatement[:] for i in range(len(annualHourlyDataLists)): conditionalStatementForFinalPrint = conditionalStatementForFinalPrint.replace(annualHourlyDataListsNames[i]+"[i]", annualHourlyDataListsEpwNames[i]) else: conditionalStatementForFinalPrint = "No condition" annualHourlyDataListsNames = map(chr, range(97, 123)) numberOfLetters = 0 for letter in annualHourlyDataListsNames: changedLetter = letter+"[i]" if changedLetter in conditionalStatement: numberOfLetters += 1 if numberOfLetters > len(annualHourlyDataLists): validConditionalStatement = False weatherPerHourDataConditionalStatementSubLists = conditionalStatementForFinalPrint = None printMsg = "The number of a,b,c... variables you supplied in \"conditionalStatement_\" is larger than the number of \"annualHourlyData_\" lists you inputted. Please make the numbers of these two equal or less." 
return validConditionalStatement, weatherPerHourDataConditionalStatementSubLists, conditionalStatementForFinalPrint, printMsg else: for i in range(len(annualHourlyDataLists)): exec("%s = %s" % (annualHourlyDataListsNames[i],annualHourlyDataLists[i])) try: weatherPerHourDataConditionalStatementSubLists = [] for i in range(len(weatherPerHourDataSubLists)): weatherPerHourDataConditionalStatementSubLists.append([]) for i in range(len(weatherPerHourDataSubLists[0])): exec("conditionalSt = %s" % conditionalStatement) if addZero == True: # add 0 if conditionalStatement == False if conditionalSt: for k in range(len(weatherPerHourDataConditionalStatementSubLists)): weatherPerHourDataConditionalStatementSubLists[k].append(weatherPerHourDataSubLists[k][i]) else: for k in range(len(weatherPerHourDataConditionalStatementSubLists)): weatherPerHourDataConditionalStatementSubLists[k].append(0) else: # skip the value if conditionalSt: for k in range(len(weatherPerHourDataConditionalStatementSubLists)): weatherPerHourDataConditionalStatementSubLists[k].append(weatherPerHourDataSubLists[k][i]) except Exception, e: validConditionalStatement = False weatherPerHourDataConditionalStatementSubLists = conditionalStatementForFinalPrint = None printMsg = "Your \"conditionalStatement_\" is incorrect. Please provide a valid conditional statement in Python, such as \"a>25 and b<80\" (without the quotation marks)" return validConditionalStatement, weatherPerHourDataConditionalStatementSubLists, conditionalStatementForFinalPrint, printMsg if len(weatherPerHourDataConditionalStatementSubLists[0]) == 0: validConditionalStatement = False weatherPerHourDataConditionalStatementSubLists = conditionalStatementForFinalPrint = None printMsg = "No \"annualHourlyData_\" coresponds to \"conditionalStatement_\". 
Please edit your \"conditionalStatement_\"" return validConditionalStatement, weatherPerHourDataConditionalStatementSubLists, conditionalStatementForFinalPrint, printMsg else: validConditionalStatement = True printMsg = "ok" return validConditionalStatement, weatherPerHourDataConditionalStatementSubLists, conditionalStatementForFinalPrint, printMsg def main(latitude, longitude, timeZone, locationName, years, months, days, hours, HOYs, nameplateDCpowerRating, DCtoACderateFactor, srfArea, srfTiltD, srfAzimuthD, moduleType, moduleEfficiency, dryBulbTemperature, windSpeed, directNormalRadiation, diffuseHorizontalRadiation, albedoL): # solar radiation, AC,DC power output, module temperature, cell temperature ACenergyPerHour = ["key:location/dataType/units/frequency/startsAt/endsAt", locationName, "AC power output", "kWh", "Hourly", (1, 1, 1), (12, 31, 24)] DCenergyPerHour = ["key:location/dataType/units/frequency/startsAt/endsAt", locationName, "DC power output", "kWh", "Hourly", (1, 1, 1), (12, 31, 24)] totalRadiationPerHour = ["key:location/dataType/units/frequency/startsAt/endsAt", locationName, "Total POA irradiance", "kWh/m2", "Hourly", (1, 1, 1), (12, 31, 24)] moduleTemperaturePerHour = ["key:location/dataType/units/frequency/startsAt/endsAt", locationName, "Module temperature", "C", "Hourly", (1, 1, 1), (12, 31, 24)] cellTemperaturePerHour = ["key:location/dataType/units/frequency/startsAt/endsAt", locationName, "Cell temperature", "C", "Hourly", (1, 1, 1), (12, 31, 24)] hoyForMonths = [0, 744, 1416, 2160, 2880, 3624, 4344, 5088, 5832, 6552, 7296, 8016, 8760, 9000] numberOfDaysInThatMonth = [31,28,31,30,31,30,31,31,30,31,30,31] monthsOfYearHoyPac = [[],[],[],[],[],[],[],[],[],[],[],[]] averageDailyACenergyPerMonth = [] for i,hoy in enumerate(HOYs): sunZenithD, sunAzimuthD, sunAltitudeD = lb_photovoltaics.NRELsunPosition(latitude, longitude, timeZone, years[i], months[i], days[i], hours[i]-1) Epoa, Eb, Ed_sky, Eground, AOI_R = lb_photovoltaics.POAirradiance(sunZenithD, sunAzimuthD, srfTiltD, srfAzimuthD, directNormalRadiation[i], diffuseHorizontalRadiation[i], albedoL[i]) Tm, Tcell, Pdc_, Pac = lb_photovoltaics.pvwatts(nameplateDCpowerRating, DCtoACderateFactor, AOI_R, Epoa, Eb, Ed_sky, Eground, moduleType, dryBulbTemperature[i], windSpeed[i], directNormalRadiation[i], diffuseHorizontalRadiation[i]) Epoa = Epoa/1000 # to kWh/m2 ACenergyPerHour.append(Pac) DCenergyPerHour.append(Pdc_) totalRadiationPerHour.append(Epoa) moduleTemperaturePerHour.append(Tm) cellTemperaturePerHour.append(Tcell) for k,item in enumerate(hoyForMonths): if hoy >= hoyForMonths[k]+1 and hoy <= hoyForMonths[k+1]: monthsOfYearHoyPac[k].append(Pac) ACenergyPerMonth = [sum(monthPac) for monthPac in monthsOfYearHoyPac] # in kWh ACenergyPerYear = sum(ACenergyPerMonth) # in kWh for g,sumMonthPac in enumerate(ACenergyPerMonth): MonthPac = (sumMonthPac)/numberOfDaysInThatMonth[g] averageDailyACenergyPerMonth.append(MonthPac) # in kWh/day averageDailyACenergyPerYear = sum(averageDailyACenergyPerMonth)/12 # in kWh/day # adding headings to hourly and monthly lists ACenergyPerMonth = ["key:location/dataType/units/frequency/startsAt/endsAt", locationName, "AC power output", "kWh", "Monthly-> total", (1, 1, 1), (12, 31, 24)] + ACenergyPerMonth averageDailyACenergyPerMonth = ["key:location/dataType/units/frequency/startsAt/endsAt", locationName, "AC power output", "kWh", "Monthly-> averaged for each day", (1, 1, 1), (12, 31, 24)] + averageDailyACenergyPerMonth return ACenergyPerHour, ACenergyPerMonth, ACenergyPerYear, 
averageDailyACenergyPerMonth, averageDailyACenergyPerYear, DCenergyPerHour, totalRadiationPerHour, moduleTemperaturePerHour, cellTemperaturePerHour def printOutput(unitAreaConversionFactor, north, latitude, longitude, timeZone, elevation, locationName, albedoL, nameplateDCpowerRating, srfArea, activeArea, PVsurfacePercent, DCtoACderateFactor, srfTiltD, srfAzimuthD, moduleActiveAreaPercent, moduleType, moduleEfficiency, conditionalStatementForFinalPrint): resultsCompletedMsg = "PVsurface component results successfully completed!" moduleTypesL = ["Glass/cell/glass Close (flush) roof mount", "Glass/cell/polymer sheet Insulated back", "Glass/cell/polymer sheet Open rack", "Glass/cell/glass Open rack"] model = moduleTypesL[moduleType] printOutputMsg = \ """ Input data: Location (): %s Latitude (): %s Longitude (): %s Time zone (-): %s Elevation (m): %s North (): %s Average annual albedo(-): %0.2f Surface percentage used for PV modules (percent): %0.2f Active area Percentage: %0.2f Surface area (m2): %0.2f Surface active area (m2): %0.2f Nameplate DC power rating (kW): %0.2f Overall DC to AC derate factor (-): %0.3f Module type and mounting: %s Module efficiency (percent): %s Array type: fixed tilt Surface azimuth angle (): %0.2f Surface tilt angle (): %0.2f Caclulation based on the following condition: %s """ % (locationName, latitude, longitude, timeZone, elevation, north, sum(albedoL)/8760, PVsurfacePercent, moduleActiveAreaPercent, srfArea, activeArea, nameplateDCpowerRating, DCtoACderateFactor, model, moduleEfficiency, srfAzimuthD, srfTiltD, conditionalStatementForFinalPrint) print resultsCompletedMsg print printOutputMsg level = gh.GH_RuntimeMessageLevel.Warning if sc.sticky.has_key("ladybug_release"): if sc.sticky["ladybug_release"].isCompatible(ghenv.Component): lb_preparation = sc.sticky["ladybug_Preparation"]() lb_photovoltaics = sc.sticky["ladybug_Photovoltaics"]() locationName, latitude, longitude, timeZone, elevation, dryBulbTemperature, windSpeed, directNormalRadiation, diffuseHorizontalRadiation, years, months, days, hours, HOYs, albedoL, validEpwData, printMsg = getEpwData(_epwFile, albedo_) if validEpwData: if _PVsurface: unitConversionFactor = lb_preparation.checkUnits() unitAreaConversionFactor = unitConversionFactor**2 PVsurfaceInputType, nameplateDCpowerRating, srfArea, activeArea, PVsurfacePercent, DCtoACderateFactor, moduleActiveAreaPercent, moduleType, moduleEfficiency, validPVsurfaceData, printMsg = PVsurfaceInputData(_PVsurface, PVsurfacePercent_, unitAreaConversionFactor, DCtoACderateFactor_, moduleActiveAreaPercent_, moduleType_, moduleEfficiency_) if validPVsurfaceData: validAnnualHourlyData, annualHourlyDataLists, annualHourlyDataListsEpwNames, printMsg = checkAnnualHourlyInputData(annualHourlyData_) if validAnnualHourlyData: validConditionalStatement, weatherPerHourDataConditionalStatementSubLists, conditionalStatementForFinalPrint, printMsg = checkConditionalStatement(conditionalStatement_, annualHourlyDataLists, annualHourlyDataListsEpwNames, [dryBulbTemperature, windSpeed, directNormalRadiation, diffuseHorizontalRadiation], True) if validConditionalStatement: dryBulbTemperatureCondStat, windSpeedCondStat, directNormalRadiationCondStat, diffuseHorizontalRadiationCondStat = weatherPerHourDataConditionalStatementSubLists # all inputs ok if _runIt: srfAzimuthD, surfaceTiltDCalculated = lb_photovoltaics.srfAzimuthAngle(PVsurfaceAzimuthAngle_, PVsurfaceInputType, _PVsurface, latitude) correctedSrfAzimuthD, northDeg, validNorth, printMsg = 
lb_photovoltaics.correctSrfAzimuthDforNorth(north_, srfAzimuthD) srfTiltD = lb_photovoltaics.srfTiltAngle(PVsurfaceTiltAngle_, surfaceTiltDCalculated, PVsurfaceInputType, _PVsurface, latitude) ACenergyPerHour, ACenergyPerMonth, ACenergyPerYear, averageDailyACenergyPerMonth, averageDailyACenergyPerYear, DCenergyPerHour, totalRadiationPerHour, moduleTemperaturePerHour, cellTemperaturePerHour = main(latitude, longitude, timeZone, locationName, years, months, days, hours, HOYs, nameplateDCpowerRating, DCtoACderateFactor, srfArea, srfTiltD, correctedSrfAzimuthD, moduleType, moduleEfficiency, dryBulbTemperatureCondStat, windSpeedCondStat, directNormalRadiationCondStat, diffuseHorizontalRadiationCondStat, albedoL) printOutput(unitAreaConversionFactor, northDeg, latitude, longitude, timeZone, elevation, locationName, albedoL, nameplateDCpowerRating, srfArea, activeArea, PVsurfacePercent, DCtoACderateFactor, srfTiltD, correctedSrfAzimuthD, moduleActiveAreaPercent, moduleType, moduleEfficiency, conditionalStatementForFinalPrint) PVcoverArea = srfArea; PVcoverActiveArea = activeArea else: print "All inputs are ok. Please set the \"_runIt\" to True, in order to run the Photovoltaics surface component" else: print printMsg ghenv.Component.AddRuntimeMessage(level, printMsg) else: print printMsg ghenv.Component.AddRuntimeMessage(level, printMsg) else: print printMsg ghenv.Component.AddRuntimeMessage(level, printMsg) else: printMsg = "Please input a Surface (not a polysurface) to \"_PVsurface\".\nOr input surface Area in square meters (example: \"100\").\nOr input Nameplate DC power rating in kiloWatts (example: \"4 kw\")." print printMsg ghenv.Component.AddRuntimeMessage(level, printMsg) else: print printMsg ghenv.Component.AddRuntimeMessage(level, printMsg) else: printMsg = "You need a newer version of Ladybug to use this component." + \ "Use updateLadybug component to update userObjects.\n" + \ "If you have already updated userObjects drag the Ladybug_Ladybug component " + \ "into the canvas and try again." print printMsg else: printMsg = "First please let the Ladybug fly..." print printMsg ghenv.Component.AddRuntimeMessage(level, printMsg)
boris-p/ladybug
src/Ladybug_Photovoltaics Surface.py
Python
gpl-3.0
35,501
[ "EPW" ]
12bfbed5a06bad78b6d579055e9ae465876e0e7e8439d3c5e3d3d5f8d7cbd40c
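The sizing arithmetic inside PVsurfaceInputData above reduces to three multiplications; here is a short, dependency-free sketch (no Rhino/Grasshopper needed), with made-up input values and the standard-test-condition irradiance of 1 kW/m2 written out explicitly.

def nameplate_dc_rating(surface_area_m2, pv_surface_percent=100.0,
                        module_active_area_percent=90.0,
                        module_efficiency_percent=15.0):
    """Return (covered area, active area, nameplate DC rating in kW)."""
    cover_area = surface_area_m2 * pv_surface_percent / 100.0          # part of the surface used
    active_area = cover_area * module_active_area_percent / 100.0      # minus framing and cell gaps
    rating_kw = active_area * 1.0 * module_efficiency_percent / 100.0  # 1 kW/m2 at STC
    return cover_area, active_area, rating_kw


cover, active, kw = nameplate_dc_rating(100.0)   # e.g. a 100 m2 roof surface
print('cover = %.1f m2, active = %.1f m2, rating = %.2f kW' % (cover, active, kw))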
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Grads(AutotoolsPackage): """The Grid Analysis and Display System (GrADS) is an interactive desktop tool that is used for easy access, manipulation, and visualization of earth science data. GrADS has two data models for handling gridded and station data. GrADS supports many data file formats, including binary (stream or sequential), GRIB (version 1 and 2), NetCDF, HDF (version 4 and 5), and BUFR (for station data).""" homepage = "http://cola.gmu.edu/grads/grads.php" url = "ftp://cola.gmu.edu/grads/2.2/grads-2.2.1-src.tar.gz" version('2.2.1', sha256='695e2066d7d131720d598bac0beb61ac3ae5578240a5437401dc0ffbbe516206') variant('geotiff', default=True, description="Enable GeoTIFF support") variant('shapefile', default=True, description="Enable Shapefile support") """ # FIXME: Fails with undeclared functions (tdefi, tdef, ...) in gauser.c variant('hdf5', default=False, description="Enable HDF5 support") variant('hdf4', default=False, description="Enable HDF4 support") variant('netcdf', default=False, description="Enable NetCDF support") depends_on('hdf5', when='+hdf5') depends_on('hdf', when='+hdf4') depends_on('netcdf-c', when='+netcdf') """ depends_on('libgeotiff', when='+geotiff') depends_on('shapelib', when='+shapefile') depends_on('udunits') depends_on('libgd') depends_on('libxmu') depends_on('cairo +X +pdf +fc +ft') depends_on('readline') depends_on('pkgconfig', type='build') def setup_build_environment(self, env): env.set('SUPPLIBS', '/') def setup_run_environment(self, env): env.set('GADDIR', self.prefix.data) @run_after('install') def copy_data(self): with working_dir(self.build_directory): install_tree('data', self.prefix.data) with working_dir(self.package_dir): install('udpt', self.prefix.data) filter_file( r'({lib})', self.prefix.lib, self.prefix.data.udpt ) def configure_args(self): args = [] args.extend(self.with_or_without('geotiff')) return args
rspavel/spack
var/spack/repos/builtin/packages/grads/package.py
Python
lgpl-2.1
2,412
[ "NetCDF" ]
30e8700c64bc0842c4b4a2dc9b1bc6f1c94e57d02b606819c1d28bd931b76277
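For comparison, a hypothetical bare-bones Spack recipe isolating the variant-to-configure-flag pattern the grads package above uses; the package name, URL and checksum below are placeholders, not a real package.

from spack import *


class Example(AutotoolsPackage):
    """Toy recipe: one optional dependency driven by a variant."""

    homepage = "https://example.org"
    url = "https://example.org/example-1.0.tar.gz"

    version('1.0', sha256='0' * 64)  # placeholder checksum, not a real digest

    variant('geotiff', default=True, description="Enable GeoTIFF support")
    depends_on('libgeotiff', when='+geotiff')

    def configure_args(self):
        # expands to --with-geotiff or --without-geotiff depending on the spec
        return self.with_or_without('geotiff')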
"""This module provides functions to format several catalogues These are: - eboss_convert_dla - desi_convert_dla - desi_from_truth_to_drq - desi_from_ztarget_to_drq See the respective docstrings for more details """ import os import numpy as np import fitsio from scipy.constants import speed_of_light as speed_light from .utils import userprint def eboss_convert_dla(in_path, drq_filename, out_path, drq_z_key='Z'): """Converts Pasquier Noterdaeme ASCII DLA catalog to a fits file Args: in_path: string Full path filename containing the ASCII DLA catalogue drq_filename: string Filename of the DRQ catalogue out_path: string Full path filename where the fits DLA catalogue will be written to drq_z_key: string Name of the column of DRQ containing the quasrs redshift """ # Read catalogue filename = open(os.path.expandvars(in_path), 'r') for line in filename: cols = line.split() if (len(cols) == 0) or (cols[0][0] == '#') or (cols[0][0] == '-'): continue if cols[0] == 'ThingID': from_key_to_index = {key: index for index, key in enumerate(cols)} dla_cat = {key: [] for key in from_key_to_index} for key in 'MJD-plate-fiber'.split('-'): dla_cat[key] = [] continue for key in from_key_to_index.keys(): value = cols[from_key_to_index[key]] if key == 'MJD-plate-fiber': for key2, value2 in zip('MJD-plate-fiber'.split('-'), value.split('-')): dla_cat[key2] += [value2] dla_cat[key] += [value] filename.close() userprint(("INFO: Found {} DLA from {} " "quasars").format(len(dla_cat['ThingID']), np.unique(dla_cat['ThingID']).size)) # convert Noterdaemem keys to picca keys from_noterdaeme_key_to_picca_key = { 'ThingID': 'THING_ID', 'z_abs': 'Z', 'zqso': 'ZQSO', 'NHI': 'NHI', 'plate': 'PLATE', 'MJD': 'MJD', 'fiber': 'FIBERID', 'RA': 'RA', 'Dec': 'DEC' } # define types from_picca_key_to_type = { 'THING_ID': np.int64, 'Z': np.float64, 'ZQSO': np.float64, 'NHI': np.float64, 'PLATE': np.int64, 'MJD': np.int64, 'FIBERID': np.int64, 'RA': np.float64, 'DEC': np.float64 } # format catalogue cat = { value: np.array(dla_cat[key], dtype=from_picca_key_to_type[value]) for key, value in from_noterdaeme_key_to_picca_key.items() } # apply cuts w = cat['THING_ID'] > 0 userprint(("INFO: Removed {} DLA, because " "THING_ID<=0").format((cat['THING_ID'] <= 0).sum())) w &= cat['Z'] > 0. userprint(("INFO: Removed {} DLA, because " "Z<=0.").format((cat['Z'] <= 0.).sum())) for key in cat: cat[key] = cat[key][w] # update RA, DEC, and Z_QSO from DRQ catalogue hdul = fitsio.FITS(drq_filename) thingid = hdul[1]['THING_ID'][:] ra = hdul[1]['RA'][:] dec = hdul[1]['DEC'][:] z_qso = hdul[1][drq_z_key][:] hdul.close() from_thingid_to_index = {t: index for index, t in enumerate(thingid)} cat['RA'] = np.array( [ra[from_thingid_to_index[t]] for t in cat['THING_ID']]) cat['DEC'] = np.array( [dec[from_thingid_to_index[t]] for t in cat['THING_ID']]) cat['ZQSO'] = np.array( [z_qso[from_thingid_to_index[t]] for t in cat['THING_ID']]) # apply cuts w = cat['RA'] != cat['DEC'] userprint(("INFO: Removed {} DLA, because " "RA==DEC").format((cat['RA'] == cat['DEC']).sum())) w &= cat['RA'] != 0. userprint(("INFO: Removed {} DLA, because " "RA==0").format((cat['RA'] == 0.).sum())) w &= cat['DEC'] != 0. userprint(("INFO: Removed {} DLA, because " "DEC==0").format((cat['DEC'] == 0.).sum())) w &= cat['ZQSO'] > 0. 
userprint(("INFO: Removed {} DLA, because " "ZQSO<=0.").format((cat['ZQSO'] <= 0.).sum())) for key in cat: cat[key] = cat[key][w] # sort first by redshift w = np.argsort(cat['Z']) for key in cat.keys(): cat[key] = cat[key][w] # then by thingid w = np.argsort(cat['THING_ID']) for key in cat: cat[key] = cat[key][w] # add DLA ID cat['DLAID'] = np.arange(1, cat['Z'].size + 1, dtype=np.int64) for key in ['RA', 'DEC']: cat[key] = cat[key].astype('float64') # Save catalogue results = fitsio.FITS(out_path, 'rw', clobber=True) cols = list(cat.values()) names = list(cat) results.write(cols, names=names, extname='DLACAT') results.close() def desi_convert_dla(in_path, out_path): """Convert a catalog of DLA from a DESI format to the format used by picca Args: in_path: string Full path filename containing the ASCII DLA catalogue out_path: string Full path filename where the fits DLA catalogue will be written to """ from_desi_key_to_picca_key = { 'RA': 'RA', 'DEC': 'DEC', 'Z': 'Z_DLA_RSD', 'ZQSO': 'Z_QSO_RSD', 'NHI': 'N_HI_DLA', 'THING_ID': 'MOCKID', 'DLAID': 'DLAID', 'PLATE': 'MOCKID', 'MJD': 'MOCKID', 'FIBERID': 'MOCKID', } # read catalogue cat = {} hdul = fitsio.FITS(in_path) for key, value in from_desi_key_to_picca_key.items(): cat[key] = hdul['DLACAT'][value][:] hdul.close() userprint(("INFO: Found {} DLA from {} " "quasars").format(cat['Z'].size, np.unique(cat['THING_ID']).size)) # sort by THING_ID w = np.argsort(cat['THING_ID']) for key in cat: cat[key] = cat[key][w] for key in ['RA', 'DEC']: cat[key] = cat[key].astype('float64') # save results results = fitsio.FITS(out_path, 'rw', clobber=True) cols = list(cat.values()) names = list(cat) results.write(cols, names=names, extname='DLACAT') results.close() def desi_from_truth_to_drq(truth_filename, targets_filename, out_path, spec_type="QSO"): """Transform a desi truth.fits file and a desi targets.fits into a drq like file Args: truth_filename: string Filename of the truth.fits file targets_filename: string Filename of the desi targets.fits file out_path: string Full path filename where the fits catalogue will be written to spec_type: string Spectral type of the objects to include in the catalogue """ # read truth table hdul = fitsio.FITS(truth_filename) # apply cuts w = np.ones(hdul[1]['TARGETID'][:].size).astype(bool) userprint(" start : nb object in cat = {}".format(w.sum())) w &= np.char.strip(hdul[1]['TRUESPECTYPE'][:].astype(str)) == spec_type userprint(" and TRUESPECTYPE=={} : nb object in cat = {}".format( spec_type, w.sum())) # load the arrays thingid = hdul[1]['TARGETID'][:][w] z_qso = hdul[1]['TRUEZ'][:][w] hdul.close() ra = np.zeros(thingid.size) dec = np.zeros(thingid.size) plate = thingid mjd = thingid fiberid = thingid ### Get RA and DEC from targets hdul = fitsio.FITS(targets_filename) thingid_targets = hdul[1]['TARGETID'][:] ra_targets = hdul[1]['RA'][:].astype('float64') dec_targets = hdul[1]['DEC'][:].astype('float64') hdul.close() from_targetid_to_index = {} for index, t in enumerate(thingid_targets): from_targetid_to_index[t] = index keys_from_targetid_to_index = from_targetid_to_index.keys() for index, t in enumerate(thingid): if t not in keys_from_targetid_to_index: continue index2 = from_targetid_to_index[t] ra[index] = ra_targets[index2] dec[index] = dec_targets[index2] # apply cuts if (ra == 0.).sum() != 0 or (dec == 0.).sum() != 0: w = ra != 0. w &= dec != 0. 
userprint((" and RA and DEC : nb object in cat = " "{}").format(w.sum())) ra = ra[w] dec = dec[w] z_qso = z_qso[w] thingid = thingid[w] plate = plate[w] mjd = mjd[w] fiberid = fiberid[w] # save catalogue results = fitsio.FITS(out_path, 'rw', clobber=True) cols = [ra, dec, thingid, plate, mjd, fiberid, z_qso] names = ['RA', 'DEC', 'THING_ID', 'PLATE', 'MJD', 'FIBERID', 'Z'] results.write(cols, names=names, extname='CAT') results.close() def desi_from_ztarget_to_drq(in_path, out_path, spec_type='QSO', downsampling_z_cut=None, downsampling_num=None, gauss_redshift_error=None): """Transforms a catalog of object in desi format to a catalog in DRQ format Args: in_path: string Full path filename containing the catalogue of objects out_path: string Full path filename where the fits DLA catalogue will be written to spec_type: string Spectral type of the objects to include in the catalogue downsampling_z_cut: float or None - default: None Minimum redshift to downsample the data. 'None' for no downsampling downsampling_num: int Target number of object above redshift downsampling-z-cut. 'None' for no downsampling gauss_redshift_error: int Gaussian random error to be added to redshift (in km/s) Mimics uncertainties in estimation of z in classifiers 'None' for no error """ ## Info of the primary observation hdul = fitsio.FITS(in_path) spec_type_list = np.char.strip(hdul[1]['SPECTYPE'][:].astype(str)) # apply cuts userprint((" start : nb object in cat = " "{}").format(spec_type_list.size)) w = hdul[1]['ZWARN'][:] == 0. userprint(' and zwarn==0 : nb object in cat = {}'.format(w.sum())) w &= spec_type_list == spec_type userprint(' and spectype=={} : nb object in cat = {}'.format( spec_type, w.sum())) # load the arrays cat = {} from_desi_key_to_picca_key = { 'RA': 'RA', 'DEC': 'DEC', 'Z': 'Z', 'THING_ID': 'TARGETID', 'PLATE': 'TARGETID', 'MJD': 'TARGETID', 'FIBERID': 'TARGETID' } for key, value in from_desi_key_to_picca_key.items(): cat[key] = hdul[1][value][:][w] hdul.close() for key in ['RA', 'DEC']: cat[key] = cat[key].astype('float64') # apply error to z if gauss_redshift_error is not None: SPEED_LIGHT = speed_light/1000. # [km/s] np.random.seed(0) dz = gauss_redshift_error/SPEED_LIGHT*(1.+cat['Z'])*np.random.normal(0, 1, cat['Z'].size) cat['Z'] += dz # apply downsampling if downsampling_z_cut is not None and downsampling_num is not None: if cat['RA'].size < downsampling_num: userprint(("WARNING:: Trying to downsample, when nb cat = {} and " "nb downsampling = {}").format(cat['RA'].size, downsampling_num)) else: z_cut_num = (cat['Z'] > downsampling_z_cut).sum() select_fraction = (downsampling_num / z_cut_num) if select_fraction < 1.0: np.random.seed(0) w = np.random.choice(np.arange(cat['RA'].size), size=int(cat['RA'].size * select_fraction), replace=False) for key in cat: cat[key] = cat[key][w] userprint((" and downsampling : nb object in cat = {}, nb z > " "{} = {}").format(cat['RA'].size, downsampling_z_cut, z_cut_num)) else: userprint(("WARNING:: Trying to downsample, when nb QSOs with " "z > {} = {} and downsampling = {}").format (downsampling_z_cut, z_cut_num, downsampling_num)) # sort by THING_ID w = np.argsort(cat['THING_ID']) for key in cat: cat[key] = cat[key][w] # save catalogue results = fitsio.FITS(out_path, 'rw', clobber=True) cols = list(cat.values()) names = list(cat) results.write(cols, names=names, extname='CAT') results.close()
igmhub/picca
py/picca/converters.py
Python
gpl-3.0
12,823
[ "Gaussian" ]
275f72e5d8a7dc0055e090b9432e08557fea7dc21ceccf113f26be2e16a424f1
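The redshift smearing applied in desi_from_ztarget_to_drq above is a velocity-space Gaussian, dz = (sigma_v / c) * (1 + z) * N(0, 1) with sigma_v in km/s; a standalone numpy sketch with toy catalogue values follows.

import numpy as np

SPEED_LIGHT_KMS = 299792.458  # km/s


def smear_redshifts(z, sigma_v_kms, seed=0):
    """Add a Gaussian velocity error of width sigma_v_kms to each redshift."""
    rng = np.random.RandomState(seed)
    dz = sigma_v_kms / SPEED_LIGHT_KMS * (1.0 + z) * rng.normal(0.0, 1.0, z.size)
    return z + dz


z_qso = np.array([2.1, 2.4, 3.0, 2.7])
print(smear_redshifts(z_qso, sigma_v_kms=400.0))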
""" Add UUID column to dataset table """ from sqlalchemy import * from sqlalchemy.orm import * from migrate import * from migrate.changeset import * from galaxy.model.custom_types import UUIDType import logging log = logging.getLogger( __name__ ) dataset_uuid_column = Column( "uuid", UUIDType, nullable=True ) def display_migration_details(): print "" print "This migration adds uuid column to dataset table" def upgrade(migrate_engine): print __doc__ metadata = MetaData() metadata.bind = migrate_engine metadata.reflect() # Add the uuid colum to the dataset table try: dataset_table = Table( "dataset", metadata, autoload=True ) dataset_uuid_column.create( dataset_table ) assert dataset_uuid_column is dataset_table.c.uuid except Exception, e: print str(e) log.error( "Adding column 'uuid' to dataset table failed: %s" % str( e ) ) return def downgrade(migrate_engine): metadata = MetaData() metadata.bind = migrate_engine metadata.reflect() # Drop the dataset table's uuid column. try: dataset_table = Table( "dataset", metadata, autoload=True ) dataset_uuid = dataset_table.c.uuid dataset_uuid.drop() except Exception, e: log.debug( "Dropping 'uuid' column from dataset table failed: %s" % ( str( e ) ) )
mikel-egana-aranguren/SADI-Galaxy-Docker
galaxy-dist/lib/galaxy/model/migrate/versions/0110_add_dataset_uuid.py
Python
gpl-3.0
1,363
[ "Galaxy" ]
4c52aa9e05f392b31ff4bd837090d669460d6540928434e22583d1cb2125e5a0
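The Galaxy migration above follows the usual sqlalchemy-migrate add/drop-column shape; a hypothetical sketch of the same pattern for a made-up "notes" column (an older SQLAlchemy plus the migrate.changeset monkey-patches are assumed).

from sqlalchemy import Column, MetaData, Table, TEXT
from migrate.changeset import *  # adds .create() / .drop() to Column


notes_column = Column("notes", TEXT, nullable=True)


def upgrade(migrate_engine):
    metadata = MetaData()
    metadata.bind = migrate_engine
    dataset_table = Table("dataset", metadata, autoload=True)
    notes_column.create(dataset_table)   # ALTER TABLE dataset ADD COLUMN notes


def downgrade(migrate_engine):
    metadata = MetaData()
    metadata.bind = migrate_engine
    dataset_table = Table("dataset", metadata, autoload=True)
    dataset_table.c.notes.drop()         # ALTER TABLE dataset DROP COLUMN notes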
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Converter for slice operations.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import gast from tensorflow.contrib.autograph.core import converter from tensorflow.contrib.autograph.lang import directives from tensorflow.contrib.autograph.pyct import templates class SliceTransformer(converter.Base): """Converts slicing operations to their TF counterpart. Currently, relying on the default slice operator that Tensor uses is insufficient, because TensorArray and tensor lists use dedicated index read and write functions. """ def _process_single_assignment(self, target, value): if not isinstance(target, gast.Subscript): return None template = """ target = ag__.set_item(target, key, item) """ return templates.replace( template, target=target.value, key=target.slice, item=value) def visit_Assign(self, node): node = self.generic_visit(node) # TODO(mdan): Support unpackings and multiple assignments. if len(node.targets) != 1: raise NotImplementedError('multiple assignment') replacement = self._process_single_assignment(node.targets[0], node.value) if replacement is not None: return replacement return node def visit_Subscript(self, node): node = self.generic_visit(node) if not isinstance(node.slice, gast.Index): return node if not isinstance(node.ctx, gast.Load): # Index writes are handled at a higher level, one at which the rvalue is # also available. return node dtype = self.get_definition_directive( node.value, directives.set_element_type, 'dtype', default=templates.replace_as_expression('None')) template = """ ag__.get_item( target, key, opts=ag__.GetItemOpts(element_dtype=dtype)) """ return templates.replace_as_expression( template, target=node.value, key=node.slice, dtype=dtype) def transform(node, ctx): return SliceTransformer(ctx).visit(node)
jart/tensorflow
tensorflow/contrib/autograph/converters/slices.py
Python
apache-2.0
2,760
[ "VisIt" ]
0b2c37fcd8817dbf8f372ff46ecde18ebbd1f1df62ecc41899e2ac044076d8ae
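SliceTransformer above rewrites "target[key] = value" into a call to ag__.set_item; the same idea can be shown with only the standard-library ast module (Python 3.9+ for ast.unparse), leaving out autograph's gast/templates machinery. The set_item name below is a free-standing placeholder, not the autograph API.

import ast


class IndexAssignToCall(ast.NodeTransformer):
    """Rewrite an index assignment x[i] = v into x = set_item(x, i, v)."""

    def visit_Assign(self, node):
        self.generic_visit(node)
        if len(node.targets) == 1 and isinstance(node.targets[0], ast.Subscript):
            sub = node.targets[0]
            # build set_item(<target>, <key>, <value>) and reassign it to the target name
            call = ast.Call(
                func=ast.Name(id='set_item', ctx=ast.Load()),
                args=[ast.Name(id=sub.value.id, ctx=ast.Load()), sub.slice, node.value],
                keywords=[])
            node.targets = [ast.Name(id=sub.value.id, ctx=ast.Store())]
            node.value = call
        return node


tree = ast.parse("x[i] = v")
tree = ast.fix_missing_locations(IndexAssignToCall().visit(tree))
print(ast.unparse(tree))  # -> x = set_item(x, i, v)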
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*- # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # MDAnalysis --- https://www.mdanalysis.org # Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors # (see the file AUTHORS for the full list of names) # # Released under the GNU Public Licence, v2 or any higher version # # Please cite your use of MDAnalysis in published work: # # R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler, # D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein. # MDAnalysis: A Python package for the rapid analysis of molecular dynamics # simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th # Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy. # doi: 10.25080/majora-629e541a-00e # # N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein. # MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations. # J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787 # import MDAnalysis as mda import MDAnalysis.analysis.encore as encore import importlib import tempfile import numpy as np import sys import os import warnings import pytest from numpy.testing import assert_equal, assert_almost_equal from MDAnalysisTests.datafiles import DCD, DCD2, PSF, TPR, XTC from MDAnalysisTests import block_import import MDAnalysis.analysis.rms as rms import MDAnalysis.analysis.align as align import MDAnalysis.analysis.encore.confdistmatrix as confdistmatrix def function(x): return x**2 class TestEncore(object): @pytest.fixture(scope='class') def ens1_template(self): template = mda.Universe(PSF, DCD) template.transfer_to_memory(step=5) return template @pytest.fixture(scope='class') def ens2_template(self): template = mda.Universe(PSF, DCD2) template.transfer_to_memory(step=5) return template @pytest.fixture() def ens1(self, ens1_template): return mda.Universe( ens1_template.filename, ens1_template.trajectory.timeseries(order='fac'), format=mda.coordinates.memory.MemoryReader) @pytest.fixture() def ens2(self, ens2_template): return mda.Universe( ens2_template.filename, ens2_template.trajectory.timeseries(order='fac'), format=mda.coordinates.memory.MemoryReader) def test_triangular_matrix(self): scalar = 2 size = 3 expected_value = 1.984 filename = tempfile.mktemp()+".npz" triangular_matrix = encore.utils.TriangularMatrix(size = size) triangular_matrix[0,1] = expected_value assert_equal(triangular_matrix[0,1], expected_value, err_msg="Data error in TriangularMatrix: read/write are not consistent") assert_equal(triangular_matrix[0,1], triangular_matrix[1,0], err_msg="Data error in TriangularMatrix: matrix non symmetrical") triangular_matrix.savez(filename) triangular_matrix_2 = encore.utils.TriangularMatrix(size = size, loadfile = filename) assert_equal(triangular_matrix_2[0,1], expected_value, err_msg="Data error in TriangularMatrix: loaded matrix non symmetrical") triangular_matrix_3 = encore.utils.TriangularMatrix(size = size) triangular_matrix_3.loadz(filename) assert_equal(triangular_matrix_3[0,1], expected_value, err_msg="Data error in TriangularMatrix: loaded matrix non symmetrical") incremented_triangular_matrix = triangular_matrix + scalar assert_equal(incremented_triangular_matrix[0,1], expected_value + scalar, err_msg="Error in TriangularMatrix: addition of scalar gave" "inconsistent results") triangular_matrix += scalar assert_equal(triangular_matrix[0,1], expected_value + scalar, err_msg="Error in 
TriangularMatrix: addition of scalar gave" "inconsistent results") multiplied_triangular_matrix_2 = triangular_matrix_2 * scalar assert_equal(multiplied_triangular_matrix_2[0,1], expected_value * scalar, err_msg="Error in TriangularMatrix: multiplication by scalar gave" "inconsistent results") triangular_matrix_2 *= scalar assert_equal(triangular_matrix_2[0,1], expected_value * scalar, err_msg="Error in TriangularMatrix: multiplication by scalar gave\ inconsistent results") @pytest.mark.xfail(os.name == 'nt', reason="Not yet supported on Windows.") def test_parallel_calculation(self): arguments = [tuple([i]) for i in np.arange(0,100)] parallel_calculation = encore.utils.ParallelCalculation(function=function, n_jobs=4, args=arguments) results = parallel_calculation.run() for i,r in enumerate(results): assert_equal(r[1], arguments[i][0]**2, err_msg="Unexpeted results from ParallelCalculation") def test_rmsd_matrix_with_superimposition(self, ens1): conf_dist_matrix = encore.confdistmatrix.conformational_distance_matrix( ens1, encore.confdistmatrix.set_rmsd_matrix_elements, select="name CA", pairwise_align=True, weights='mass', n_jobs=1) reference = rms.RMSD(ens1, select="name CA") reference.run() for i,rmsd in enumerate(reference.rmsd): assert_almost_equal(conf_dist_matrix[0,i], rmsd[2], decimal=3, err_msg = "calculated RMSD values differ from the reference implementation") def test_rmsd_matrix_with_superimposition_custom_weights(self, ens1): conf_dist_matrix = encore.confdistmatrix.conformational_distance_matrix( ens1, encore.confdistmatrix.set_rmsd_matrix_elements, select="name CA", pairwise_align=True, weights='mass', n_jobs=1) conf_dist_matrix_custom = encore.confdistmatrix.conformational_distance_matrix( ens1, encore.confdistmatrix.set_rmsd_matrix_elements, select="name CA", pairwise_align=True, weights=(ens1.select_atoms('name CA').masses, ens1.select_atoms('name CA').masses), n_jobs=1) for i in range(conf_dist_matrix_custom.size): assert_almost_equal(conf_dist_matrix_custom[0, i], conf_dist_matrix[0, i]) def test_rmsd_matrix_without_superimposition(self, ens1): selection_string = "name CA" selection = ens1.select_atoms(selection_string) reference_rmsd = [] coordinates = ens1.trajectory.timeseries(selection, order='fac') for coord in coordinates: reference_rmsd.append(rms.rmsd(coordinates[0], coord, superposition=False)) confdist_matrix = encore.confdistmatrix.conformational_distance_matrix( ens1, encore.confdistmatrix.set_rmsd_matrix_elements, select=selection_string, pairwise_align=False, weights='mass', n_jobs=1) print (repr(confdist_matrix.as_array()[0,:])) assert_almost_equal(confdist_matrix.as_array()[0,:], reference_rmsd, decimal=3, err_msg="calculated RMSD values differ from reference") def test_ensemble_superimposition(self): aligned_ensemble1 = mda.Universe(PSF, DCD) align.AlignTraj(aligned_ensemble1, aligned_ensemble1, select="name CA", in_memory=True).run() aligned_ensemble2 = mda.Universe(PSF, DCD) align.AlignTraj(aligned_ensemble2, aligned_ensemble2, select="name *", in_memory=True).run() rmsfs1 = rms.RMSF(aligned_ensemble1.select_atoms('name *')) rmsfs1.run() rmsfs2 = rms.RMSF(aligned_ensemble2.select_atoms('name *')) rmsfs2.run() assert sum(rmsfs1.rmsf) > sum(rmsfs2.rmsf),"Ensemble aligned on all " \ "atoms should have lower full-atom RMSF than ensemble aligned on only CAs." 
def test_ensemble_superimposition_to_reference_non_weighted(self): aligned_ensemble1 = mda.Universe(PSF, DCD) align.AlignTraj(aligned_ensemble1, aligned_ensemble1, select="name CA", in_memory=True).run() aligned_ensemble2 = mda.Universe(PSF, DCD) align.AlignTraj(aligned_ensemble2, aligned_ensemble2, select="name *", in_memory=True).run() rmsfs1 = rms.RMSF(aligned_ensemble1.select_atoms('name *')) rmsfs1.run() rmsfs2 = rms.RMSF(aligned_ensemble2.select_atoms('name *')) rmsfs2.run() assert sum(rmsfs1.rmsf) > sum(rmsfs2.rmsf), "Ensemble aligned on all " \ "atoms should have lower full-atom RMSF than ensemble aligned on only CAs." def test_hes_to_self(self, ens1): results, details = encore.hes([ens1, ens1]) result_value = results[0, 1] expected_value = 0. assert_almost_equal(result_value, expected_value, err_msg="Harmonic Ensemble Similarity to itself not zero: {0:f}".format(result_value)) def test_hes(self, ens1, ens2): results, details = encore.hes([ens1, ens2], weights='mass') result_value = results[0, 1] min_bound = 1E5 assert result_value > min_bound, "Unexpected value for Harmonic " \ "Ensemble Similarity: {0:f}. Expected {1:f}.".format(result_value, min_bound) def test_hes_custom_weights(self, ens1, ens2): results, details = encore.hes([ens1, ens2], weights='mass') results_custom, details_custom = encore.hes([ens1, ens2], weights=(ens1.select_atoms('name CA').masses, ens2.select_atoms('name CA').masses)) result_value = results[0, 1] result_value_custom = results_custom[0, 1] assert_almost_equal(result_value, result_value_custom) def test_hes_align(self, ens1, ens2): # This test is massively sensitive! # Get 5260 when masses were float32? results, details = encore.hes([ens1, ens2], align=True) result_value = results[0,1] expected_value = 2047.05 assert_almost_equal(result_value, expected_value, decimal=-3, err_msg="Unexpected value for Harmonic Ensemble Similarity: {0:f}. Expected {1:f}.".format(result_value, expected_value)) def test_ces_to_self(self, ens1): results, details = \ encore.ces([ens1, ens1], clustering_method=encore.AffinityPropagationNative(preference = -3.0)) result_value = results[0,1] expected_value = 0. assert_almost_equal(result_value, expected_value, err_msg="ClusteringEnsemble Similarity to itself not zero: {0:f}".format(result_value)) def test_ces(self, ens1, ens2): results, details = encore.ces([ens1, ens2]) result_value = results[0,1] expected_value = 0.51 assert_almost_equal(result_value, expected_value, decimal=2, err_msg="Unexpected value for Cluster Ensemble Similarity: {0:f}. Expected {1:f}.".format(result_value, expected_value)) def test_dres_to_self(self, ens1): results, details = encore.dres([ens1, ens1]) result_value = results[0,1] expected_value = 0. assert_almost_equal(result_value, expected_value, decimal=2, err_msg="Dim. Reduction Ensemble Similarity to itself not zero: {0:f}".format(result_value)) def test_dres(self, ens1, ens2): results, details = encore.dres([ens1, ens2], select="name CA and resnum 1-10") result_value = results[0,1] upper_bound = 0.6 assert result_value < upper_bound, "Unexpected value for Dim. " \ "reduction Ensemble Similarity: {0:f}. 
Expected {1:f}.".format(result_value, upper_bound) @pytest.mark.xfail # sporadically fails, see Issue #2158 def test_dres_without_superimposition(self, ens1, ens2): distance_matrix = encore.get_distance_matrix( encore.merge_universes([ens1, ens2]), superimpose=False) results, details = encore.dres([ens1, ens2], distance_matrix = distance_matrix) result_value = results[0,1] expected_value = 0.68 assert_almost_equal(result_value, expected_value, decimal=1, err_msg="Unexpected value for Dim. reduction Ensemble Similarity: {0:f}. Expected {1:f}.".format(result_value, expected_value)) def test_ces_convergence(self, ens1): expected_values = [0.3443593, 0.1941854, 0.06857104, 0.] results = encore.ces_convergence(ens1, 5) for i,ev in enumerate(expected_values): assert_almost_equal(ev, results[i], decimal=2, err_msg="Unexpected value for Clustering Ensemble similarity in convergence estimation") def test_dres_convergence(self, ens1): # Due to encore.dres_convergence() involving random numbers, the # following assertion is allowed to fail once. This significantly # reduces the probability of a random test failure. expected_values = [0.3, 0.] results = encore.dres_convergence(ens1, 10) try: assert_almost_equal(results[:,0], expected_values, decimal=1) except AssertionError: # Random test failure is very rare, but repeating the failed test # just once would only assert that the test passes with 50% # probability. To be a little safer, we raise a warning and repeat # the test 10 times: warnings.warn(message="Test 'test_dres_convergence' failed, " "repeating test 10 times.", category=RuntimeWarning) for i in range(10): results = encore.dres_convergence(ens1, 10) assert_almost_equal(results[:,0], expected_values, decimal=1, err_msg="Unexpected value for Dim. " "reduction Ensemble similarity in " "convergence estimation") @pytest.mark.xfail # sporadically fails, see Issue #2158 def test_hes_error_estimation(self, ens1): expected_average = 10 expected_stdev = 12 averages, stdevs = encore.hes([ens1, ens1], estimate_error = True, bootstrapping_samples=10, select="name CA and resnum 1-10") average = averages[0,1] stdev = stdevs[0,1] assert_almost_equal(average, expected_average, decimal=-2, err_msg="Unexpected average value for bootstrapped samples in Harmonic Ensemble imilarity") assert_almost_equal(stdev, expected_stdev, decimal=-2, err_msg="Unexpected standard daviation for bootstrapped samples in Harmonic Ensemble imilarity") def test_ces_error_estimation(self, ens1): expected_average = 0.03 expected_stdev = 0.31 averages, stdevs = encore.ces([ens1, ens1], estimate_error = True, bootstrapping_samples=10, clustering_method=encore.AffinityPropagationNative(preference=-2.0), select="name CA and resnum 1-10") average = averages[0,1] stdev = stdevs[0,1] assert_almost_equal(average, expected_average, decimal=1, err_msg="Unexpected average value for bootstrapped samples in Clustering Ensemble similarity") assert_almost_equal(stdev, expected_stdev, decimal=0, err_msg="Unexpected standard daviation for bootstrapped samples in Clustering Ensemble similarity") def test_ces_error_estimation_ensemble_bootstrap(self, ens1): # Error estimation using a method that does not take a distance # matrix as input, and therefore relies on bootstrapping the ensembles # instead pytest.importorskip('sklearn') expected_average = 0.03 expected_stdev = 0.02 averages, stdevs = encore.ces([ens1, ens1], estimate_error = True, bootstrapping_samples=10, clustering_method=encore.KMeans(n_clusters=2), select="name CA and resnum 1-10") 
average = averages[0,1] stdev = stdevs[0,1] assert_almost_equal(average, expected_average, decimal=1, err_msg="Unexpected average value for bootstrapped samples in Clustering Ensemble similarity") assert_almost_equal(stdev, expected_stdev, decimal=1, err_msg="Unexpected standard daviation for bootstrapped samples in Clustering Ensemble similarity") def test_dres_error_estimation(self, ens1): average_upper_bound = 0.3 stdev_upper_bound = 0.2 averages, stdevs = encore.dres([ens1, ens1], estimate_error = True, bootstrapping_samples=10, select="name CA and resnum 1-10") average = averages[0,1] stdev = stdevs[0,1] assert average < average_upper_bound, "Unexpected average value for " \ "bootstrapped samples in Dim. reduction Ensemble similarity" assert stdev < stdev_upper_bound, "Unexpected standard deviation for" \ " bootstrapped samples in Dim. reduction Ensemble imilarity" class TestEncoreClustering(object): @pytest.fixture(scope='class') def ens1_template(self): template = mda.Universe(PSF, DCD) template.transfer_to_memory(step=5) return template @pytest.fixture(scope='class') def ens2_template(self): template = mda.Universe(PSF, DCD2) template.transfer_to_memory(step=5) return template @pytest.fixture(scope='class') def cc(self): return encore.ClusterCollection([1, 1, 1, 3, 3, 5, 5, 5]) @pytest.fixture(scope='class') def cluster(self): return encore.Cluster(elem_list=np.array([0, 1, 2]), centroid=1) @pytest.fixture() def ens1(self, ens1_template): return mda.Universe( ens1_template.filename, ens1_template.trajectory.timeseries(order='fac'), format=mda.coordinates.memory.MemoryReader) @pytest.fixture() def ens2(self, ens2_template): return mda.Universe( ens2_template.filename, ens2_template.trajectory.timeseries(order='fac'), format=mda.coordinates.memory.MemoryReader) def test_clustering_one_ensemble(self, ens1): cluster_collection = encore.cluster(ens1) expected_value = 7 assert len(cluster_collection) == expected_value, "Unexpected " \ "results: {0}".format(cluster_collection) def test_clustering_two_ensembles(self, ens1, ens2): cluster_collection = encore.cluster([ens1, ens2]) expected_value = 14 assert len(cluster_collection) == expected_value, "Unexpected " \ "results: {0}".format(cluster_collection) def test_clustering_three_ensembles_two_identical(self, ens1, ens2): cluster_collection = encore.cluster([ens1, ens2, ens1]) expected_value = 40 assert len(cluster_collection) == expected_value, "Unexpected result:" \ " {0}".format(cluster_collection) def test_clustering_two_methods(self, ens1): cluster_collection = encore.cluster( [ens1], method=[encore.AffinityPropagationNative(), encore.AffinityPropagationNative()]) assert len(cluster_collection[0]) == len(cluster_collection[1]), \ "Unexpected result: {0}".format(cluster_collection) def test_clustering_AffinityPropagationNative_direct(self, ens1): method = encore.AffinityPropagationNative() distance_matrix = encore.get_distance_matrix(ens1) cluster_assignment = method(distance_matrix) expected_value = 7 assert len(set(cluster_assignment)) == expected_value, \ "Unexpected result: {0}".format(cluster_assignment) def test_clustering_AffinityPropagation_direct(self, ens1): pytest.importorskip('sklearn') method = encore.AffinityPropagation(random_state=0) distance_matrix = encore.get_distance_matrix(ens1) cluster_assignment = method(distance_matrix) expected_value = 7 assert len(set(cluster_assignment)) == expected_value, \ "Unexpected result: {0}".format(cluster_assignment) def test_clustering_KMeans_direct(self, ens1): 
pytest.importorskip('sklearn') clusters = 10 method = encore.KMeans(clusters) coordinates = ens1.trajectory.timeseries(order='fac') coordinates = np.reshape(coordinates, (coordinates.shape[0], -1)) cluster_assignment = method(coordinates) assert len(set(cluster_assignment)) == clusters, \ "Unexpected result: {0}".format(cluster_assignment) def test_clustering_DBSCAN_direct(self, ens1): pytest.importorskip('sklearn') method = encore.DBSCAN(eps=0.5, min_samples=2) distance_matrix = encore.get_distance_matrix(ens1) cluster_assignment = method(distance_matrix) expected_value = 2 assert len(set(cluster_assignment)) == expected_value, \ "Unexpected result: {0}".format(cluster_assignment) def test_clustering_two_different_methods(self, ens1): pytest.importorskip('sklearn') cluster_collection = encore.cluster( [ens1], method=[encore.AffinityPropagation(preference=-7.5, random_state=0), encore.DBSCAN(min_samples=2)]) assert len(cluster_collection[0]) == len(cluster_collection[1]), \ "Unexpected result: {0}".format(cluster_collection) def test_clustering_method_w_no_distance_matrix(self, ens1): pytest.importorskip('sklearn') cluster_collection = encore.cluster( [ens1], method=encore.KMeans(10)) assert len(cluster_collection) == 10, \ "Unexpected result: {0}".format(cluster_collection) def test_clustering_two_methods_one_w_no_distance_matrix(self, ens1): pytest.importorskip('sklearn') cluster_collection = encore.cluster( [ens1], method=[encore.KMeans(17), encore.AffinityPropagationNative()]) assert len(cluster_collection[0]) == len(cluster_collection[0]), \ "Unexpected result: {0}".format(cluster_collection) def test_sklearn_affinity_propagation(self, ens1): pytest.importorskip('sklearn') cc1 = encore.cluster([ens1]) cc2 = encore.cluster([ens1], method=encore.AffinityPropagation(random_state=0)) assert len(cc1) == len(cc2), \ "Native and sklearn implementations of affinity "\ "propagation don't agree: mismatch in number of "\ "clusters: {0} {1}".format(len(cc1), len(cc2)) def test_ClusterCollection_init(self, cc): assert np.all(cc.clusters[0].elements == [0, 1, 2]) and \ np.all(cc.clusters[1].elements == [3, 4 ]) and \ np.all(cc.clusters[2].elements == [5, 6, 7]) and \ cc.clusters[0].centroid == 1 and \ cc.clusters[1].centroid == 3 and \ cc.clusters[2].centroid == 5, \ "ClusterCollection was not constructed correctly" def test_Cluster_init(self, cluster): assert np.all(cluster.elements == [0, 1, 2]) and \ cluster.centroid == 1, \ "Cluster was not constructed correctly" def test_ClusterCollection_get_ids(self, cc): assert cc.get_ids() == [0, 1, 2], \ "ClusterCollection ids aren't as expected" def test_ClusterCollection_get_centroids(self, cc): assert cc.get_centroids() == [1, 3, 5], \ "ClusterCollection centroids aren't as expected" def test_Cluster_add_metadata(self, cluster): metadata = cluster.elements*10 cluster.add_metadata('test', metadata) assert np.all(cluster.metadata['test'] == metadata), \ "Cluster metadata isn't as expected" class TestEncoreClusteringSklearn(object): """The tests in this class were duplicated from the affinity propagation tests in scikit-learn""" n_clusters = 3 @pytest.fixture() def distance_matrix(self): X = np.array([[8.73101582, 8.85617874], [11.61311169, 11.58774351], [10.86083514, 11.06253959], [9.45576027, 8.50606967], [11.30441509, 11.04867001], [8.63708065, 9.02077816], [8.34792066, 9.1851129], [11.06197897, 11.15126501], [11.24563175, 9.36888267], [10.83455241, 8.70101808], [11.49211627, 11.48095194], [10.6448857, 10.20768141], [10.491806, 9.38775868], 
[11.08330999, 9.39065561], [10.83872922, 9.48897803], [11.37890079, 8.93799596], [11.70562094, 11.16006288], [10.95871246, 11.1642394], [11.59763163, 10.91793669], [11.05761743, 11.5817094], [8.35444086, 8.91490389], [8.79613913, 8.82477028], [11.00420001, 9.7143482], [11.90790185, 10.41825373], [11.39149519, 11.89635728], [8.31749192, 9.78031016], [11.59530088, 9.75835567], [11.17754529, 11.13346973], [11.01830341, 10.92512646], [11.75326028, 8.46089638], [11.74702358, 9.36241786], [10.53075064, 9.77744847], [8.67474149, 8.30948696], [11.05076484, 9.16079575], [8.79567794, 8.52774713], [11.18626498, 8.38550253], [10.57169895, 9.42178069], [8.65168114, 8.76846013], [11.12522708, 10.6583617], [8.87537899, 9.02246614], [9.29163622, 9.05159316], [11.38003537, 10.93945712], [8.74627116, 8.85490353], [10.65550973, 9.76402598], [8.49888186, 9.31099614], [8.64181338, 9.154761], [10.84506927, 10.8790789], [8.98872711, 9.17133275], [11.7470232, 10.60908885], [10.89279865, 9.32098256], [11.14254656, 9.28262927], [9.02660689, 9.12098876], [9.16093666, 8.72607596], [11.47151183, 8.92803007], [11.76917681, 9.59220592], [9.97880407, 11.26144744], [8.58057881, 8.43199283], [10.53394006, 9.36033059], [11.34577448, 10.70313399], [9.07097046, 8.83928763]]) XX = np.einsum('ij,ij->i', X, X)[:, np.newaxis] YY = XX.T distances = np.dot(X, X.T) distances *= -2 distances += XX distances += YY np.maximum(distances, 0, out=distances) distances.flat[::distances.shape[0] + 1] = 0.0 dimension = len(distances) distance_matrix = encore.utils.TriangularMatrix(len(distances)) for i in range(dimension): for j in range(i, dimension): distance_matrix[i, j] = distances[i, j] return distance_matrix def test_one(self, distance_matrix): preference = -float(np.median(distance_matrix.as_array()) * 10.) 
clustering_method = encore.AffinityPropagationNative(preference=preference) ccs = encore.cluster(None, distance_matrix=distance_matrix, method=clustering_method) assert self.n_clusters == len(ccs), \ "Basic clustering test failed to give the right"\ "number of clusters: {0} vs {1}".format(self.n_clusters, len(ccs)) class TestEncoreDimensionalityReduction(object): @pytest.fixture(scope='class') def ens1_template(self): template = mda.Universe(PSF, DCD) template.transfer_to_memory(step=5) return template @pytest.fixture(scope='class') def ens2_template(self): template = mda.Universe(PSF, DCD2) template.transfer_to_memory(step=5) return template @pytest.fixture() def ens1(self, ens1_template): return mda.Universe( ens1_template.filename, ens1_template.trajectory.timeseries(order='fac'), format=mda.coordinates.memory.MemoryReader) @pytest.fixture() def ens2(self, ens2_template): return mda.Universe( ens2_template.filename, ens2_template.trajectory.timeseries(order='fac'), format=mda.coordinates.memory.MemoryReader) def test_dimensionality_reduction_one_ensemble(self, ens1): dimension = 2 coordinates, details = encore.reduce_dimensionality(ens1) assert_equal(coordinates.shape[0], dimension, err_msg="Unexpected result in dimensionality reduction: {0}".format(coordinates)) def test_dimensionality_reduction_two_ensembles(self, ens1, ens2): dimension = 2 coordinates, details = \ encore.reduce_dimensionality([ens1, ens2]) assert_equal(coordinates.shape[0], dimension, err_msg="Unexpected result in dimensionality reduction: {0}".format(coordinates)) def test_dimensionality_reduction_three_ensembles_two_identical(self, ens1, ens2): coordinates, details = \ encore.reduce_dimensionality([ens1, ens2, ens1]) coordinates_ens1 = coordinates[:,np.where(details["ensemble_membership"]==1)] coordinates_ens3 = coordinates[:,np.where(details["ensemble_membership"]==3)] assert_almost_equal(coordinates_ens1, coordinates_ens3, decimal=0, err_msg="Unexpected result in dimensionality reduction: {0}".format(coordinates)) def test_dimensionality_reduction_specified_dimension(self, ens1, ens2): dimension = 3 coordinates, details = encore.reduce_dimensionality( [ens1, ens2], method=encore.StochasticProximityEmbeddingNative(dimension=dimension)) assert_equal(coordinates.shape[0], dimension, err_msg="Unexpected result in dimensionality reduction: {0}".format(coordinates)) def test_dimensionality_reduction_SPENative_direct(self, ens1): dimension = 2 method = encore.StochasticProximityEmbeddingNative(dimension=dimension) distance_matrix = encore.get_distance_matrix(ens1) coordinates, details = method(distance_matrix) assert_equal(coordinates.shape[0], dimension, err_msg="Unexpected result in dimensionality reduction: {0}".format( coordinates)) def test_dimensionality_reduction_PCA_direct(self, ens1): pytest.importorskip('sklearn') dimension = 2 method = encore.PrincipalComponentAnalysis(dimension=dimension) coordinates = ens1.trajectory.timeseries(order='fac') coordinates = np.reshape(coordinates, (coordinates.shape[0], -1)) coordinates, details = method(coordinates) assert_equal(coordinates.shape[0], dimension, err_msg="Unexpected result in dimensionality reduction: {0}".format( coordinates)) def test_dimensionality_reduction_different_method(self, ens1, ens2): pytest.importorskip('sklearn') dimension = 3 coordinates, details = \ encore.reduce_dimensionality( [ens1, ens2], method=encore.PrincipalComponentAnalysis(dimension=dimension)) assert_equal(coordinates.shape[0], dimension, err_msg="Unexpected result in dimensionality 
reduction: {0}".format(coordinates)) def test_dimensionality_reduction_two_methods(self, ens1, ens2): dims = [2,3] coordinates, details = \ encore.reduce_dimensionality( [ens1, ens2], method=[encore.StochasticProximityEmbeddingNative(dims[0]), encore.StochasticProximityEmbeddingNative(dims[1])]) assert_equal(coordinates[1].shape[0], dims[1]) def test_dimensionality_reduction_two_different_methods(self, ens1, ens2): pytest.importorskip('sklearn') dims = [2,3] coordinates, details = \ encore.reduce_dimensionality( [ens1, ens2], method=[encore.StochasticProximityEmbeddingNative(dims[0]), encore.PrincipalComponentAnalysis(dims[1])]) assert_equal(coordinates[1].shape[0], dims[1]) class TestEncoreConfDistMatrix(object): def test_get_distance_matrix(self): # Issue #1324 u = mda.Universe(TPR,XTC) dm = confdistmatrix.get_distance_matrix(u) class TestEncoreImportWarnings(object): @block_import('sklearn') def _check_sklearn_import_warns(self, package, recwarn): for mod in list(sys.modules): # list as we're changing as we iterate if 'encore' in mod: sys.modules.pop(mod, None) warnings.simplefilter('always') # assert_warns(ImportWarning, importlib.import_module, package) importlib.import_module(package) assert recwarn.pop(ImportWarning) def test_import_warnings(self, recwarn): for mod in list(sys.modules): # list as we're changing as we iterate if 'encore' in mod: sys.modules.pop(mod, None) for pkg in ( 'MDAnalysis.analysis.encore.dimensionality_reduction.DimensionalityReductionMethod', 'MDAnalysis.analysis.encore.clustering.ClusteringMethod', ): self._check_sklearn_import_warns(pkg, recwarn) # This is a quickfix! Convert this to a parametrize call in future.
MDAnalysis/mdanalysis
testsuite/MDAnalysisTests/analysis/test_encore.py
Python
gpl-2.0
36,155
[ "MDAnalysis" ]
43f9b283ccfff99083c1eb499f0dfddf3f3e37d5384f1530deed9f87e2da108c
""" ================================================== Automatic Relevance Determination Regression (ARD) ================================================== Fit regression model with Bayesian Ridge Regression. See :ref:`bayesian_ridge_regression` for more information on the regressor. Compared to the OLS (ordinary least squares) estimator, the coefficient weights are slightly shifted toward zeros, which stabilises them. The histogram of the estimated weights is very peaked, as a sparsity-inducing prior is implied on the weights. The estimation of the model is done by iteratively maximizing the marginal log-likelihood of the observations. """ print(__doc__) import numpy as np import pylab as pl from scipy import stats from sklearn.linear_model import ARDRegression, LinearRegression ############################################################################### # Generating simulated data with Gaussian weigthts # Parameters of the example np.random.seed(0) n_samples, n_features = 100, 100 # Create gaussian data X = np.random.randn(n_samples, n_features) # Create weigts with a precision lambda_ of 4. lambda_ = 4. w = np.zeros(n_features) # Only keep 10 weights of interest relevant_features = np.random.randint(0, n_features, 10) for i in relevant_features: w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_)) # Create noite with a precision alpha of 50. alpha_ = 50. noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples) # Create the target y = np.dot(X, w) + noise ############################################################################### # Fit the ARD Regression clf = ARDRegression(compute_score=True) clf.fit(X, y) ols = LinearRegression() ols.fit(X, y) ############################################################################### # Plot the true weights, the estimated weights and the histogram of the # weights pl.figure(figsize=(6, 5)) pl.title("Weights of the model") pl.plot(clf.coef_, 'b-', label="ARD estimate") pl.plot(ols.coef_, 'r--', label="OLS estimate") pl.plot(w, 'g-', label="Ground truth") pl.xlabel("Features") pl.ylabel("Values of the weights") pl.legend(loc=1) pl.figure(figsize=(6, 5)) pl.title("Histogram of the weights") pl.hist(clf.coef_, bins=n_features, log=True) pl.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)), 'ro', label="Relevant features") pl.ylabel("Features") pl.xlabel("Values of the weights") pl.legend(loc=1) pl.figure(figsize=(6, 5)) pl.title("Marginal log-likelihood") pl.plot(clf.scores_) pl.ylabel("Score") pl.xlabel("Iterations") pl.show()
depet/scikit-learn
examples/linear_model/plot_ard.py
Python
bsd-3-clause
2,588
[ "Gaussian" ]
a9eb60d0848d785c94120c1e03c2ff8411947a6b18fda513dfc3b1a15ea270d4
# # This is a set of functions to browse websites # import time import os import random import urllib, urllister import mapping from collections import Counter from random import randint from datetime import datetime from selenium import webdriver from selenium.webdriver.common.keys import Keys from selenium.common.exceptions import TimeoutException import pickle # to load and save cookies def create_browser_driver(browser,extensions): """ Browser is a string specifying the browser type; extensions is a list of extensions """ if browser == 'Firefox': fp = webdriver.FirefoxProfile() for ext in extensions: fp.add_extension(extension=ext) driver = webdriver.Firefox(firefox_profile=fp) return driver def save_ads_links(driver, directory, timestamp): """ Save ads links from a page. Pretty raw """ frames = driver.find_elements_by_tag_name("iframe") file_name = directory + "/" + timestamp + "-ads-source.log" f = open(file_name,'w') for frame in frames: driver.switch_to.frame(frame) try: a = driver.find_element_by_tag_name('a') f.write(a.get_attribute('innerHTML').encode('utf-8')) f.write(a.get_attribute('href').encode('utf-8')) except Exception: f.write(driver.page_source.encode('utf-8')) driver.switch_to.default_content() f.close() def get_ads_links(driver): """ Get ads links from page """ ads = list() frames = driver.find_elements_by_tag_name("iframe") if len(frames)>0: for frame in frames: driver.switch_to.frame(frame) try: a = driver.find_element_by_tag_name('a') ads.extend([a.get_attribute('innerHTML')]) ads.extend([a.get_attribute('href')]) except Exception: scripts = driver.find_elements_by_tag_name("iframe") for script in scripts: driver.switch_to.frame(script) try: ads.extend([script.get_attribute('src')]) except Exception: 'Do nothing, not found' driver.switch_to.default_content() driver.switch_to.frame(frame) driver.switch_to.default_content() return ads def save_headers(driver, directory, timestamp): """ Save headers and meta stuff from a page. Pretty raw """ headers = driver.find_elements_by_tag_name("meta") file_name = directory + "/" + timestamp + "-meta-source.log" f = open(file_name,'w') for header in headers: f.write(header.get_attribute('outerHTML').encode('utf-8')) f.close() def get_header_keywords(driver): """ Return a list of keywords given a Selenium driver """ keywords = list() headers = driver.find_elements_by_tag_name("meta") for header in headers: if header.get_attribute('name') == 'keywords': keywords.extend(header.get_attribute('content').split(',')) return keywords def random_elem(elems): """ Return a random element """ indices = random.sample(range(len(elems)), 1) sample_elem = [elems[i] for i in sorted(indices)][0] return sample_elem def links_in_page(page_url,tag): """ Given an HTML page URL and a tag, return the URLs found in the document. 
""" usock = urllib.urlopen(page_url) parser = urllister.URLLister() parser.feed(usock.read()) usock.close() parser.close() return parser.urls def save_screenshot(driver, directory, timestamp): screenshot = directory + "/" + timestamp + "-screenshot.png" driver.get_screenshot_as_file(screenshot) def save_source(driver, directory, timestamp): source_dir = directory + "/" + timestamp + "-source.log" f = open(source_dir,'w') f.write(driver.page_source.encode('utf-8')) def update_profiles(driver, profile, ads_profile): keywords = get_header_keywords(driver) ads = get_ads_links(driver) if len(keywords) > 0: mapping.map_keywords_to_wikipedia_categories(keywords, profile) if len(ads) > 0: ads_tags = mapping.extract_params_from_url(ads) mapping.map_keywords_to_wikipedia_categories(ads_tags, ads_profile) def save_screenshot_and_sources(driver, directory, timestamp): save_screenshot(driver, directory, timestamp) save_source(driver, directory, timestamp) save_ads_links(driver, directory, timestamp) save_headers(driver, directory, timestamp) def save_cookies(driver, directory): # Write cookies to load later pickle.dump(driver.get_cookies() , open("cookies/cookies.pkl","wb")) # Write cookies to file file_dir = directory + "/cookies/cookies.log" f = open(file_dir,'w') cookies = driver.get_cookies() f.write(cookies) # python will convert \n to os.linesep f.close() def visit_list(url_list, driver, profile, ads_profile): """ Visit URL list and dump stuff """ current_time = "" current_time = str(time.strftime("%m%d%y%H%M%S", time.localtime())) directory = 'session' + str(current_time) if not os.path.exists(directory): os.makedirs(directory) os.makedirs(directory + "/cookies") for url in url_list: time.sleep(randint(5,15)) current_time = str(time.strftime("%m%d%y%H%M%S", time.localtime())) n = 1 # randint(2,9) visit_link(url, driver, profile, ads_profile, directory, current_time) i=0 while i < n: links = driver.find_elements_by_tag_name('a') if links: link = random_elem(links) time.sleep(randint(25,35)) current_time = str(time.strftime("%m%d%y%H%M%S", time.localtime())) visit_link(link.get_attribute("href"), driver, profile, ads_profile, directory, current_time) driver.back() # return to page that has 1,2,3,next -like links time.sleep(randint(5,15)) i +=1 else: break save_cookies(driver, directory) def visit_link(url, driver, profile, ads_profile, directory, timestamp): driver.get(url) # load page save_screenshot_and_sources(driver, directory, timestamp) update_profiles(driver, profile, ads_profile) mapping.create_bar_chart(profile, ads_profile, 16, directory, timestamp)
nopressurelabs/privacypy
crawl.py
Python
gpl-3.0
6,484
[ "VisIt" ]
cefa80357b3b755e7d89e98b279c70440fdcb863a1a42e0620956da3a6044158
from ee.cli.plugins.stack import EEStackController from ee.core.fileutils import EEFileUtils from ee.core.mysql import * from ee.core.shellexec import * from ee.core.sslutils import SSL from ee.core.variables import EEVariables from ee.cli.plugins.sitedb import * from ee.core.aptget import EEAptGet from ee.core.git import EEGit from ee.core.logging import Log from ee.core.sendmail import EESendMail from ee.core.services import EEService import subprocess from subprocess import CalledProcessError import os import random import string import sys import getpass import glob import re import platform class SiteError(Exception): """Custom Exception Occured when setting up site""" def __init__(self, message): self.message = message def __str__(self): return repr(self.message) def pre_run_checks(self): # Check nginx configuration Log.info(self, "Running pre-update checks, please wait...") try: Log.debug(self, "checking NGINX configuration ...") FNULL = open('/dev/null', 'w') ret = subprocess.check_call(["nginx", "-t"], stdout=FNULL, stderr=subprocess.STDOUT) except CalledProcessError as e: Log.debug(self, "{0}".format(str(e))) raise SiteError("nginx configuration check failed.") def check_domain_exists(self, domain): if getSiteInfo(self, domain): return True else: return False def setupdomain(self, data): #for debug purpose # for key, value in data.items() : # print (key, value) ee_domain_name = data['site_name'] ee_site_webroot = data['webroot'] if 'webroot' in data.keys() else '' # Check if nginx configuration already exists # if os.path.isfile('/etc/nginx/sites-available/{0}' # .format(ee_domain_name)): # raise SiteError("nginx configuration already exists for site") Log.info(self, "Setting up NGINX configuration \t", end='') # write nginx config for file try: ee_site_nginx_conf = open('/etc/nginx/sites-available/{0}' .format(ee_domain_name), encoding='utf-8', mode='w') if not data['php7']: self.app.render((data), 'virtualconf.mustache', out=ee_site_nginx_conf) else: self.app.render((data), 'virtualconf-php7.mustache', out=ee_site_nginx_conf) ee_site_nginx_conf.close() except IOError as e: Log.debug(self, "{0}".format(e)) raise SiteError("create nginx configuration failed for site") except Exception as e: Log.debug(self, "{0}".format(e)) raise SiteError("create nginx configuration failed for site") finally: # Check nginx -t and return status over it try: Log.debug(self, "Checking generated nginx conf, please wait...") FNULL = open('/dev/null', 'w') ret = subprocess.check_call(["nginx", "-t"], stdout=FNULL, stderr=subprocess.STDOUT) Log.info(self, "[" + Log.ENDC + "Done" + Log.OKBLUE + "]") except CalledProcessError as e: Log.debug(self, "{0}".format(str(e))) Log.info(self, "[" + Log.ENDC + Log.FAIL + "Fail" + Log.OKBLUE + "]") raise SiteError("created nginx configuration failed for site." 
" check with `nginx -t`") # create symbolic link for EEFileUtils.create_symlink(self, ['/etc/nginx/sites-available/{0}' .format(ee_domain_name), '/etc/nginx/sites-enabled/{0}' .format(ee_domain_name)]) if 'proxy' in data.keys() and data['proxy']: return # Creating htdocs & logs directory Log.info(self, "Setting up webroot \t\t", end='') try: if not os.path.exists('{0}/htdocs'.format(ee_site_webroot)): os.makedirs('{0}/htdocs'.format(ee_site_webroot)) if not os.path.exists('{0}/logs'.format(ee_site_webroot)): os.makedirs('{0}/logs'.format(ee_site_webroot)) if not os.path.exists('{0}/conf/nginx'.format(ee_site_webroot)): os.makedirs('{0}/conf/nginx'.format(ee_site_webroot)) EEFileUtils.create_symlink(self, ['/var/log/nginx/{0}.access.log' .format(ee_domain_name), '{0}/logs/access.log' .format(ee_site_webroot)]) EEFileUtils.create_symlink(self, ['/var/log/nginx/{0}.error.log' .format(ee_domain_name), '{0}/logs/error.log' .format(ee_site_webroot)]) except Exception as e: Log.debug(self, "{0}".format(e)) raise SiteError("setup webroot failed for site") finally: # TODO Check if directories are setup if (os.path.exists('{0}/htdocs'.format(ee_site_webroot)) and os.path.exists('{0}/logs'.format(ee_site_webroot))): Log.info(self, "[" + Log.ENDC + "Done" + Log.OKBLUE + "]") else: Log.info(self, "[" + Log.ENDC + "Fail" + Log.OKBLUE + "]") raise SiteError("setup webroot failed for site") def setupdatabase(self, data): ee_domain_name = data['site_name'] ee_random = (''.join(random.sample(string.ascii_uppercase + string.ascii_lowercase + string.digits, 15))) ee_replace_dot = ee_domain_name.replace('.', '_') prompt_dbname = self.app.config.get('mysql', 'db-name') prompt_dbuser = self.app.config.get('mysql', 'db-user') ee_mysql_grant_host = self.app.config.get('mysql', 'grant-host') ee_db_name = '' ee_db_username = '' ee_db_password = '' if prompt_dbname == 'True' or prompt_dbname == 'true': try: ee_db_name = input('Enter the MySQL database name [{0}]: ' .format(ee_replace_dot)) except EOFError as e: Log.debug(self, "{0}".format(e)) raise SiteError("Unable to input database name") if not ee_db_name: ee_db_name = ee_replace_dot if prompt_dbuser == 'True' or prompt_dbuser == 'true': try: ee_db_username = input('Enter the MySQL database user name [{0}]: ' .format(ee_replace_dot)) ee_db_password = getpass.getpass(prompt='Enter the MySQL database' ' password [{0}]: ' .format(ee_random)) except EOFError as e: Log.debug(self, "{0}".format(e)) raise SiteError("Unable to input database credentials") if not ee_db_username: ee_db_username = ee_replace_dot if not ee_db_password: ee_db_password = ee_random if len(ee_db_username) > 16: Log.debug(self, 'Autofix MySQL username (ERROR 1470 (HY000)),' ' please wait') ee_db_username = (ee_db_name[0:6] + generate_random()) # create MySQL database Log.info(self, "Setting up database\t\t", end='') Log.debug(self, "Creating database {0}".format(ee_db_name)) try: if EEMysql.check_db_exists(self, ee_db_name): Log.debug(self, "Database already exists, Updating DB_NAME .. 
") ee_db_name = (ee_db_name[0:6] + generate_random()) ee_db_username = (ee_db_name[0:6] + generate_random()) except MySQLConnectionError as e: raise SiteError("MySQL Connectivity problem occured") try: EEMysql.execute(self, "create database `{0}`" .format(ee_db_name)) except StatementExcecutionError as e: Log.info(self, "[" + Log.ENDC + Log.FAIL + "Failed" + Log.OKBLUE + "]") raise SiteError("create database execution failed") # Create MySQL User Log.debug(self, "Creating user {0}".format(ee_db_username)) Log.debug(self, "create user `{0}`@`{1}` identified by ''" .format(ee_db_username, ee_mysql_grant_host)) try: EEMysql.execute(self, "create user `{0}`@`{1}` identified by '{2}'" .format(ee_db_username, ee_mysql_grant_host, ee_db_password), log=False) except StatementExcecutionError as e: Log.info(self, "[" + Log.ENDC + Log.FAIL + "Failed" + Log.OKBLUE + "]") raise SiteError("creating user failed for database") # Grant permission Log.debug(self, "Setting up user privileges") try: EEMysql.execute(self, "grant all privileges on `{0}`.* to `{1}`@`{2}`" .format(ee_db_name, ee_db_username, ee_mysql_grant_host)) except StatementExcecutionError as e: Log.info(self, "[" + Log.ENDC + Log.FAIL + "Failed" + Log.OKBLUE + "]") SiteError("grant privileges to user failed for database ") Log.info(self, "[" + Log.ENDC + "Done" + Log.OKBLUE + "]") data['ee_db_name'] = ee_db_name data['ee_db_user'] = ee_db_username data['ee_db_pass'] = ee_db_password data['ee_db_host'] = EEVariables.ee_mysql_host data['ee_mysql_grant_host'] = ee_mysql_grant_host return(data) def setupwordpress(self, data): ee_domain_name = data['site_name'] ee_site_webroot = data['webroot'] prompt_wpprefix = self.app.config.get('wordpress', 'prefix') ee_wp_user = self.app.config.get('wordpress', 'user') ee_wp_pass = self.app.config.get('wordpress', 'password') ee_wp_email = self.app.config.get('wordpress', 'email') # Random characters ee_random = (''.join(random.sample(string.ascii_uppercase + string.ascii_lowercase + string.digits, 15))) ee_wp_prefix = '' # ee_wp_user = '' # ee_wp_pass = '' if 'wp-user' in data.keys() and data['wp-user']: ee_wp_user = data['wp-user'] if 'wp-email' in data.keys() and data['wp-email']: ee_wp_email = data['wp-email'] if 'wp-pass' in data.keys() and data['wp-pass']: ee_wp_pass = data['wp-pass'] Log.info(self, "Downloading WordPress \t\t", end='') EEFileUtils.chdir(self, '{0}/htdocs/'.format(ee_site_webroot)) try: if EEShellExec.cmd_exec(self, "wp --allow-root core" " download"): pass else: Log.info(self, "[" + Log.ENDC + Log.FAIL + "Fail" + Log.OKBLUE + "]") raise SiteError("download WordPress core failed") except CommandExecutionError as e: Log.info(self, "[" + Log.ENDC + Log.FAIL + "Fail" + Log.OKBLUE + "]") raise SiteError(self, "download WordPress core failed") Log.info(self, "[" + Log.ENDC + "Done" + Log.OKBLUE + "]") if not (data['ee_db_name'] and data['ee_db_user'] and data['ee_db_pass']): data = setupdatabase(self, data) if prompt_wpprefix == 'True' or prompt_wpprefix == 'true': try: ee_wp_prefix = input('Enter the WordPress table prefix [wp_]: ') while not re.match('^[A-Za-z0-9_]*$', ee_wp_prefix): Log.warn(self, "table prefix can only " "contain numbers, letters, and underscores") ee_wp_prefix = input('Enter the WordPress table prefix [wp_]: ' ) except EOFError as e: Log.debug(self, "{0}".format(e)) raise SiteError("input table prefix failed") if not ee_wp_prefix: ee_wp_prefix = 'wp_' # Modify wp-config.php & move outside the webroot EEFileUtils.chdir(self, '{0}/htdocs/'.format(ee_site_webroot)) 
Log.debug(self, "Setting up wp-config file") if not data['multisite']: Log.debug(self, "Generating wp-config for WordPress Single site") Log.debug(self, "bash -c \"php {0} --allow-root " .format(EEVariables.ee_wpcli_path) + "core config " + "--dbname=\'{0}\' --dbprefix=\'{1}\' --dbuser=\'{2}\' " "--dbhost=\'{3}\' " .format(data['ee_db_name'], ee_wp_prefix, data['ee_db_user'], data['ee_db_host']) + "--dbpass= " "--extra-php<<PHP \n {1}\nPHP\"" .format(data['ee_db_pass'], "\n\ndefine(\'WP_DEBUG\', false);")) try: if EEShellExec.cmd_exec(self, "bash -c \"php {0} --allow-root" .format(EEVariables.ee_wpcli_path) + " core config " + "--dbname=\'{0}\' --dbprefix=\'{1}\' " "--dbuser=\'{2}\' --dbhost=\'{3}\' " .format(data['ee_db_name'], ee_wp_prefix, data['ee_db_user'], data['ee_db_host'] ) + "--dbpass=\'{0}\' " "--extra-php<<PHP \n {1} {redissalt}\nPHP\"" .format(data['ee_db_pass'], "\n\ndefine(\'WP_DEBUG\', false);", redissalt="\n\ndefine( \'WP_CACHE_KEY_SALT\', \'{0}:\' );" .format(ee_domain_name) if data['wpredis'] else ''), log=False ): pass else : raise SiteError("generate wp-config failed for wp single site") except CommandExecutionError as e: raise SiteError("generate wp-config failed for wp single site") else: Log.debug(self, "Generating wp-config for WordPress multisite") Log.debug(self, "bash -c \"php {0} --allow-root " .format(EEVariables.ee_wpcli_path) + "core config " + "--dbname=\'{0}\' --dbprefix=\'{1}\' --dbhost=\'{2}\' " .format(data['ee_db_name'], ee_wp_prefix, data['ee_db_host']) + "--dbuser=\'{0}\' --dbpass= " "--extra-php<<PHP \n {2} {3} {4}\nPHP\"" .format(data['ee_db_user'], data['ee_db_pass'], "\ndefine(\'WP_ALLOW_MULTISITE\', " "true);", "\ndefine(\'WPMU_ACCEL_REDIRECT\'," " true);", "\n\ndefine(\'WP_DEBUG\', false);")) try: if EEShellExec.cmd_exec(self, "bash -c \"php {0} --allow-root" .format(EEVariables.ee_wpcli_path) + " core config " + "--dbname=\'{0}\' --dbprefix=\'{1}\' " "--dbhost=\'{2}\' " .format(data['ee_db_name'], ee_wp_prefix, data['ee_db_host']) + "--dbuser=\'{0}\' --dbpass=\'{1}\' " "--extra-php<<PHP \n {2} {3} {4} {redissalt}\nPHP\"" .format(data['ee_db_user'], data['ee_db_pass'], "\ndefine(\'WP_ALLOW_MULTISITE\', " "true);", "\ndefine(\'WPMU_ACCEL_REDIRECT\'," " true);", "\n\ndefine(\'WP_DEBUG\', false);", redissalt="\n\ndefine( \'WP_CACHE_KEY_SALT\', \'{0}:\' );" .format(ee_domain_name) if data['wpredis'] else ''), log=False ): pass else: raise SiteError("generate wp-config failed for wp multi site") except CommandExecutionError as e: raise SiteError("generate wp-config failed for wp multi site") #EEFileUtils.mvfile(self, os.getcwd()+'/wp-config.php', # os.path.abspath(os.path.join(os.getcwd(), os.pardir))) try: import shutil Log.debug(self, "Moving file from {0} to {1}".format(os.getcwd()+'/wp-config.php',os.path.abspath(os.path.join(os.getcwd(), os.pardir)))) shutil.move(os.getcwd()+'/wp-config.php',os.path.abspath(os.path.join(os.getcwd(), os.pardir))) except Exception as e: Log.error(self, 'Unable to move file from {0} to {1}' .format(os.getcwd()+'/wp-config.php', os.path.abspath(os.path.join(os.getcwd(), os.pardir))),False) raise SiteError("Unable to move wp-config.php") if not ee_wp_user: ee_wp_user = EEVariables.ee_user while not ee_wp_user: Log.warn(self, "Username can have only alphanumeric" "characters, spaces, underscores, hyphens," "periods and the @ symbol.") try: ee_wp_user = input('Enter WordPress username: ') except EOFError as e: Log.debug(self, "{0}".format(e)) raise SiteError("input WordPress username failed") if not ee_wp_pass: 
ee_wp_pass = ee_random if not ee_wp_email: ee_wp_email = EEVariables.ee_email while not ee_wp_email: try: ee_wp_email = input('Enter WordPress email: ') except EOFError as e: Log.debug(self, "{0}".format(e)) raise SiteError("input WordPress username failed") try: while not re.match(r"^[A-Za-z0-9\.\+_-]+@[A-Za-z0-9\._-]+\.[a-zA-Z]*$", ee_wp_email): Log.info(self, "EMail not Valid in config, " "Please provide valid email id") ee_wp_email = input("Enter your email: ") except EOFError as e: Log.debug(self, "{0}".format(e)) raise SiteError("input WordPress user email failed") Log.debug(self, "Setting up WordPress tables") if not data['multisite']: Log.debug(self, "Creating tables for WordPress Single site") Log.debug(self, "php {0} --allow-root core install " .format(EEVariables.ee_wpcli_path) + "--url=\'{0}\' --title=\'{0}\' --admin_name=\'{1}\' " .format(data['www_domain'], ee_wp_user) + "--admin_password= --admin_email=\'{1}\'" .format(ee_wp_pass, ee_wp_email)) try: if EEShellExec.cmd_exec(self, "php {0} --allow-root core " .format(EEVariables.ee_wpcli_path) + "install --url=\'{0}\' --title=\'{0}\' " "--admin_name=\'{1}\' " .format(data['www_domain'], ee_wp_user) + "--admin_password=\'{0}\' " "--admin_email=\'{1}\'" .format(ee_wp_pass, ee_wp_email), log=False): pass else: raise SiteError("setup WordPress tables failed for single site") except CommandExecutionError as e: raise SiteError("setup WordPress tables failed for single site") else: Log.debug(self, "Creating tables for WordPress multisite") Log.debug(self, "php {0} --allow-root " .format(EEVariables.ee_wpcli_path) + "core multisite-install " "--url=\'{0}\' --title=\'{0}\' --admin_name=\'{1}\' " .format(data['www_domain'], ee_wp_user) + "--admin_password= --admin_email=\'{1}\' " "{subdomains}" .format(ee_wp_pass, ee_wp_email, subdomains='--subdomains' if not data['wpsubdir'] else '')) try: if EEShellExec.cmd_exec(self, "php {0} --allow-root " .format(EEVariables.ee_wpcli_path) + "core multisite-install " "--url=\'{0}\' --title=\'{0}\' " "--admin_name=\'{1}\' " .format(data['www_domain'], ee_wp_user) + "--admin_password=\'{0}\' " "--admin_email=\'{1}\' " "{subdomains}" .format(ee_wp_pass, ee_wp_email, subdomains='--subdomains' if not data['wpsubdir'] else ''), log=False): pass else: raise SiteError("setup WordPress tables failed for wp multi site") except CommandExecutionError as e: raise SiteError("setup WordPress tables failed for wp multi site") Log.debug(self, "Updating WordPress permalink") try: EEShellExec.cmd_exec(self, " php {0} --allow-root " .format(EEVariables.ee_wpcli_path) + "rewrite structure " "/%year%/%monthnum%/%day%/%postname%/") except CommandExecutionError as e: raise SiteError("Update wordpress permalinks failed") """Install nginx-helper plugin """ installwp_plugin(self, 'nginx-helper', data) if data['wpfc']: plugin_data = '{"log_level":"INFO","log_filesize":5,"enable_purge":1,"enable_map":0,"enable_log":0,"enable_stamp":0,"purge_homepage_on_new":1,"purge_homepage_on_edit":1,"purge_homepage_on_del":1,"purge_archive_on_new":1,"purge_archive_on_edit":0,"purge_archive_on_del":0,"purge_archive_on_new_comment":0,"purge_archive_on_deleted_comment":0,"purge_page_on_mod":1,"purge_page_on_new_comment":1,"purge_page_on_deleted_comment":1,"cache_method":"enable_fastcgi","purge_method":"get_request","redis_hostname":"127.0.0.1","redis_port":"6379","redis_prefix":"nginx-cache:"}' setupwp_plugin(self, 'nginx-helper', 'rt_wp_nginx_helper_options', plugin_data, data) elif data['wpredis']: plugin_data = 
'{"log_level":"INFO","log_filesize":5,"enable_purge":1,"enable_map":0,"enable_log":0,"enable_stamp":0,"purge_homepage_on_new":1,"purge_homepage_on_edit":1,"purge_homepage_on_del":1,"purge_archive_on_new":1,"purge_archive_on_edit":0,"purge_archive_on_del":0,"purge_archive_on_new_comment":0,"purge_archive_on_deleted_comment":0,"purge_page_on_mod":1,"purge_page_on_new_comment":1,"purge_page_on_deleted_comment":1,"cache_method":"enable_redis","purge_method":"get_request","redis_hostname":"127.0.0.1","redis_port":"6379","redis_prefix":"nginx-cache:"}' setupwp_plugin(self, 'nginx-helper', 'rt_wp_nginx_helper_options', plugin_data, data) """Install Wp Super Cache""" if data['wpsc']: installwp_plugin(self, 'wp-super-cache', data) """Install Redis Cache""" if data['wpredis']: installwp_plugin(self, 'redis-cache', data) """Install W3 Total Cache""" if data['w3tc'] or data['wpfc']: installwp_plugin(self, 'w3-total-cache', data) wp_creds = dict(wp_user=ee_wp_user, wp_pass=ee_wp_pass, wp_email=ee_wp_email) return(wp_creds) def setupwordpressnetwork(self, data): ee_site_webroot = data['webroot'] EEFileUtils.chdir(self, '{0}/htdocs/'.format(ee_site_webroot)) Log.info(self, "Setting up WordPress Network \t", end='') try: if EEShellExec.cmd_exec(self, 'wp --allow-root core multisite-convert' ' --title=\'{0}\' {subdomains}' .format(data['www_domain'], subdomains='--subdomains' if not data['wpsubdir'] else '')): pass else: Log.info(self, "[" + Log.ENDC + Log.FAIL + "Fail" + Log.OKBLUE + "]") raise SiteError("setup WordPress network failed") except CommandExecutionError as e: Log.info(self, "[" + Log.ENDC + Log.FAIL + "Fail" + Log.OKBLUE + "]") raise SiteError("setup WordPress network failed") Log.info(self, "[" + Log.ENDC + "Done" + Log.OKBLUE + "]") def installwp_plugin(self, plugin_name, data): ee_site_webroot = data['webroot'] Log.info(self, "Installing plugin {0}, please wait..." .format(plugin_name)) EEFileUtils.chdir(self, '{0}/htdocs/'.format(ee_site_webroot)) try: EEShellExec.cmd_exec(self, "php {0} plugin " .format(EEVariables.ee_wpcli_path) + "--allow-root install " "{0}".format(plugin_name)) except CommandExecutionError as e: raise SiteError("plugin installation failed") try: EEShellExec.cmd_exec(self, "php {0} plugin " .format(EEVariables.ee_wpcli_path) + "--allow-root activate " "{0} {na}" .format(plugin_name, na='--network' if data['multisite'] else '' )) except CommandExecutionError as e: raise SiteError("plugin activation failed") return 1 def uninstallwp_plugin(self, plugin_name, data): ee_site_webroot = data['webroot'] Log.debug(self, "Uninstalling plugin {0}, please wait..." .format(plugin_name)) EEFileUtils.chdir(self, '{0}/htdocs/'.format(ee_site_webroot)) Log.info(self, "Uninstalling plugin {0}, please wait..." .format(plugin_name)) try: EEShellExec.cmd_exec(self, "php {0} plugin " .format(EEVariables.ee_wpcli_path) + "--allow-root deactivate " "{0}".format(plugin_name)) EEShellExec.cmd_exec(self, "php {0} plugin " .format(EEVariables.ee_wpcli_path) + "--allow-root uninstall " "{0}".format(plugin_name)) except CommandExecutionError as e: raise SiteError("plugin uninstall failed") def setupwp_plugin(self, plugin_name, plugin_option, plugin_data, data): ee_site_webroot = data['webroot'] Log.info(self, "Setting plugin {0}, please wait..." 
.format(plugin_name)) EEFileUtils.chdir(self, '{0}/htdocs/'.format(ee_site_webroot)) if not data['multisite']: try: EEShellExec.cmd_exec(self, "php {0} " .format(EEVariables.ee_wpcli_path) + "--allow-root option update " "{0} \'{1}\' --format=json".format(plugin_option, plugin_data)) except CommandExecutionError as e: raise SiteError("plugin setup failed") else: try: EEShellExec.cmd_exec(self, "php {0} " .format(EEVariables.ee_wpcli_path) + "--allow-root network meta update 1 " "{0} \'{1}\' --format=json" .format(plugin_option, plugin_data )) except CommandExecutionError as e: raise SiteError("plugin setup failed") def setwebrootpermissions(self, webroot): Log.debug(self, "Setting up permissions") try: EEFileUtils.chown(self, webroot, EEVariables.ee_php_user, EEVariables.ee_php_user, recursive=True) except Exception as e: Log.debug(self, str(e)) raise SiteError("problem occured while setting up webroot permissions") def sitebackup(self, data): ee_site_webroot = data['webroot'] backup_path = ee_site_webroot + '/backup/{0}'.format(EEVariables.ee_date) if not EEFileUtils.isexist(self, backup_path): EEFileUtils.mkdir(self, backup_path) Log.info(self, "Backup location : {0}".format(backup_path)) EEFileUtils.copyfile(self, '/etc/nginx/sites-available/{0}' .format(data['site_name']), backup_path) if data['currsitetype'] in ['html', 'php', 'proxy', 'mysql']: if (data['pagespeed'] is True or data['old_pagespeed_status'] is True or data['php7'] is True) and not data['wp']: Log.info(self, "Backing up Webroot \t\t", end='') EEFileUtils.copyfiles(self, ee_site_webroot + '/htdocs', backup_path + '/htdocs') Log.info(self, "[" + Log.ENDC + "Done" + Log.OKBLUE + "]") else: Log.info(self, "Backing up Webroot \t\t", end='') EEFileUtils.mvfile(self, ee_site_webroot + '/htdocs', backup_path) Log.info(self, "[" + Log.ENDC + "Done" + Log.OKBLUE + "]") configfiles = glob.glob(ee_site_webroot + '/*-config.php') if not configfiles: #search for wp-config.php inside htdocs/ Log.debug(self, "Config files not found in {0}/ " .format(ee_site_webroot)) if data['currsitetype'] in ['mysql']: pass else: Log.debug(self, "Searching wp-config.php in {0}/htdocs/ " .format(ee_site_webroot)) configfiles = glob.glob(ee_site_webroot + '/htdocs/wp-config.php') # if configfiles and EEFileUtils.isexist(self, configfiles[0]): # ee_db_name = (EEFileUtils.grep(self, configfiles[0], # 'DB_NAME').split(',')[1] # .split(')')[0].strip().replace('\'', '')) if data['ee_db_name']: Log.info(self, 'Backing up database \t\t', end='') try: if not EEShellExec.cmd_exec(self, "mysqldump {0} > {1}/{0}.sql" .format(data['ee_db_name'], backup_path)): Log.info(self, "[" + Log.ENDC + Log.FAIL + "Fail" + Log.OKBLUE + "]") raise SiteError("mysqldump failed to backup database") except CommandExecutionError as e: Log.info(self, "[" + Log.ENDC + "Fail" + Log.OKBLUE + "]") raise SiteError("mysqldump failed to backup database") Log.info(self, "[" + Log.ENDC + "Done" + Log.OKBLUE + "]") # move wp-config.php/ee-config.php to backup if data['currsitetype'] in ['mysql', 'proxy']: if (data['pagespeed'] is True or data['old_pagespeed_status'] is True or data['php7'] is True) and not data['wp']: EEFileUtils.copyfile(self, configfiles[0], backup_path) else: EEFileUtils.mvfile(self, configfiles[0], backup_path) else: EEFileUtils.copyfile(self, configfiles[0], backup_path) def site_package_check(self, stype): apt_packages = [] packages = [] stack = EEStackController() stack.app = self.app if stype in ['html', 'proxy', 'php', 'mysql', 'wp', 'wpsubdir', 'wpsubdomain', 
'php7']: Log.debug(self, "Setting apt_packages variable for Nginx") # Check if server has nginx-custom package if not (EEAptGet.is_installed(self, 'nginx-custom') or EEAptGet.is_installed(self, 'nginx-mainline')): # check if Server has nginx-plus installed if EEAptGet.is_installed(self, 'nginx-plus'): # do something # do post nginx installation configuration Log.info(self, "NGINX PLUS Detected ...") apt = ["nginx-plus"] + EEVariables.ee_nginx #apt_packages = apt_packages + EEVariables.ee_nginx stack.post_pref(apt, packages) else: apt_packages = apt_packages + EEVariables.ee_nginx else: # Fix for Nginx white screen death if not EEFileUtils.grep(self, '/etc/nginx/fastcgi_params', 'SCRIPT_FILENAME'): with open('/etc/nginx/fastcgi_params', encoding='utf-8', mode='a') as ee_nginx: ee_nginx.write('fastcgi_param \tSCRIPT_FILENAME ' '\t$request_filename;\n') if self.app.pargs.php and self.app.pargs.php7: Log.error(self,"INVALID OPTION: PHP 7.0 provided with PHP 5.0") if not self.app.pargs.php7 and stype in ['php', 'mysql', 'wp', 'wpsubdir', 'wpsubdomain']: Log.debug(self, "Setting apt_packages variable for PHP") if EEVariables.ee_platform_codename != 'trusty': if not EEAptGet.is_installed(self, 'php5-fpm'): apt_packages = apt_packages + EEVariables.ee_php else: if not EEAptGet.is_installed(self, 'php5.6-fpm'): apt_packages = apt_packages + EEVariables.ee_php5_6 + EEVariables.ee_php_extra if self.app.pargs.php7 and stype in [ 'mysql', 'wp', 'wpsubdir', 'wpsubdomain']: if EEVariables.ee_platform_codename == 'trusty': Log.debug(self, "Setting apt_packages variable for PHP 5.6") if not EEAptGet.is_installed(self, 'php5.6-fpm'): apt_packages = apt_packages + EEVariables.ee_php5_6 Log.debug(self, "Setting apt_packages variable for PHP 7.0") if not EEAptGet.is_installed(self, 'php7.0-fpm'): apt_packages = apt_packages + EEVariables.ee_php7_0 + EEVariables.ee_php_extra else: Log.warn(self, "PHP 7.0 not available for your system.") Log.info(self,"Creating site with PHP 5.6") if not EEAptGet.is_installed(self, 'php5-fpm'): Log.info(self, "Setting apt_packages variable for PHP") Log.debug(self, "Setting apt_packages variable for PHP") apt_packages = apt_packages + EEVariables.ee_php if stype in ['mysql', 'wp', 'wpsubdir', 'wpsubdomain']: Log.debug(self, "Setting apt_packages variable for MySQL") if not EEShellExec.cmd_exec(self, "mysqladmin ping"): apt_packages = apt_packages + EEVariables.ee_mysql packages = packages + [["https://raw.githubusercontent.com/" "major/MySQLTuner-perl/master/" "mysqltuner.pl", "/usr/bin/mysqltuner", "MySQLTuner"]] if stype in ['php', 'mysql', 'wp', 'wpsubdir', 'wpsubdomain']: Log.debug(self, "Setting apt_packages variable for Postfix") if not EEAptGet.is_installed(self, 'postfix'): apt_packages = apt_packages + EEVariables.ee_postfix if stype in ['wp', 'wpsubdir', 'wpsubdomain']: Log.debug(self, "Setting packages variable for WP-CLI") if not EEShellExec.cmd_exec(self, "which wp"): packages = packages + [["https://github.com/wp-cli/wp-cli/" "releases/download/v{0}/" "wp-cli-{0}.phar" .format(EEVariables.ee_wp_cli), "/usr/bin/wp", "WP-CLI"]] if self.app.pargs.wpredis: Log.debug(self, "Setting apt_packages variable for redis") if not EEAptGet.is_installed(self, 'redis-server'): apt_packages = apt_packages + EEVariables.ee_redis if os.path.isfile("/etc/nginx/nginx.conf") and (not os.path.isfile("/etc/nginx/common/redis.conf")): data = dict() Log.debug(self, 'Writting the nginx configuration to ' 'file /etc/nginx/common/redis.conf') ee_nginx = open('/etc/nginx/common/redis.conf', 
encoding='utf-8', mode='w') self.app.render((data), 'redis.mustache', out=ee_nginx) ee_nginx.close() if os.path.isfile("/etc/nginx/nginx.conf") and (not os.path.isfile("/etc/nginx/common/redis-hhvm.conf")): data = dict() Log.debug(self, 'Writting the nginx configuration to ' 'file /etc/nginx/common/redis-hhvm.conf') ee_nginx = open('/etc/nginx/common/redis-hhvm.conf', encoding='utf-8', mode='w') self.app.render((data), 'redis-hhvm.mustache', out=ee_nginx) ee_nginx.close() if os.path.isfile("/etc/nginx/conf.d/upstream.conf"): if not EEFileUtils.grep(self, "/etc/nginx/conf.d/" "upstream.conf", "redis"): with open("/etc/nginx/conf.d/upstream.conf", "a") as redis_file: redis_file.write("upstream redis {\n" " server 127.0.0.1:6379;\n" " keepalive 10;\n}") if os.path.isfile("/etc/nginx/nginx.conf") and (not os.path.isfile("/etc/nginx/conf.d/redis.conf")): with open("/etc/nginx/conf.d/redis.conf", "a") as redis_file: redis_file.write("# Log format Settings\n" "log_format rt_cache_redis '$remote_addr $upstream_response_time $srcache_fetch_status [$time_local] '\n" "'$http_host \"$request\" $status $body_bytes_sent '\n" "'\"$http_referer\" \"$http_user_agent\"';\n") if self.app.pargs.hhvm: if platform.architecture()[0] is '32bit': Log.error(self, "HHVM is not supported by 32bit system") Log.debug(self, "Setting apt_packages variable for HHVM") if not EEAptGet.is_installed(self, 'hhvm'): apt_packages = apt_packages + EEVariables.ee_hhvm if os.path.isdir("/etc/nginx/common") and (not os.path.isfile("/etc/nginx/common/php-hhvm.conf")): data = dict() Log.debug(self, 'Writting the nginx configuration to ' 'file /etc/nginx/common/php-hhvm.conf') ee_nginx = open('/etc/nginx/common/php-hhvm.conf', encoding='utf-8', mode='w') self.app.render((data), 'php-hhvm.mustache', out=ee_nginx) ee_nginx.close() Log.debug(self, 'Writting the nginx configuration to ' 'file /etc/nginx/common/w3tc-hhvm.conf') ee_nginx = open('/etc/nginx/common/w3tc-hhvm.conf', encoding='utf-8', mode='w') self.app.render((data), 'w3tc-hhvm.mustache', out=ee_nginx) ee_nginx.close() Log.debug(self, 'Writting the nginx configuration to ' 'file /etc/nginx/common/wpfc-hhvm.conf') ee_nginx = open('/etc/nginx/common/wpfc-hhvm.conf', encoding='utf-8', mode='w') self.app.render((data), 'wpfc-hhvm.mustache', out=ee_nginx) ee_nginx.close() Log.debug(self, 'Writting the nginx configuration to ' 'file /etc/nginx/common/wpsc-hhvm.conf') ee_nginx = open('/etc/nginx/common/wpsc-hhvm.conf', encoding='utf-8', mode='w') self.app.render((data), 'wpsc-hhvm.mustache', out=ee_nginx) ee_nginx.close() if os.path.isfile("/etc/nginx/conf.d/upstream.conf"): if not EEFileUtils.grep(self, "/etc/nginx/conf.d/upstream.conf", "hhvm"): with open("/etc/nginx/conf.d/upstream.conf", "a") as hhvm_file: hhvm_file.write("upstream hhvm {\nserver 127.0.0.1:8000;\n" "server 127.0.0.1:9000 backup;\n}\n") if self.app.pargs.php7: if EEVariables.ee_platform_codename != 'trusty': Log.error(self,"PHP 7.0 is not supported in your Platform") Log.debug(self, "Setting apt_packages variable for PHP 7.0") if not EEAptGet.is_installed(self, 'php7.0-fpm'): apt_packages = apt_packages + EEVariables.ee_php7_0 + EEVariables.ee_php_extra if os.path.isdir("/etc/nginx/common") and (not os.path.isfile("/etc/nginx/common/php7.conf")): data = dict() Log.debug(self, 'Writting the nginx configuration to ' 'file /etc/nginx/common/locations-php7.conf') ee_nginx = open('/etc/nginx/common/locations-php7.conf', encoding='utf-8', mode='w') self.app.render((data), 'locations-php7.mustache', out=ee_nginx) 
ee_nginx.close() Log.debug(self, 'Writting the nginx configuration to ' 'file /etc/nginx/common/php7.conf') ee_nginx = open('/etc/nginx/common/php7.conf', encoding='utf-8', mode='w') self.app.render((data), 'php7.mustache', out=ee_nginx) ee_nginx.close() Log.debug(self, 'Writting the nginx configuration to ' 'file /etc/nginx/common/w3tc-php7.conf') ee_nginx = open('/etc/nginx/common/w3tc-php7.conf', encoding='utf-8', mode='w') self.app.render((data), 'w3tc-php7.mustache', out=ee_nginx) ee_nginx.close() Log.debug(self, 'Writting the nginx configuration to ' 'file /etc/nginx/common/wpcommon-php7.conf') ee_nginx = open('/etc/nginx/common/wpcommon-php7.conf', encoding='utf-8', mode='w') self.app.render((data), 'wpcommon-php7.mustache', out=ee_nginx) ee_nginx.close() Log.debug(self, 'Writting the nginx configuration to ' 'file /etc/nginx/common/wpfc-php7.conf') ee_nginx = open('/etc/nginx/common/wpfc-php7.conf', encoding='utf-8', mode='w') self.app.render((data), 'wpfc-php7.mustache', out=ee_nginx) ee_nginx.close() Log.debug(self, 'Writting the nginx configuration to ' 'file /etc/nginx/common/wpsc-php7.conf') ee_nginx = open('/etc/nginx/common/wpsc-php7.conf', encoding='utf-8', mode='w') self.app.render((data), 'wpsc-php7.mustache', out=ee_nginx) ee_nginx.close() if os.path.isfile("/etc/nginx/nginx.conf") and (not os.path.isfile("/etc/nginx/common/redis-php7.conf")): data = dict() Log.debug(self, 'Writting the nginx configuration to ' 'file /etc/nginx/common/redis-php7.conf') ee_nginx = open('/etc/nginx/common/redis-php7.conf', encoding='utf-8', mode='w') self.app.render((data), 'redis-php7.mustache', out=ee_nginx) ee_nginx.close() if os.path.isfile("/etc/nginx/conf.d/upstream.conf"): if not EEFileUtils.grep(self, "/etc/nginx/conf.d/upstream.conf", "php7"): with open("/etc/nginx/conf.d/upstream.conf", "a") as php_file: php_file.write("upstream php7 {\nserver 127.0.0.1:9070;\n}\n" "upstream debug7 {\nserver 127.0.0.1:9170;\n}\n") # Check if Nginx is allready installed and Pagespeed config there or not # If not then copy pagespeed config if self.app.pargs.pagespeed: if (os.path.isfile('/etc/nginx/nginx.conf') and (not os.path.isfile('/etc/nginx/conf.d/pagespeed.conf'))): # Pagespeed configuration data = dict() Log.debug(self, 'Writting the Pagespeed Global ' 'configuration to file /etc/nginx/conf.d/' 'pagespeed.conf') ee_nginx = open('/etc/nginx/conf.d/pagespeed.conf', encoding='utf-8', mode='w') self.app.render((data), 'pagespeed-global.mustache', out=ee_nginx) ee_nginx.close() return(stack.install(apt_packages=apt_packages, packages=packages, disp_msg=False)) def updatewpuserpassword(self, ee_domain, ee_site_webroot): ee_wp_user = '' ee_wp_pass = '' EEFileUtils.chdir(self, '{0}/htdocs/'.format(ee_site_webroot)) # Check if ee_domain is wordpress install try: is_wp = EEShellExec.cmd_exec(self, "wp --allow-root core" " version") except CommandExecutionError as e: raise SiteError("is WordPress site? 
check command failed ") # Exit if ee_domain is not wordpress install if not is_wp: Log.error(self, "{0} does not seem to be a WordPress site" .format(ee_domain)) try: ee_wp_user = input("Provide WordPress user name [admin]: ") except Exception as e: Log.debug(self, "{0}".format(e)) Log.error(self, "\nCould not update password") if ee_wp_user == "?": Log.info(self, "Fetching WordPress user list") try: EEShellExec.cmd_exec(self, "wp --allow-root user list " "--fields=user_login | grep -v user_login") except CommandExecutionError as e: raise SiteError("fetch wp userlist command failed") if not ee_wp_user: ee_wp_user = 'admin' try: is_user_exist = EEShellExec.cmd_exec(self, "wp --allow-root user list " "--fields=user_login | grep {0}$ " .format(ee_wp_user)) except CommandExecutionError as e: raise SiteError("if wp user exists check command failed") if is_user_exist: try: ee_wp_pass = getpass.getpass(prompt="Provide password for " "{0} user: " .format(ee_wp_user)) while not ee_wp_pass: ee_wp_pass = getpass.getpass(prompt="Provide password for " "{0} user: " .format(ee_wp_user)) except Exception as e: Log.debug(self, "{0}".format(e)) raise SiteError("failed to read password input ") try: EEShellExec.cmd_exec(self, "wp --allow-root user update {0}" " --user_pass={1}" .format(ee_wp_user, ee_wp_pass)) except CommandExecutionError as e: raise SiteError("wp user password update command failed") Log.info(self, "Password updated successfully") else: Log.error(self, "Invalid WordPress user {0} for {1}." .format(ee_wp_user, ee_domain)) def display_cache_settings(self, data): if data['wpsc']: if data['multisite']: Log.info(self, "Configure WPSC:" "\t\thttp://{0}/wp-admin/network/settings.php?" "page=wpsupercache" .format(data['site_name'])) else: Log.info(self, "Configure WPSC:" "\t\thttp://{0}/wp-admin/options-general.php?" "page=wpsupercache" .format(data['site_name'])) if data['wpredis']: if data['multisite']: Log.info(self, "Configure redis-cache:" "\thttp://{0}/wp-admin/network/settings.php?" "page=redis-cache".format(data['site_name'])) else: Log.info(self, "Configure redis-cache:" "\thttp://{0}/wp-admin/options-general.php?" "page=redis-cache".format(data['site_name'])) Log.info(self, "Object Cache:\t\tEnable") if data['wpfc'] or data['w3tc']: if data['multisite']: Log.info(self, "Configure W3TC:" "\t\thttp://{0}/wp-admin/network/admin.php?" "page=w3tc_general".format(data['site_name'])) else: Log.info(self, "Configure W3TC:" "\t\thttp://{0}/wp-admin/admin.php?" "page=w3tc_general".format(data['site_name'])) if data['wpfc']: Log.info(self, "Page Cache:\t\tDisable") elif data['w3tc']: Log.info(self, "Page Cache:\t\tDisk Enhanced") Log.info(self, "Database Cache:\t\tMemcached") Log.info(self, "Object Cache:\t\tMemcached") Log.info(self, "Browser Cache:\t\tDisable") def logwatch(self, logfiles): import zlib import base64 import time from ee.core import logwatch def callback(filename, lines): for line in lines: if line.find(':::') == -1: print(line) else: data = line.split(':::') try: print(data[0], data[1], zlib.decompress(base64.decodestring(data[2]))) except Exception as e: Log.info(time.time(), 'caught exception rendering a new log line in %s' % filename) l = logwatch.LogWatcher(logfiles, callback) l.loop() def detSitePar(opts): """ Takes dictionary of parsed arguments 1.returns sitetype and cachetype 2. 
raises RuntimeError when wrong combination is used like "--wp --wpsubdir" or "--html --wp" """ sitetype, cachetype = '', '' typelist = list() cachelist = list() for key, val in opts.items(): if val and key in ['html', 'php', 'mysql', 'wp', 'wpsubdir', 'wpsubdomain','php7']: typelist.append(key) elif val and key in ['wpfc', 'wpsc', 'w3tc', 'wpredis']: cachelist.append(key) if len(typelist) > 1 or len(cachelist) > 1: if len(cachelist) > 1: raise RuntimeError("Could not determine cache type.Multiple cache parameter entered") elif False not in [x in ('php','mysql','html') for x in typelist]: sitetype = 'mysql' if not cachelist: cachetype = 'basic' else: cachetype = cachelist[0] elif False not in [x in ('php7','mysql','html') for x in typelist]: sitetype = 'mysql' if not cachelist: cachetype = 'basic' else: cachetype = cachelist[0] elif False not in [x in ('php','mysql') for x in typelist]: sitetype = 'mysql' if not cachelist: cachetype = 'basic' else: cachetype = cachelist[0] elif False not in [x in ('php7','mysql') for x in typelist]: sitetype = 'mysql' if not cachelist: cachetype = 'basic' else: cachetype = cachelist[0] elif False not in [x in ('html','mysql') for x in typelist]: sitetype = 'mysql' if not cachelist: cachetype = 'basic' else: cachetype = cachelist[0] elif False not in [x in ('php','html') for x in typelist]: sitetype = 'php' if not cachelist: cachetype = 'basic' else: cachetype = cachelist[0] elif False not in [x in ('php7','html') for x in typelist]: sitetype = 'php7' if not cachelist: cachetype = 'basic' else: cachetype = cachelist[0] elif False not in [x in ('wp','wpsubdir') for x in typelist]: sitetype = 'wpsubdir' if not cachelist: cachetype = 'basic' else: cachetype = cachelist[0] elif False not in [x in ('wp','wpsubdomain') for x in typelist]: sitetype = 'wpsubdomain' if not cachelist: cachetype = 'basic' else: cachetype = cachelist[0] elif False not in [x in ('wp','php7') for x in typelist]: sitetype = 'wp' if not cachelist: cachetype = 'basic' else: cachetype = cachelist[0] elif False not in [x in ('wpsubdir','php7') for x in typelist]: sitetype = 'wpsubdir' if not cachelist: cachetype = 'basic' else: cachetype = cachelist[0] elif False not in [x in ('wpsubdomain','php7') for x in typelist]: sitetype = 'wpsubdomain' if not cachelist: cachetype = 'basic' else: cachetype = cachelist[0] else: raise RuntimeError("could not determine site and cache type") else: if not typelist and not cachelist: sitetype = None cachetype = None elif (not typelist or "php7" in typelist) and cachelist: sitetype = 'wp' cachetype = cachelist[0] elif typelist and (not cachelist): sitetype = typelist[0] cachetype = 'basic' else: sitetype = typelist[0] cachetype = cachelist[0] return (sitetype, cachetype) def generate_random(): ee_random10 = (''.join(random.sample(string.ascii_uppercase + string.ascii_lowercase + string.digits, 10))) return ee_random10 def deleteDB(self, dbname, dbuser, dbhost, exit=True): try: # Check if Database exists try: if EEMysql.check_db_exists(self, dbname): # Drop database if exists Log.debug(self, "dropping database `{0}`".format(dbname)) EEMysql.execute(self, "drop database `{0}`".format(dbname), errormsg='Unable to drop database {0}' .format(dbname)) except StatementExcecutionError as e: Log.debug(self, "drop database failed") Log.info(self, "Database {0} not dropped".format(dbname)) except MySQLConnectionError as e: Log.debug(self, "Mysql Connection problem occured") if dbuser != 'root': Log.debug(self, "dropping user `{0}`".format(dbuser)) try: 
EEMysql.execute(self, "drop user `{0}`@`{1}`" .format(dbuser, dbhost)) except StatementExcecutionError as e: Log.debug(self, "drop database user failed") Log.info(self, "Database {0} not dropped".format(dbuser)) try: EEMysql.execute(self, "flush privileges") except StatementExcecutionError as e: Log.debug(self, "drop database failed") Log.info(self, "Database {0} not dropped".format(dbname)) except Exception as e: Log.error(self, "Error occured while deleting database", exit) def deleteWebRoot(self, webroot): # do some preprocessing before proceeding webroot = webroot.strip() if (webroot == "/var/www/" or webroot == "/var/www" or webroot == "/var/www/.." or webroot == "/var/www/."): Log.debug(self, "Tried to remove {0}, but didn't remove it" .format(webroot)) return False if os.path.isdir(webroot): Log.debug(self, "Removing {0}".format(webroot)) EEFileUtils.rm(self, webroot) return True else: Log.debug(self, "{0} does not exist".format(webroot)) return False def removeNginxConf(self, domain): if os.path.isfile('/etc/nginx/sites-available/{0}' .format(domain)): Log.debug(self, "Removing Nginx configuration") EEFileUtils.rm(self, '/etc/nginx/sites-enabled/{0}' .format(domain)) EEFileUtils.rm(self, '/etc/nginx/sites-available/{0}' .format(domain)) EEService.reload_service(self, 'nginx') EEGit.add(self, ["/etc/nginx"], msg="Deleted {0} " .format(domain)) def doCleanupAction(self, domain='', webroot='', dbname='', dbuser='', dbhost=''): """ Removes the nginx configuration and database for the domain provided. doCleanupAction(self, domain='sitename', webroot='', dbname='', dbuser='', dbhost='') """ if domain: if os.path.isfile('/etc/nginx/sites-available/{0}' .format(domain)): removeNginxConf(self, domain) if webroot: deleteWebRoot(self, webroot) if dbname: if not dbuser: raise SiteError("dbuser not provided") if not dbhost: raise SiteError("dbhost not provided") deleteDB(self, dbname, dbuser, dbhost) def operateOnPagespeed(self, data): ee_domain_name = data['site_name'] ee_site_webroot = data['webroot'] if data['pagespeed'] is True: if not os.path.isfile("{0}/conf/nginx/pagespeed.conf.disabled" .format(ee_site_webroot)): Log.debug(self, 'Writting the Pagespeed common ' 'configuration to file {0}/conf/nginx/pagespeed.conf' 'pagespeed.conf'.format(ee_site_webroot)) ee_nginx = open('{0}/conf/nginx/pagespeed.conf' .format(ee_site_webroot), encoding='utf-8', mode='w') self.app.render((data), 'pagespeed-common.mustache', out=ee_nginx) ee_nginx.close() else: EEFileUtils.mvfile(self, "{0}/conf/nginx/pagespeed.conf.disabled" .format(ee_site_webroot), '{0}/conf/nginx/pagespeed.conf' .format(ee_site_webroot)) elif data['pagespeed'] is False: if os.path.isfile("{0}/conf/nginx/pagespeed.conf" .format(ee_site_webroot)): EEFileUtils.mvfile(self, "{0}/conf/nginx/pagespeed.conf" .format(ee_site_webroot), '{0}/conf/nginx/pagespeed.conf.disabled' .format(ee_site_webroot)) # Add nginx conf folder into GIT EEGit.add(self, ["{0}/conf/nginx".format(ee_site_webroot)], msg="Adding Pagespeed config of site: {0}" .format(ee_domain_name)) def cloneLetsEncrypt(self): letsencrypt_repo = "https://github.com/letsencrypt/letsencrypt" if not os.path.isdir("/opt"): EEFileUtils.mkdir(self,"/opt") try: Log.info(self, "Downloading {0:20}".format("LetsEncrypt"), end=' ') EEFileUtils.chdir(self, '/opt/') EEShellExec.cmd_exec(self, "git clone {0}".format(letsencrypt_repo)) Log.info(self, "{0}".format("[" + Log.ENDC + "Done" + Log.OKBLUE + "]")) return True except Exception as e: Log.debug(self, "[{err}]".format(err=str(e.reason))) 
Log.error(self, "Unable to download file, LetsEncrypt") return False def setupLetsEncrypt(self, ee_domain_name): ee_wp_email = EEVariables.ee_email while not ee_wp_email: try: ee_wp_email = input('Enter WordPress email: ') except EOFError as e: Log.debug(self, "{0}".format(e)) raise SiteError("input WordPress username failed") if not os.path.isdir("/opt/letsencrypt"): cloneLetsEncrypt(self) EEFileUtils.chdir(self, '/opt/letsencrypt') EEShellExec.cmd_exec(self, "git pull") if os.path.isfile("/etc/letsencrypt/renewal/{0}.conf".format(ee_domain_name)): Log.debug(self, "LetsEncrypt SSL Certificate found for the domain {0}" .format(ee_domain_name)) ssl= archivedCertificateHandle(self,ee_domain_name,ee_wp_email) else: Log.warn(self,"Please Wait while we fetch SSL Certificate for your site.\nIt may take time depending upon network.") ssl = EEShellExec.cmd_exec(self, "./letsencrypt-auto certonly --webroot -w /var/www/{0}/htdocs/ -d {0} -d www.{0} " .format(ee_domain_name) + "--email {0} --text --agree-tos".format(ee_wp_email)) if ssl: Log.info(self, "Let's Encrypt successfully setup for your site") Log.info(self, "Your certificate and chain have been saved at " "/etc/letsencrypt/live/{0}/fullchain.pem".format(ee_domain_name)) Log.info(self, "Configuring Nginx SSL configuration") try: Log.info(self, "Adding /var/www/{0}/conf/nginx/ssl.conf".format(ee_domain_name)) sslconf = open("/var/www/{0}/conf/nginx/ssl.conf" .format(ee_domain_name), encoding='utf-8', mode='w') sslconf.write("listen 443 ssl {http2};\n".format(http2=("http2" if EEAptGet.is_installed(self,'nginx-mainline') else "spdy")) + "ssl on;\n" "ssl_certificate /etc/letsencrypt/live/{0}/fullchain.pem;\n" "ssl_certificate_key /etc/letsencrypt/live/{0}/privkey.pem;\n" .format(ee_domain_name)) sslconf.close() # updateSiteInfo(self, ee_domain_name, ssl=True) EEGit.add(self, ["/etc/letsencrypt"], msg="Adding letsencrypt folder") except IOError as e: Log.debug(self, str(e)) Log.debug(self, "Error occured while generating " "ssl.conf") else: Log.error(self, "Unable to setup, Let\'s Encrypt", False) Log.error(self, "Please make sure that your site is pointed to \n" "same server on which you are running Let\'s Encrypt Client " "\n to allow it to verify the site automatically.") def renewLetsEncrypt(self, ee_domain_name): ee_wp_email = EEVariables.ee_email while not ee_wp_email: try: ee_wp_email = input('Enter email address: ') except EOFError as e: Log.debug(self, "{0}".format(e)) raise SiteError("Input WordPress email failed") if not os.path.isdir("/opt/letsencrypt"): cloneLetsEncrypt(self) EEFileUtils.chdir(self, '/opt/letsencrypt') EEShellExec.cmd_exec(self, "git pull") Log.info(self, "Renewing SSl cert for https://{0}".format(ee_domain_name)) ssl = EEShellExec.cmd_exec(self, "./letsencrypt-auto --renew certonly --webroot -w /var/www/{0}/htdocs/ -d {0} -d www.{0} " .format(ee_domain_name) + "--email {0} --text --agree-tos".format(ee_wp_email)) mail_list = '' if not ssl: Log.error(self,"ERROR : Cannot RENEW SSL cert !",False) if (SSL.getExpirationDays(self,ee_domain_name)>0): Log.error(self, "Your current cert will expire within " + str(SSL.getExpirationDays(self,ee_domain_name)) + " days.",False) else: Log.error(self, "Your current cert already EXPIRED !",False) EESendMail("easyengine@{0}".format(ee_domain_name), ee_wp_email, "[FAIL] SSL cert renewal {0}".format(ee_domain_name), "Hey Hi,\n\nSSL Certificate renewal for https://{0} was unsuccessful.".format(ee_domain_name) + "\nPlease check easyengine log for reason. 
Your SSL Expiry date : " + str(SSL.getExpirationDate(self,ee_domain_name)) + "\n\nFor support visit https://easyengine.io/support/ .\n\nYour's faithfully,\nEasyEngine",files=mail_list, port=25, isTls=False) Log.error(self, "Check logs for reason " "`tail /var/log/ee/ee.log` & Try Again!!!") EEGit.add(self, ["/etc/letsencrypt"], msg="Adding letsencrypt folder") EESendMail("easyengine@{0}".format(ee_domain_name), ee_wp_email, "[SUCCESS] SSL cert renewal {0}".format(ee_domain_name), "Hey Hi,\n\nYour SSL Certificate has been renewed for https://{0} .".format(ee_domain_name) + "\nYour SSL will Expire on : " + str(SSL.getExpirationDate(self,ee_domain_name)) + "\n\nYour's faithfully,\nEasyEngine",files=mail_list, port=25, isTls=False) #redirect= False to disable https redirection def httpsRedirect(self,ee_domain_name,redirect=True): if redirect: if os.path.isfile("/etc/nginx/conf.d/force-ssl-{0}.conf.disabled".format(ee_domain_name)): EEFileUtils.mvfile(self, "/etc/nginx/conf.d/force-ssl-{0}.conf.disabled".format(ee_domain_name), "/etc/nginx/conf.d/force-ssl-{0}.conf".format(ee_domain_name)) else: try: Log.info(self, "Adding /etc/nginx/conf.d/force-ssl-{0}.conf".format(ee_domain_name)) sslconf = open("/etc/nginx/conf.d/force-ssl-{0}.conf" .format(ee_domain_name), encoding='utf-8', mode='w') sslconf.write("server {\n" "\tlisten 80;\n" + "\tserver_name www.{0} {0};\n".format(ee_domain_name) + "\treturn 301 https://{0}".format(ee_domain_name)+"$request_uri;\n}" ) sslconf.close() # Nginx Configation into GIT except IOError as e: Log.debug(self, str(e)) Log.debug(self, "Error occured while generating " "/etc/nginx/conf.d/force-ssl-{0}.conf".format(ee_domain_name)) Log.info(self, "Added HTTPS Force Redirection for Site " " http://{0}".format(ee_domain_name)) EEGit.add(self, ["/etc/nginx"], msg="Adding /etc/nginx/conf.d/force-ssl-{0}.conf".format(ee_domain_name)) else: if os.path.isfile("/etc/nginx/conf.d/force-ssl-{0}.conf".format(ee_domain_name)): EEFileUtils.mvfile(self, "/etc/nginx/conf.d/force-ssl-{0}.conf".format(ee_domain_name), "/etc/nginx/conf.d/force-ssl-{0}.conf.disabled".format(ee_domain_name)) Log.info(self, "Disabled HTTPS Force Redirection for Site " " http://{0}".format(ee_domain_name)) def archivedCertificateHandle(self,domain,ee_wp_email): Log.warn(self,"You already have an existing certificate for the domain requested.\n" "(ref: /etc/letsencrypt/renewal/{0}.conf)".format(domain) + "\nPlease select an option from below?" "\n\t1: Reinstall existing certificate" "\n\t2: Keep the existing certificate for now" "\n\t3: Renew & replace the certificate (limit ~5 per 7 days)" "") check_prompt = input("\nType the appropriate number [1-3] or any other key to cancel: ") if not os.path.isfile("/etc/letsencrypt/live/{0}/cert.pem".format(domain)): Log.error(self,"/etc/letsencrypt/live/{0}/cert.pem file is missing.".format(domain)) if check_prompt == "1": Log.info(self,"Please Wait while we reinstall SSL Certificate for your site.\nIt may take time depending upon network.") ssl = EEShellExec.cmd_exec(self, "./letsencrypt-auto certonly --reinstall --webroot -w /var/www/{0}/htdocs/ -d {0} -d www.{0} " .format(domain) + "--email {0} --text --agree-tos".format(ee_wp_email)) elif check_prompt == "2" : Log.info(self,"Using Existing Certificate files") if not (os.path.isfile("/etc/letsencrypt/live/{0}/fullchain.pem".format(domain)) or os.path.isfile("/etc/letsencrypt/live/{0}/privkey.pem".format(domain))): Log.error(self,"Certificate files not found. 
Skipping.\n" "Please check if following file exist\n\t/etc/letsencrypt/live/{0}/fullchain.pem\n\t" "/etc/letsencrypt/live/{0}/privkey.pem".format(domain)) ssl = True elif check_prompt == "3": Log.info(self,"Please Wait while we renew SSL Certificate for your site.\nIt may take time depending upon network.") ssl = EEShellExec.cmd_exec(self, "./letsencrypt-auto --renew certonly --webroot -w /var/www/{0}/htdocs/ -d {0} -d www.{0} " .format(domain) + "--email {0} --text --agree-tos".format(ee_wp_email)) else: Log.error(self,"Operation cancelled by user.") if os.path.isfile("{0}/conf/nginx/ssl.conf" .format(domain)): Log.info(self, "Existing ssl.conf . Backing it up ..") EEFileUtils.mvfile(self, "/var/www/{0}/conf/nginx/ssl.conf" .format(domain), '/var/www/{0}/conf/nginx/ssl.conf.bak' .format(domain)) return ssl
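# A minimal usage sketch for detSitePar (illustrative only: the option dicts
# below are hand-written examples, not output of the EasyEngine CLI parser).
# detSitePar maps the parsed --html/--php/--wp/... flags to a (sitetype,
# cachetype) pair and raises RuntimeError for ambiguous combinations.
if __name__ == '__main__':
    # a WordPress site with the nginx fastcgi-cache flag
    print(detSitePar({'wp': True, 'wpfc': True}))      # ('wp', 'wpfc')
    # --php combined with --mysql collapses to the 'mysql' site type
    print(detSitePar({'php': True, 'mysql': True}))     # ('mysql', 'basic')
    # more than one cache flag at a time is rejected
    try:
        detSitePar({'wp': True, 'wpfc': True, 'w3tc': True})
    except RuntimeError as err:
        print(err)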
harshadyeola/easyengine
ee/cli/plugins/site_functions.py
Python
mit
70,749
[ "VisIt" ]
d169d735e73808d446fbacf77b25744dfbdae45d66310d0e2d278af53939134a
# Licensed under GPL version 3 - see LICENSE.rst '''Sources generate photons with all photon properties: energy, direction, time, polarization. For objects of type `AstroSource`, the coordinates of the photon origin on the sky are added to the photon list. `astropy.coords.SkyCoord` is an object well suited for this task. These objects can be added to photon tables through the mechanims of `mixin columns <http://docs.astropy.org/en/latest/table/index.html#mixin-columns>`). However, mix-in columns don't (yet) support saving to all table formats or tables operations such as stacking. Thus, it is much better to include the coordinates as two columns of floats with names ``ra`` and ``dec`` into the table. Sources take a `~astropy.coordinates.SkyCoord` from the user to avoid any ambiguity about the coordinate systme used, but convert this into plain floats used in the photon table. ''' import os from datetime import datetime import numpy as np from scipy.stats import expon from astropy.table import Table import astropy.units as u from astropy.coordinates import SkyCoord, SkyOffsetFrame from ..base import SimulationSequenceElement from ..math.random import RandomArbitraryPdf from .. import __version__ as marxsversion @u.quantity_input def poisson_process(rate: (u.s * u.cm**2)**(-1)): '''Return a function that generates Poisson distributed times with rate ``rate``. Parameters ---------- rate : `~astropy.units.quantity.Quantity` Expectation value for the total rate of photons with unit 1 / cm**2 / s. Returns ------- poisson_rate : function Function that generates Poisson distributed times with rate ``rate``. ''' if not rate.isscalar: raise ValueError('"rate" must be scalar.') @u.quantity_input(exposuretime=u.s) def poisson_rate(exposuretime: u.s, geomarea: u.cm**2) -> u.s: '''Generate Poisson distributed times. Parameters ---------- exposuretime : `~astropy.units.quantity.Quantity` Exposure time geomarea : `~astropy.units.quantity.Quantity` Geometric opening area of telescope Returns ------- times : `~astropy.units.quantity.Quantity` Poisson distributed times. ''' fullrate = rate * geomarea # Make 10 % more numbers then we expect to need, because it's random times = expon.rvs(scale=1./fullrate.to(1 / u.s), size=int((exposuretime * fullrate * 1.1).to(u.dimensionless_unscaled))) # If we don't have enough numbers right now, add some more. while (times.sum() * u.s) < exposuretime: times = np.hstack([times, expon.rvs(scale=1/fullrate.to(1 / u.s), size=int(((exposuretime - times.sum() * u.s) * fullrate * 1.1).to(u.dimensionless_unscaled)))]) times = np.cumsum(times) * u.s return times[times < exposuretime] return poisson_rate class SourceSpecificationError(Exception): pass class Source(SimulationSequenceElement): '''Base class for all photons sources. This class provides a very general implementation of photons sources. Typically, it is not used directly, but a more specialized subclass, such as `PointSource` for an astronomical source or `LabPointSource` for a source at a finite distance. Most of the derived source support the same input argumets as `Source`, thus they are explained in detail here. Parameters ---------- flux : `~astropy.units.quantity.Quantity` or callable This sets the total flux from a source in photons/time/area. Options are: - quantity: Constant (not Poisson distributed) flux. - callable: Function that takes a total exposure time as input and returns an array of photon emission times between 0 and the total exposure time. 
energy : `~astropy.units.quantity.Quantity` or callable or `~astropy.table.QTable` This input decides the energy of the emitted photons. Possible formats are: - polarization. - `~astropy.units.quantity.Quantity`: Constant energy. - `astropy.table.Table`: Given this table, the code assumes a piecewise flat spectrum. The "energy" values contain the **upper** limit of each bin, the "fluxdensity" array the flux density in each bin. The first entry in the "fluxdensity" array is ignored, because the lower bound of this bin is undefined. The code draws an energy from this spectrum for every photon created. - A function or callable object: This option allows for full customization. The function must take an array of photon times as input and return an equal length array of photon energies `~astropy.units.quantity.Quantity`. polarization : `~astropy.units.quantity.Quantity`, ``None``, `~astropy.table.QTable`, or callable. There are several different ways to set the polarization angle of the photons for a polarized source. In all cases, the angle is measured North through East. (We ignore the special case of a polarized source exactly on a pole.) The default value is ``None`` (unpolarized source). - ``None`` : An unpolarized source. Every photons is assigned a random polarization. - `~astropy.units.quantity.Quantity` : Constant polarization angle for all photons - `~astropy.table.Table` : Table with columns called "angle" and "probabilitydensity". The summed probability density will automatically be normalized to one. Given this table, the code assumes a piecewise constant probability density. The "angle" values contain the **upper** limit of each bin. The first entry in the "probabilitydenisty" array is ignored, because the lower bound of this bin is undefined. - a callable (function or callable object): This option allows full customization. The function is called with two arrays (time and energy values) as input and must return an array of equal length that contains the polarization angles as `~astropy.units.quantity.Quantity` object. geomarea : `astropy.units.Quantity` or ``None`` Geometric opening area of telescope. If ``None`` then the flux must be given in photons per time, not per time per unit area. ''' def __init__(self, energy=1*u.keV, flux=1 / u.s / u.cm**2, polarization=None, geomarea=1*u.cm**2, **kwargs): self.energy = energy self.flux = flux self.polarization = polarization self.geomarea = 1 if geomarea is None else geomarea super().__init__(**kwargs) def __call__(self, *args, **kwargs): return self.generate_photons(*args, **kwargs) @u.quantity_input() def generate_times(self, exposuretime: u.s): if callable(self.flux): return self.flux(exposuretime, self.geomarea) elif hasattr(self.flux, 'isscalar') and self.flux.isscalar: return np.arange(0, exposuretime.to(u.s).value, 1. 
/ (self.flux * self.geomarea * u.s).decompose()) * u.s else: raise SourceSpecificationError('`flux` must be a quantity or a callable.') @u.quantity_input() def generate_energies(self, t: u.s) -> u.keV: n = len(t) # function if callable(self.energy): en = self.energy(t) if len(en) != n: raise SourceSpecificationError('`energy` has to return an array of same size as input time array.') else: return en # astropy.table.QTable elif hasattr(self.energy, 'columns'): x = self.energy['energy'].to(u.keV).value y = (self.energy['fluxdensity'][1:]).to((u.s * u.cm**2 * u.keV)**(-1)).value y = np.hstack(([0], y)) rand = RandomArbitraryPdf(x, y) return rand(n) * u.keV # scalar quantity elif hasattr(self.energy, 'isscalar') and self.energy.isscalar: return np.ones(n) * self.energy.to(u.keV, equivalencies=u.spectral()) # anything else else: raise SourceSpecificationError('`energy` must be Quantity, function, or have columns "energy" and "fluxdensity".') @u.quantity_input() def generate_polarization(self, times: u.s, energies: u.keV) -> u.rad: n = len(times) # function if callable(self.polarization): pol = self.polarization(times, energies) if len(pol) != n: raise SourceSpecificationError('`polarization` has to return an array of same size as input time and energy arrays.') else: return pol elif self.polarization is None: return np.random.uniform(0, 2 * np.pi, n) * u.rad # astropy.table.QTable elif hasattr(self.polarization, 'columns'): x = self.polarization['angle'].to(u.rad).value y = (self.polarization['probabilitydensity']).to(1/u.rad).value rand = RandomArbitraryPdf(x, y) return rand(n) * u.rad # scalar quantity elif hasattr(self.polarization, 'isscalar') and self.polarization.isscalar: return np.ones(n) * self.polarization else: raise SourceSpecificationError('`polarization` must be number (angle), callable, None (unpolarized), 2.n array or have fields "angle" (in rad) and "probability".') def generate_photon(self): raise NotImplementedError @u.quantity_input() def generate_photons(self, exposuretime: u.s): '''Central function to generate photons. Calling this function generates a photon table according to the `flux`, `energy`, and `polarization` of this source. The number of photons depends on the total exposure time, which is a parameter of this function. Depending on the setting for `flux` the photons could be distributed equally over the interval 0..exposuretime or follow some other distribution. Parameters ---------- exposuretime : `astropy.quantity.Quantity` Total exposure time. Returns ------- photons : `astropy.table.Table` Table with photon properties. 
''' times = self.generate_times(exposuretime) energies = self.generate_energies(times) pol = self.generate_polarization(times, energies) n = len(times) photons = Table([times.to(u.s).value, energies.to(u.keV).value, pol.to(u.rad).value, np.ones(n)], names=['time', 'energy', 'polangle', 'probability']) photons.meta['EXTNAME'] = 'EVENTS' photons.meta['EXPOSURE'] = (exposuretime.to(u.s).value, 'total exposure time [s]') #photons.meta['DATE-OBS'] = photons.meta['CREATOR'] = 'MARXS - Version {0}'.format(marxsversion) photons.meta["LONGSTRN"] = ("OGIP 1.0", "The OGIP long string convention may be used.") photons.meta['MARXSVER'] = (marxsversion, 'MARXS version') now = datetime.now() photons.meta['SIMDATE'] = (str(now.date()), 'Date simulation was run') photons.meta['SIMTIME'] = (str(now.time())[:10], 'Time simulation was started') photons.meta['SIMUSER'] = (os.environ.get('USER', 'unknown user'), 'User running simulation') photons.meta['SIMHOST'] = (os.environ.get('HOST', 'unknown host'), 'Host system running simulation') photons['time'].unit = u.s photons['energy'].unit = u.keV photons['polangle'].unit = u.rad return photons class AstroSource(Source): '''Astrophysical source with a sky position Parameters ---------- coords : `astropy.coordinates.SkySoord` (preferred) Position of the source on the sky. If ``coords`` is not a `~astropy.coordinates.SkyCoord` object itself, it is used to initialize such an object. See `~astropy.coordinates.SkyCoord` for a description of allowed input values. ''' def __init__(self, **kwargs): coords = kwargs.pop('coords') if isinstance(coords, SkyCoord): self.coords = coords else: self.coords = SkyCoord(coords) if not self.coords.isscalar: raise ValueError("Coordinate must be scalar, not array.") super().__init__(**kwargs) def set_pos(self, photons, coo): '''Set Ra, Dec of photons in table This function write Ra, Dec to a table. It is defined here to make the way `astropy.coordinates.SkyCoord` objects are stored more uniform. Currently, mixin columns in tables have some disadvantages, e.g. they cause errors on writing and on stacking. Thus, we store the coordinates as plain numbers. Since that format is not unique (e.g. units could be deg or rad), system could be ICRS, FK4, FK5 or other this conversion is done here for all astrononimcal sources. This also makes it easier to change that design in the future. Parameters ---------- photons : `astropy.table.Table` Photon table. Columns ``ra`` and ``dec`` will be added or overwritten. coo : `astropy.coords.SkyCoord` Photon coordinates ''' photons['ra'] = coo.icrs.ra.deg photons['dec'] = coo.icrs.dec.deg photons['ra'].unit = u.degree photons['dec'].unit = u.degree photons.meta['COORDSYS'] = ('ICRS', 'Type of coordinate system') class PointSource(AstroSource): '''Astrophysical point source. Parameters ---------- kwargs : see `Source` Other keyword arguments include ``flux``, ``energy`` and ``polarization``. See `Source` for details. ''' def __init__(self, **kwargs): super().__init__(**kwargs) @u.quantity_input def generate_photons(self, exposuretime: u.s): photons = super().generate_photons(exposuretime) self.set_pos(photons, self.coords) return photons class RadialDistributionSource(AstroSource): '''Base class for sources where photons follow some radial distribution on the sky Parameters ---------- radial_distribution : callable A function that takes an interger as input, which specifies the number of photons to produce. The output must be an `astropy.units.Quantity` object with n angles in it. 
func_par : object ``radial_distribution`` has access to ``self.func_par`` to hold function parameters. This could be, e.g. a tuple with coeffications. kwargs : see `Source` Other keyword arguments include ``flux``, ``energy`` and ``polarization``. See `Source` for details. ''' def __init__(self, **kwargs): self.func = kwargs.pop('radial_distribution') self.func_par = kwargs.pop('func_par', None) super().__init__(**kwargs) @u.quantity_input def generate_photons(self, exposuretime: u.s): '''Photon positions are generated in a frame that is centered on the coordinates set in ``coords``, then they get transformed into the global sky system. ''' photons = super().generate_photons(exposuretime) relative_frame = SkyOffsetFrame(origin=self.coords) n = len(photons) phi = np.random.rand(n) * 2. * np.pi * u.rad d = self.func(n) relative_coords = SkyCoord(d * np.sin(phi), d * np.cos(phi), frame=relative_frame) origin_coord = relative_coords.transform_to(self.coords) self.set_pos(photons, origin_coord) return photons class SphericalDiskSource(RadialDistributionSource): '''Astrophysical source with the shape of a circle or ring. The `DiskSource` makes a small angle approximation. In contrast, this source implements the full spherical geometry at the cost of running slower. For radii less than a few degrees the difference is negligible and we recommend use of the faster `DiskSource`. Parameters ---------- a_inner, a_outer : `astropy.coordinates.Angle` Inner and outer angle of the ring (e.g. in arcsec). The default is a disk with no inner hole (``a_inner`` is set to zero.) ''' def __init__(self, **kwargs): kwargs['func_par'] = [kwargs.pop('a_outer'), kwargs.pop('a_inner', 0. * u.rad)] kwargs['radial_distribution'] = self.radial_distribution super().__init__(**kwargs) def radial_distribution(self, n): '''Radial distribution function. See http://6degreesoffreedom.co/circle-random-sampling/ for an explanation of how to derive the formula. Note however, that here we use sin instead of cos because we measure the angle from the top. ''' u = np.random.rand(n) return np.arccos(np.cos(self.func_par[1]) * (1. - u) + u * np.cos(self.func_par[0])) class DiskSource(RadialDistributionSource): '''Astrophysical source with the shape of a circle or ring. This source uses a small angle approximation which is valid for radii less than a few degrees and runs much faster. See ``SphericalDiskSource`` for an implementation using full spherical geometry. Parameters ---------- a_inner, a_outer : `astropy.coordinates.Angle` Inner and outer angle of the ring (e.g. in arcsec). The default is a disk with no inner hole (``a_inner`` is set to zero.) ''' def __init__(self, **kwargs): kwargs['func_par'] = [kwargs.pop('a_outer'), kwargs.pop('a_inner', 0. * u.rad)] kwargs['radial_distribution'] = lambda n: np.sqrt(self.func_par[1]**2 + np.random.rand(n) * (self.func_par[0]**2 - self.func_par[1]**2)) super().__init__(**kwargs) class GaussSource(AstroSource): '''Astrophysical source with a Gaussian brightness profile. This source uses a small angle approximation which is valid for radii less than a few degrees. Parameters ---------- sigma : `astropy.coordinates.Angle` Gaussian sigma setting the width of the distribution ''' def __init__(self, **kwargs): self.sigma = kwargs.pop('sigma') super().__init__(**kwargs) @u.quantity_input() def generate_photons(self, exposuretime: u.s): '''Photon positions are generated in a frame that is centered on the coordinates set in ``coords``, then they get transformed into the global sky system. 
''' photons = super().generate_photons(exposuretime) relative_frame = SkyOffsetFrame(origin=self.coords) n = len(photons) relative_coords = SkyCoord(np.random.normal(scale=self.sigma.value, size=n) * self.sigma.unit, np.random.normal(scale=self.sigma.value, size=n) * self.sigma.unit, frame=relative_frame) origin_coord = relative_coords.transform_to(self.coords) self.set_pos(photons, origin_coord) return photons class SymbolFSource(AstroSource): '''Source shaped like the letter F. This source provides a non-symmetric source for testing purposes. Parameters ---------- size : `astropy.units.quantity` angular size kwargs : see `Source` Other keyword arguments include ``flux``, ``energy`` and ``polarization``. See `Source` for details. ''' def __init__(self, **kwargs): self.size = kwargs.pop('size', 1. * u.degree) super().__init__(**kwargs) @u.quantity_input() def generate_photons(self, exposuretime: u.s): photons = super().generate_photons(exposuretime) n = len(photons) elem = np.random.choice(3, size=n) ra = np.ones(n) * self.coords.icrs.ra dec = np.ones(n) * self.coords.icrs.dec size = self.size ra[elem == 0] += size * np.random.random(np.sum(elem == 0)) ra[elem == 1] += size dec[elem == 1] += 0.5 * size * np.random.random(np.sum(elem == 1)) ra[elem == 2] += 0.8 * size dec[elem == 2] += 0.3 * size * np.random.random(np.sum(elem == 2)) self.set_pos(photons, SkyCoord(ra, dec, frame=self.coords)) return photons
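# Minimal usage sketch (the coordinates, energy, flux and geometric area below
# are illustrative assumptions, not a recommended instrument setup). It
# exercises the path documented above: a constant-energy point source with
# Poisson-distributed arrival times.
if __name__ == '__main__':
    demo_src = PointSource(coords=SkyCoord(30. * u.deg, 30. * u.deg),
                           energy=1. * u.keV,
                           flux=poisson_process(1. / u.s / u.cm**2),
                           geomarea=100. * u.cm**2)
    demo_photons = demo_src.generate_photons(1000. * u.s)
    # The returned table has 'time', 'energy', 'polangle', 'probability'
    # plus the 'ra'/'dec' columns added by AstroSource.set_pos.
    print(demo_photons.colnames, len(demo_photons))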
Chandra-MARX/marxs
marxs/source/source.py
Python
gpl-3.0
21,002
[ "Gaussian" ]
f7dc5c7568e7636fd46c684266c6a66f7b147637dbe4fe758ef437141137ea36
""" Rbf - Radial basis functions for interpolation/smoothing scattered Nd data. A radial basis function (RBF) is a real-valued function whose value depends only on the distance from the origin. Rbf are typically used to build up function approximations of the form: $ y(x) = \sum_{i=1}^N w_i \, \phi(||x - x_i||) $, where the approximating function $y(x)$ is represented as a sum of $N$ Rbfs, each associated with a different center $x_i$, and weighted by an appropriate coefficient $w_i$. The weights $w_i$ can be estimated using the matrix methods of Weighted (linear) least squares, given that the approximating function is ''linear'' in the weights. It can be shown that any continuous function on a compact interval can in principle be interpolated with arbitrary accuracy by a sum of this form, if a sufficiently large number N of radial basis functions is used. You can consider this approximation function $y(x)$ as a Nd-barycentric interpolation A rather simple neural-network ------------------------------ This approximation process can also be interpreted as a rather simple single-layer type of artificial neural network called a "radial basis function network", with the radial basis functions taking on the role of the activation functions of the network. [Park] The approximation $y(x)$ is differentiable with respect to the weights $wi$. The weights could thus be learned using any of the standard iterative methods for neural networks. Using radial basis functions in this manner yields a reasonable interpolation approach provided that the fitting set has been chosen such that it covers the entire range systematically (equidistant data points are ideal). However, estimates outside the fitting set tend to perform poorly. references ---------- wikipedia: http://en.wikipedia.org/wiki/Radial_basis_function Buhmann: Buhmann, Martin D. (2003), "Radial Basis Functions: Theory and Implementations", Cambridge University Press, ISBN 978-0-521-63338-3. Park: Park, J., Sandberg, I. W., "Universal Approximation Using Radial-Basis-Function Networks" http://www.ise.ncsu.edu/fangroup/ie789.dir/Park.pdf Fornberg: Bengt Fornberg, Julia Zuev, "The runge phenomenon and spatially variable shape parameters in rbf interpolation" http://amath.colorado.edu/faculty/fornberg/Docs/fz_var_eps.pdf """ from scipy.lib.six import get_function_code import numpy as np __all__ = [ 'RbfInterpolator', 'cubic', 'euclidean_norm', 'gaussian', 'inverse_multiquadric', 'linear', 'multiquadric', 'polyharmonic', 'thin_plate'] #distance functions def euclidean_norm(x1, x2): return np.sqrt(((x1 - x2) ** 2).sum(axis=0)) #Radial basic functions def multiquadric(r, epsilon=1.): return np.sqrt((1.0 / epsilon * r) ** 2 + 1) def inverse_multiquadric(r, epsilon=1.): return 1.0 / multiquadric(r, epsilon) def gaussian(r, epsilon=1.): return np.exp(-(1.0 / epsilon * r) ** 2) def linear(r): return r def cubic(r): return r ** 3 def polyharmonic(r, k=5): if (k % 2) == 0: return r ** k * np.log(r) else: return r ** k def thin_plate(r): result = r ** 2 * np.log(r) result[r == 0] = 0 # the spline is zero at zero return result class RbfInterpolator(object): """ Rbf(*args) A class for radial basis function approximation/interpolation of n-dimensional scattered data. Parameters ---------- *args : sequence of ndarrays x1, x2, ... xn, y, where xi are the coordinates of the sampling points and y is the array of values at the nodes function : callable, optional The radial basis function (Rbf). 
(default: 'multiquadric') Using any callable as radial function is possible. The function must take 1 argument (radii) and the epsilon parameter will be given if a keyword of the same name is defined. Other keyword arguments (**kwargs) will also be forwarded. epsilon : float, optional Adjustable constant for gaussian or multiquadrics functions defaults: mean(radii) smooth : float, optional Values greater than zero increase the smoothness of the approximation. (default: 0, function will go through all nodal points) norm : callable, optional A distance function between 2 vectors (xi, xj) (default: euclidean_norm) Example Usage ------------- fitting a Gaussian in 3D >>> x = np.arange(-1, 1, 0.1) >>> y = np.arange(-1, 1, 0.1) >>> X = np.asarray([ k for k in np.nditer(np.ix_(x, y))]) >>> Y = Y = np.exp(-0.5 * (X ** 2).sum(axis=1)) >>> rb = RbfInterpolator(X[:, 0], X[:, 1], Y) # radial basis function interpolator instance >>> Yn = rb(X[:, 0], X[:, 1]) # interpolated values >>> ((Yn - Y) / Y).ptp() < 1e-5 """ def __init__(self, *args, **kwargs): self.xi = np.asarray([np.asarray(a, dtype=np.float_).flatten() for a in args[:-1]]) self.N = self.xi.shape[-1] self.di = np.asarray(args[-1]).flatten() if self.xi.shape[1] != self.di.size: raise ValueError("All arrays must be equal length.") self.norm = kwargs.pop('norm', euclidean_norm) self.epsilon = kwargs.pop('epsilon', None) self.smooth = kwargs.pop('smooth', 0.0) self.function = kwargs.pop('function', multiquadric) self._fn_eps = 'epsilon' in get_function_code(self.function).co_varnames r = self.distance_matrix(self.xi, self.xi) if self._fn_eps & (self.epsilon is None): self.epsilon = r.mean() self._extra_kwargs = kwargs # in case it as any use when calling function self.A = self.phi(r) if self.smooth > 0: self.A -= np.eye(self.N) * self.smooth self.nodes = np.linalg.solve(self.A, self.di) def phi(self, r, **kwargs): if self._fn_eps: kwargs['epsilon'] = self.epsilon if len(self._extra_kwargs) > 0: kwargs.update(kwargs, **self._extra_kwargs) return self.function(r, **kwargs) def update_yi(self, yi): """ allow a quick update of the node values without re-generating a full object """ di = np.asarray(yi).flatten() if self.xi.shape[1] != di.size: raise ValueError("All arrays must be equal length.") self.di = di self.nodes = np.linalg.solve(self.A, self.di) def distance_matrix(self, x1, x2): """ array of distance parameters ---------- x1: ndarray array of points x2: ndarray array of points returns ------- d: ndarray, shape(len(x1), len(x2) matrix of the pairwise-distances from each point in x1 to each point in x2. """ if len(x1.shape) == 1: x1 = x1[np.newaxis, :] if len(x2.shape) == 1: x2 = x2[np.newaxis, :] x1 = x1[..., :, np.newaxis] x2 = x2[..., np.newaxis, :] return self.norm(x1, x2) def __call__(self, *args): args = [np.asarray(x) for x in args] if any([x.shape != y.shape for x in args for y in args]): raise ValueError("Array lengths must be equal") shp = args[0].shape self.xa = np.asarray([a.flatten() for a in args], dtype=np.float_) r = self.distance_matrix(self.xa, self.xi) return np.dot(self.phi(r), self.nodes).reshape(shp)
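# Minimal 1-D usage sketch (illustrative sample values only), complementing the
# 2-D doctest in the class docstring: fit scattered samples with the default
# multiquadric kernel, evaluate on a finer grid, then reuse the same centers
# for new node values via update_yi (which only re-solves the linear system).
if __name__ == '__main__':
    xk = np.linspace(0., 2. * np.pi, 25)
    rb = RbfInterpolator(xk, np.sin(xk))
    xf = np.linspace(0., 2. * np.pi, 101)
    y_sin = rb(xf)               # interpolated sin values on the fine grid
    rb.update_yi(np.cos(xk))     # keep A, re-solve the weights for cos samples
    y_cos = rb(xf)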
mfouesneau/faststats
faststats/interpolate/bary.py
Python
mit
7,536
[ "Gaussian" ]
71f80531f19dd7c8f49234d41c7becf870d04fbaec881d7d513b71484491a2ec
import numpy as np

from joblib import Parallel, delayed

from scipy.special import wofz
from scipy.optimize import curve_fit

from protoclass.data_management import RDAModality
from protoclass.preprocessing import MRSIPhaseCorrection
from protoclass.preprocessing import MRSIFrequencyCorrection

from fdasrsf import srsf_align

path_mrsi = '/data/prostate/experiments/Patient 1036/MRSI/CSI_SE_3D_140ms_16c.rda'


def _voigt_profile(x, alpha, mu, sigma, gamma):
    """Private function to fit a Voigt profile.

    Parameters
    ----------
    x : ndarray, shape (len(x))
        The input data.

    alpha : float,
        The amplitude factor.

    mu : float,
        The shift of the central value.

    sigma : float,
        sigma of the Gaussian.

    gamma : float,
        gamma of the Lorentzian.

    Returns
    -------
    y : ndarray, shape (len(x), )
        The Voigt profile.

    """
    # Define z
    z = ((x - mu) + 1j * gamma) / (sigma * np.sqrt(2))
    # Compute the Faddeeva function
    w = wofz(z)

    return alpha * (np.real(w)) / (sigma * np.sqrt(2. * np.pi))


def _fit_voigt_water(ppm, spectra):
    """Private function to fit the water residual in one spectrum.

    Parameters
    ----------
    ppm : ndarray, shape (n_samples, )
        The PPM array.

    spectra : ndarray, shape (n_samples, )
        The spectrum on which the water has to be fitted.

    Returns
    -------
    popt : list of float,
        A list of the fitted parameters.

    """
    # Get the values of the spectrum between 4 and 6 ppm
    water_limits = (4., 6.)
    sub_ppm = ppm[np.flatnonzero(np.bitwise_and(ppm > water_limits[0],
                                                ppm < water_limits[1]))]
    sub_spectra = spectra[np.flatnonzero(np.bitwise_and(
        ppm > water_limits[0], ppm < water_limits[1]))]

    # Define the default parameters
    amp_dft = np.max(sub_spectra) / _voigt_profile(0., 1., 0., 1., 1.)
    popt_default = [amp_dft, 1., 1., 1.]

    # Define the bounds
    param_bounds = ([0., 0., 0., 0.], [np.inf, np.inf, np.inf, np.inf])

    try:
        popt, _ = curve_fit(_voigt_profile, sub_ppm, np.real(sub_spectra),
                            p0=popt_default, bounds=param_bounds)
    except RuntimeError:
        popt = popt_default

    return popt


rda_mod = RDAModality(1250.)
rda_mod.read_data_from_path(path_mrsi)

phase_correction = MRSIPhaseCorrection(rda_mod)
rda_mod = phase_correction.transform(rda_mod)

freq_correction = MRSIFrequencyCorrection(rda_mod)
rda_mod = freq_correction.fit(rda_mod).transform(rda_mod)

# # Process all the different spectra
# all_spectra = np.reshape(rda_mod.data_, (rda_mod.data_.shape[0],
#                                          rda_mod.data_.shape[1] *
#                                          rda_mod.data_.shape[2] *
#                                          rda_mod.data_.shape[3])).T

# popts = Parallel(n_jobs=-1)(delayed(_fit_voigt_water)(rda_mod.bandwidth_ppm,
#                                                       spectra)
#                             for spectra in all_spectra)
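# Hypothetical helper added for illustration (the name _demo_voigt_fit and the
# synthetic numbers are assumptions, not part of the protoclass pipeline). It
# shows how the two private helpers above fit together without needing the RDA
# file that is loaded at module level.
def _demo_voigt_fit():
    """Fit the Voigt model to a synthetic peak placed in the 4-6 ppm window."""
    ppm_axis = np.linspace(0., 8., 1024)
    true_params = (100., 4.7, 0.05, 0.05)   # alpha, mu, sigma, gamma
    synthetic = _voigt_profile(ppm_axis, *true_params)
    # _fit_voigt_water only uses the 4-6 ppm slice; how close the returned
    # parameters get to true_params depends on curve_fit's default start point.
    return _fit_voigt_water(ppm_axis, synthetic)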
I2Cvb/prostate
scratch/water_detection.py
Python
mit
3,093
[ "Gaussian" ]
e4a82a50473b6aac0ef0cc9e5d939935eeb950ad08bdf186df39fc1c3fd713cb
#!/usr/bin/python import HTSeq import sys from Bio.Seq import Seq from Bio.Alphabet import generic_dna from Bio.Blast import NCBIXML from Bio.Blast.Applications import NcbiblastnCommandline from Bio.Blast.Applications import NcbiblastpCommandline from Counter import Counter # --- Counter.py from python3 is needed to run this script --- # #from collections import Counter import os import os.path import string import argparse import subprocess from CommonFastaFunctions import Create_Blastdb from CommonFastaFunctions import LoadAlelleFasta from CommonFastaFunctions import LoadAlellicProfileGeneric from CommonFastaFunctions import WriteFasta from CommonFastaFunctions import runBlast from CommonFastaFunctions import runBlastParser from Genes import Gene from Genes import SetOfGenes import time import pickle def reverseComplement(strDNA): basecomplement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'} strDNArevC = '' for l in strDNA: strDNArevC += basecomplement[l] return strDNArevC[::-1] def extendCDS(contigTag, cdsDict, matchStart, matchStop, contigsDict, maxsize,minsize,sizeratio): # ---------------------- # # Trying to extending it # # ---------------------- # CDStype='' cdsString = '' # --- tests if the cds contains the gene --- # if contigTag not in cdsDict: print "\nCheck if this contig contains CDSs:", contigTag #print cdsDict.keys() return False, '', CDStype listCdsInds = cdsDict[ contigTag ] # --- test if it is needed to invert the sequence --- # if matchStart > matchStop: aux = matchStart matchStart = matchStop matchStop = aux cdsIntersectMatch=[] for j in listCdsInds: # cds start tS = j[0] # cds stop tE = j[1] cdslen=tE-tS #print tS , matchStart #print tE, matchStop #print cdslen if(matchStart>= tS and (matchStop<=tE or matchStart<tE)): #if CDS start is before match start and the CDS is either after or before the match end cdsIntersectMatch.append(j) elif (matchStart<= tS and tS<=matchStop): #if CDS start inside of the match (doesnt matter where it ends) cdsIntersectMatch.append(j) elif (matchStop< tS and matchStop< tE): #if CDS start after and finish after the match ends cycle break #print cdsIntersectMatch biggestlenInsideMatch=0 lenInsideMatch=0 for j in cdsIntersectMatch: # cds start tS = j[0] # cds stop tE = j[1] cdslen=tE-tS maxratio=float(cdslen) / float(maxsize) minratio=float(cdslen) / float(minsize) #get length of the CDS against the match if tE<=matchStop and tS<matchStart: #if CDS starts before and ends inside match lenInsideMatch=tE-matchStart # ___bm____ #__________ # _______ elif tE<=matchStop and tS>=matchStart: # if CDS starts inside and finishes inside the match lenInsideMatch=tE-tS # ___bm____ # ____ elif tE>matchStop and tS<matchStart: # if CDS is bigger than match lenInsideMatch=matchStop-matchStart # ___bm____ # _____________ elif tE>=matchStop and tS>matchStart: # if CDS starts inside match and ends outside lenInsideMatch=matchStop-tS # ___bm____ # __________ # _______ else : #same size lenInsideMatch=tE-matchStart # ___bm____ # _________ #if(biggestlenInsideMatch<lenInsideMatch and ((maxratio<1+sizeratio and maxratio>=1) or (minratio>1-sizeratio and minratio<=1))): if(biggestlenInsideMatch<lenInsideMatch and (maxratio<1+sizeratio and minratio>1-sizeratio)): biggestlenInsideMatch=lenInsideMatch if (matchStart==tS and tE == matchStop): cdsString = contigsDict[ contigTag ][ tS:tE ].upper() CDStype = "same size as match" # ___bm____ # _________ elif (matchStart>tS and tE == matchStop): cdsString = contigsDict[ contigTag ][ tS:tE ].upper() CDStype = "stop codon in match 
end" # ___bm____ # ___________ elif (matchStart==tS and tE > matchStop): cdsString = contigsDict[ contigTag ][ tS:tE ].upper() CDStype = "start codon in match beggining" # ___bm____ # ___________ elif (matchStart==tS and tE > matchStop): cdsString = contigsDict[ contigTag ][ tS:tE ].upper() CDStype = "bigger than match" # ___bm____ # _____________ elif (matchStart<=tS and tE < matchStop): cdsString = contigsDict[ contigTag ][ tS:tE ].upper() CDStype = "cds inside match" # ___bm____ # ____ elif (matchStart<=tS and tE > matchStop): cdsString = contigsDict[ contigTag ][ tS:tE ].upper() CDStype = "start codon inside match" # ___bm____ # __________ else : # two cds in same match?? cdsString = contigsDict[ contigTag ][ tS:tE ].upper() CDStype = "stop codon inside match" # ___bm____ #__________ if cdsString =='' : return False, cdsString, CDStype else: return True, cdsString, CDStype def printinfo(genome, gene): print "_______________________________________________________" print "Genome : "+ str(os.path.basename(genome)) print "Locus : "+ str(os.path.basename(gene)) # ======================================================== # # Allele calling and classification # # ======================================================== # def main(): try: input_file = sys.argv[1] temppath = sys.argv[2] except IndexError: print "usage: list_pickle_obj" argumentList=[] with open(input_file,'rb') as f: argumentList = pickle.load(f) geneFile = argumentList[0] genomesList = argumentList[1] #listOfCDSDicts = argumentList[2] basepath=temppath gene_fp = HTSeq.FastaReader(geneFile) geneDict = {} alleleI = 1 inverted=False orderedAlleleNames=[] biggestAllelelen=0 smallestAllelelen=99999 for allele in gene_fp: if allele.seq in geneDict: print "\nWARNING: this file contains a repeated allele, it should be checked. 
Ignoring it now!\n", geneFile else: if len(allele.seq)>biggestAllelelen: biggestAllelelen=len(allele.seq) if len(allele.seq)<smallestAllelelen: smallestAllelelen=len(allele.seq) orderedAlleleNames.append(allele.name) geneDict[ allele.seq ] = alleleI alleleI += 1 #print geneDict #print orderedAlleleNames # --- make 1st blast DB --- # Gene_Blast_DB_name = Create_Blastdb( geneFile, 1, False ) geneF = os.path.basename(geneFile) blast_out_file = os.path.dirname(geneFile)+"/blastdbs/"+geneF + '.xml' # list of results - the output of the function resultsList = [] i = 0 perfectMatchIdAllele=[] genomeDict = {} for genomeFile in genomesList: #currentCDSDict = listOfCDSDicts[i] filepath=os.path.join(basepath,str(os.path.basename(genomeFile))+"_ORF.txt") with open(filepath,'rb') as f: currentCDSDict = pickle.load(f) g_fp = HTSeq.FastaReader( genomeFile ) for contig in g_fp: sequence=str(contig.seq) genomeDict[ contig.name ] = sequence currentGenomeDict = genomeDict #print genomeFile #print resultsList #print geneDict #print orderedAlleleNames i+=1 # it has to be incremented here if genomeFile[-1] == '\n': genomeFile = genomeFile[:-1] # ------------------------------ RUNNING BLAST ------------------------------ # #print Gene_Blast_DB_name #cline = NcbiblastnCommandline(query=genomeFile, db=Gene_Blast_DB_name, evalue=0.001, out=blast_out_file, outfmt=5) cline = NcbiblastnCommandline(query=genomeFile, db=Gene_Blast_DB_name, evalue=0.001, out=blast_out_file, outfmt=5) #print cline blast_records = runBlastParser(cline, blast_out_file, genomeFile) print ("Finished Blast at : "+time.strftime("%H:%M:%S-%d/%m/%Y")) # ------ DETERMINING BEST MATCH ------ # # bestMatch = ['rec.query','hsp', lenRatio] bestMatch = ['','', 0] bestMatchContig='' bestMatchContigLen='' bestalignlen=0 perfectMatch=False bmAlleleLen2=0 bmAllele='' #noAlignment=False for blast_record in blast_records: # --- the LNF cases are now called outside de loop --- # #print blast_record if perfectMatch==True: break try: #print blast_record.alignments hspC = blast_record.alignments[0] if bestMatch[0] == '' and bestMatch[1] == '': bestMatch[0] = blast_record.query bestMatch[1] = hspC except IndexError: continue # --- the contig tag is used in the progigal function --- # contigTag = blast_record.query # --- brute force parsing of the contig tag - better solution is advisable --- # j=0 for l in contigTag: if l == ' ': break j+=1 contigTag = contigTag[:j] contigLen = blast_record.query_letters #print blast_record.query_id # --- iterating over all the results to determine the best match --- # for alignment in blast_record.alignments: index=orderedAlleleNames.index(alignment.hit_def) #print alignment.hit_def for k, v in geneDict.iteritems(): if v == index+1: bmAlleleLen2= len(k) if perfectMatch: break for match in alignment.hsps: #print match scoreRatio = float(match.score) / float(bmAlleleLen2) #print alignment.hit_def #print match.identities #print bmAlleleLen2 #print #print match.identities #print len(match.sbjct) #if #identities is the same as the length of the allele and it has no gaps or N's if (int(match.identities)==int(bmAlleleLen2) and int(match.identities)==int(len(match.sbjct)) and "N" not in match.query and "K" not in match.query and "R" not in match.query ): index=orderedAlleleNames.index(alignment.hit_def) for seq, alleleid in geneDict.iteritems(): if alleleid == index+1: bmAllele=seq break bmAlleleLen= len(bmAllele) lenratio=float(len(match.query))/float(bmAlleleLen) bestMatch = [blast_record.query, match, scoreRatio, 
alignment.hit_def,lenratio,bmAlleleLen] bestMatchContig=contigTag perfectMatch=True index=orderedAlleleNames.index(alignment.hit_def) bmAlleleLen= len(geneDict.keys()[index]) break #choose the match with the best score ratio score/length of allele elif scoreRatio > bestMatch[2]: index=orderedAlleleNames.index(alignment.hit_def) for seq, alleleid in geneDict.iteritems(): if alleleid == index+1: bmAllele=seq break bmAlleleLen= len(bmAllele) lenratio=float(len(match.query))/float(bmAlleleLen) bestMatch = [blast_record.query, match, scoreRatio, alignment.hit_def,lenratio,bmAlleleLen] bestMatchContig=contigTag bestMatchContigLen=blast_record.query_letters if match.sbjct_start > match.sbjct_end: inverted=True #print match.query bestalignlen=alignment.length #print match #print bmAlleleLen, bestMatchContig if perfectMatch==True: break # ---------- ALLELE CALLING AFTER DETERMINING BEST MATCH ---------- # print ("Finished choosing best match at : "+time.strftime("%H:%M:%S-%d/%m/%Y")) try: #print bestMatch[0] match = bestMatch[1] #print match.query geneLen = bestMatch[5] alleleStr = match.query nIdentities = match.identities idPercent = float(nIdentities) / float(geneLen) scoreRatio = bestMatch[2] lenRatio = bestMatch[4] #print perfectMatch #print "\nContig best/exact match is :" #print bestMatchContig +"\n" except: #if no best match was found ################### # LOCUS NOT FOUND # ################### resultsList.append('LNF3:-1') # append result to the list of results perfectMatchIdAllele.append('LNF') printinfo(genomeFile,geneFile) print "Locus not found, no matches \n" continue if perfectMatch is True: #if a perfect match was found if match.sbjct_start > match.sbjct_end: #reverse the order if needed alleleStr = reverseComplement(alleleStr) alleleNumber = geneDict[ alleleStr ] ################################################ # EXACT MATCH --- MATCH == GENE --- GENE FOUND # ################################################ if "_" in bestMatch[3]: a=bestMatch[3].split("_") perfectMatchIdAllele.append(a[1]) else: perfectMatchIdAllele.append(bestMatch[3]) resultsList.append('EXC:' + str(alleleNumber) ) elif bestMatch[0] != '' and perfectMatch is not True: #if a best match was found but it's not an exact match ########################### # LOCUS ON THE CONTIG TIP # ########################### if match.query_start ==1 and len(match.query) < geneLen: resultsList.append('LOT5:-1') perfectMatchIdAllele.append('LOT5') printinfo(genomeFile,geneFile) print "Locus is on the 5' tip of the contig \n" elif match.query_end == bestMatchContigLen and len(match.query) < geneLen: resultsList.append('LOT3:-1') perfectMatchIdAllele.append('LOT3') printinfo(genomeFile,geneFile) print "Locus is on the 3' tip of the contig \n" elif bestMatchContigLen <= geneLen: resultsList.append('LOTSC:-1') perfectMatchIdAllele.append('LOTSC') printinfo(genomeFile,geneFile) #print match.query_start print "Locus is bigger than the contig \n" elif 'N' in alleleStr: #TODO gravar para ficheiro ##################### # ALLELE NOT FOUND # # N base found! ##################### geneFile2= os.path.splitext(geneFile)[0] + "LNFN.fasta" print geneFile2 with open(geneFile2, 'a') as f: f.write(">"+ (str(os.path.basename(genomeFile)))+"|"+(str(os.path.basename(geneFile)))+"\n") f.write((alleleStr) +"\n") resultsList.append('LNFN:-1') perfectMatchIdAllele.append('LNFN') printinfo(genomeFile,geneFile) print "LNFN, contains N bases! 
\n" else: # ------------------------------------------------------------------------------------------------------- # # # # USING PRODIGAL TO TRY TO EXTEND CDS # # # # ------------------------------------------------------------------------------------------------------- # CDSType='' sizeratio=0.2 ORFFoundInMatch, strCDS, CDSType = extendCDS(bestMatchContig, currentCDSDict, match.query_start, match.query_end, currentGenomeDict, biggestAllelelen, smallestAllelelen,sizeratio) # --- if it was possible to extend it using prodigal --- # print ("Finished extension at : "+time.strftime("%H:%M:%S-%d/%m/%Y")) #print ORFFoundInMatch #print strCDS #print CDSType isContainedDefinedAllele = False try: if ORFFoundInMatch : alleleStr = strCDS if match.sbjct_start > match.sbjct_end: #reverse the order if needed alleleStr = reverseComplement(alleleStr) lenRatio = float(len(strCDS)) / float(geneLen) defAllele=[] if alleleStr in geneDict: #if ORF found is already defined alleleNumber = geneDict[ alleleStr ] ################################################ # EXACT MATCH --- MATCH == GENE --- GENE FOUND # ################################################ perfectMatchIdAllele.append(alleleNumber) resultsList.append('EXC2:' + str(alleleNumber) ) else: ####################### # ADD INFERRED ALLELE # # a new allele that was extended with prodigal ####################### if(CDSType=='stop codon in match end'): tagAux = 'INF1:' elif(CDSType=='start codon in match beggining'): tagAux = 'INF2:' elif(CDSType=='bigger than match'): tagAux = 'INF3:' elif(CDSType=='same size as match'): tagAux = 'INF4:' elif(CDSType=='cds inside match'): tagAux = 'INF5:' elif(CDSType=='start codon inside match'): tagAux = 'INF6:' else: tagAux = 'INF7:' print "infered allele has location : "+(CDSType) printinfo(genomeFile,geneFile) perfectMatchIdAllele.append( tagAux +"-"+str(alleleI)) print "New allele Infered with prodigal! 
Adding allele "+ tagAux + str(alleleI) +" to the database\n" geneDict[alleleStr] = alleleI resultsList.append( tagAux + str(alleleI) ) orderedAlleleNames.append('allele_' + str(alleleI) + '_' + tagAux[:-1] +"_" +str(os.path.basename(genomeFile))) # --- add the new allele to the gene fasta --- # fG = open( geneFile, 'a' ) fG.write('>allele_' + str(alleleI) + '_' + tagAux[:-1] +"_" + str(os.path.basename(genomeFile)) + '\n') #print alleleStr fG.write( alleleStr + '\n') fG.close() alleleI += 1 # --- remake blast DB --- # Gene_Blast_DB_name = Create_Blastdb( geneFile, 1,False ) else: ################## # LNF WTFFF # ################## geneFile2= os.path.splitext(geneFile)[0] + "LNF2.fasta" print geneFile2 with open(geneFile2, 'a') as f: f.write(">"+ (str(os.path.basename(genomeFile)))+"|"+(str(os.path.basename(geneFile)))+" | "+str(bestMatchContig)+"\n") f.write((alleleStr) +"\n") f.write(">Allele\n") f.write((bmAllele)+"\n") resultsList.append('LNF2') printinfo(genomeFile,geneFile) perfectMatchIdAllele.append("LNF2") print "CDS not found" except: if ORFFoundInMatch : alleleStr = strCDS ################## # LNF WTFFF # ################## geneFile2= os.path.splitext(geneFile)[0] + "LNF99.fasta" print geneFile2 with open(geneFile2, 'a') as f: f.write(">"+ (str(os.path.basename(genomeFile)))+"|"+(str(os.path.basename(geneFile)))+" | "+str(bestMatchContig)+"\n") f.write((alleleStr) +"\n") f.write(">Allele\n") f.write((bmAllele)+"\n") resultsList.append('LNF99') printinfo(genomeFile,geneFile) perfectMatchIdAllele.append("LNF99") print "A problem occurred" final = (resultsList,perfectMatchIdAllele) #return (resultsList) print ("Finished allele calling at : "+time.strftime("%H:%M:%S-%d/%m/%Y")) filepath=os.path.join(basepath , os.path.basename(geneFile)+"_result.txt") #print filepath with open(filepath, 'wb') as f: pickle.dump(final, f) return True if __name__ == "__main__": main()
mickaelsilva/pythonscripts
AlleleCalling/cluster_versions/callAlleles.py
Python
gpl-2.0
18,815
[ "BLAST", "HTSeq" ]
d9825302dec1556970bbcc77129c6cf8b621e3ed057acaf75c7b97b2e834725c
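The allele-calling script in the record above spends most of its effort in extendCDS, deciding how a Prodigal-predicted CDS interval overlaps the BLAST match interval before tagging the inferred allele. The following is a minimal, self-contained sketch of that interval-classification idea, not the record's code: the function name classify_overlap, the 0-based half-open coordinate convention, and the labels (which only loosely follow the CDStype strings in the script) are all illustrative assumptions.

```python
# Hedged sketch: classify how a predicted CDS interval sits relative to a
# BLAST match interval, loosely mirroring the branch structure of extendCDS
# in the record above. Names and labels are illustrative, not the original's.


def classify_overlap(match_start, match_end, cds_start, cds_end):
    """Return a rough label for the CDS position relative to the match."""
    if match_start > match_end:            # BLAST can report reversed coordinates
        match_start, match_end = match_end, match_start
    if cds_end <= match_start or cds_start >= match_end:
        return "no overlap"
    if cds_start == match_start and cds_end == match_end:
        return "same size as match"
    if cds_start < match_start and cds_end <= match_end:
        return "cds extends upstream of match"
    if cds_start >= match_start and cds_end > match_end:
        return "cds extends downstream of match"
    if cds_start >= match_start and cds_end <= match_end:
        return "cds inside match"
    return "cds spans whole match"


if __name__ == "__main__":
    print(classify_overlap(100, 400, 100, 400))  # same size as match
    print(classify_overlap(100, 400, 150, 350))  # cds inside match
    print(classify_overlap(100, 400, 50, 500))   # cds spans whole match
```

A helper like this is easy to unit-test in isolation, which is the main reason to pull the overlap logic out of the long extendCDS body as the original script grows.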
# # @BEGIN LICENSE # # Psi4: an open-source quantum chemistry software package # # Copyright (c) 2007-2016 The Psi4 Developers. # # The copyrights for code used from other parties are included in # the corresponding files. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # @END LICENSE # from __future__ import absolute_import from __future__ import print_function import re import struct from collections import defaultdict from decimal import Decimal from .pdict import PreservingDict from .periodictable import * from .physconst import * from .exceptions import * from .molecule import Molecule from .orient import OrientMols from .options import conv_float2negexp def harvest_output(outtext): """Function to separate portions of a CFOUR output file *outtest*, divided by xjoda. """ pass_psivar = [] pass_coord = [] pass_grad = [] for outpass in re.split(r'--invoking executable xjoda', outtext, re.MULTILINE): psivar, c4coord, c4grad = harvest_outfile_pass(outpass) pass_psivar.append(psivar) pass_coord.append(c4coord) pass_grad.append(c4grad) #print '\n\nXXXXXXXXXXXXXXXXXXXXXXXXXX\n\n' #print outpass #print psivar, c4coord, c4grad #print psivar, c4grad #print '\n\nxxxxxxxxxxxxxxxxxxxxxxxxxx\n\n' retindx = -1 if pass_coord[-1] else -2 # print ' <<< C4 PSIVAR >>>' # for item in pass_psivar[retindx]: # print(' %30s %16.8f' % (item, pass_psivar[retindx][item])) # print ' <<< C4 COORD >>>' # for item in pass_coord[retindx]: # print(' %16.8f %16.8f %16.8f' % (item[0], item[1], item[2])) # print ' <<< C4 GRAD >>>' # for item in pass_grad[retindx]: # print(' %16.8f %16.8f %16.8f' % (item[0], item[1], item[2])) return pass_psivar[retindx], pass_coord[retindx], pass_grad[retindx] def harvest_outfile_pass(outtext): """Function to read CFOUR output file *outtext* and parse important quantum chemical information from it in """ psivar = PreservingDict() psivar_coord = None psivar_grad = None # TODO: BCC # CI # QCISD(T) # other ROHF tests # vcc/ecc NUMBER = "((?:[-+]?\\d*\\.\\d+(?:[DdEe][-+]?\\d+)?)|(?:[-+]?\\d+\\.\\d*(?:[DdEe][-+]?\\d+)?))" # Process NRE mobj = re.search(r'^\s+' + r'(?:Nuclear repulsion energy :)' + r'\s+' + NUMBER + r'\s+a\.u\.\s*$', outtext, re.MULTILINE) if mobj: print('matched nre') psivar['NUCLEAR REPULSION ENERGY'] = mobj.group(1) # Process SCF mobj = re.search( r'^\s+' + r'(?:E\(SCF\))' + r'\s+=\s+' + NUMBER + r'\s+a\.u\.\s*$', outtext, re.MULTILINE) if mobj: print('matched scf1') psivar['SCF TOTAL ENERGY'] = mobj.group(1) mobj = re.search( r'^\s+' + r'(?:E\(SCF\)=)' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*$', outtext, re.MULTILINE) if mobj: print('matched scf2') psivar['SCF TOTAL ENERGY'] = mobj.group(1) mobj = re.search( r'^\s+' + r'(?:SCF has converged.)' + r'\s*$' + r'(?:.*?)' + r'^\s+' + r'(?:\d+)' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*$', outtext, re.MULTILINE | re.DOTALL) if mobj: print('matched scf3') psivar['SCF TOTAL ENERGY'] 
= mobj.group(1) # Process MP2 mobj = re.search( r'^\s+' + r'(?:E2\(AA\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' + r'^\s+' + r'(?:E2\(AB\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' + r'^\s+' + r'(?:E2\(TOT\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' + r'^\s+' + r'(?:Total MP2 energy)' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*$', outtext, re.MULTILINE) if mobj: print('matched mp2r') psivar['MP2 SAME-SPIN CORRELATION ENERGY'] = 2 * Decimal(mobj.group(1)) psivar['MP2 OPPOSITE-SPIN CORRELATION ENERGY'] = mobj.group(2) psivar['MP2 CORRELATION ENERGY'] = 2 * Decimal(mobj.group(1)) + Decimal(mobj.group(2)) psivar['MP2 TOTAL ENERGY'] = mobj.group(4) mobj = re.search( r'^\s+' + r'(?:E2\(AA\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' + r'^\s+' + r'(?:E2\(BB\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' + r'^\s+' + r'(?:E2\(AB\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' + r'^\s+' + r'(?:E2\(TOT\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' + r'^\s+' + r'(?:Total MP2 energy)' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*$', outtext, re.MULTILINE) if mobj: print('matched mp2u') psivar['MP2 SAME-SPIN CORRELATION ENERGY'] = Decimal(mobj.group(1)) + Decimal(mobj.group(2)) psivar['MP2 OPPOSITE-SPIN CORRELATION ENERGY'] = mobj.group(3) psivar['MP2 CORRELATION ENERGY'] = Decimal(mobj.group(1)) + \ Decimal(mobj.group(2)) + Decimal(mobj.group(3)) psivar['MP2 TOTAL ENERGY'] = mobj.group(5) mobj = re.search( r'^\s+' + r'(?:E2\(AA\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' + r'^\s+' + r'(?:E2\(BB\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' + r'^\s+' + r'(?:E2\(AB\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' + r'^\s+' + r'(?:E2\(SINGLE\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' + r'^\s+' + r'(?:E2\(TOT\))' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*' + r'^\s+' + r'(?:Total MP2 energy)' + r'\s+=\s+' + NUMBER + r'\s+a.u.\s*$', outtext, re.MULTILINE) if mobj: print('matched mp2ro') psivar['MP2 SAME-SPIN CORRELATION ENERGY'] = Decimal(mobj.group(1)) + Decimal(mobj.group(2)) psivar['MP2 OPPOSITE-SPIN CORRELATION ENERGY'] = mobj.group(3) psivar['MP2 SINGLES ENERGY'] = mobj.group(4) psivar['MP2 CORRELATION ENERGY'] = Decimal(mobj.group(1)) + \ Decimal(mobj.group(2)) + Decimal(mobj.group(3)) + Decimal(mobj.group(4)) psivar['MP2 TOTAL ENERGY'] = mobj.group(6) # Process MP3 mobj = re.search( r'^\s+' + r'(?:D-MBPT\(2\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'(?:D-MBPT\(3\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*$', outtext, re.MULTILINE | re.DOTALL) if mobj: print('matched mp3r') dmp2 = Decimal(mobj.group(1)) dmp3 = Decimal(mobj.group(3)) psivar['MP2 CORRELATION ENERGY'] = dmp2 psivar['MP2 TOTAL ENERGY'] = mobj.group(2) psivar['MP3 CORRELATION ENERGY'] = dmp2 + dmp3 psivar['MP3 TOTAL ENERGY'] = mobj.group(4) psivar['MP2.5 CORRELATION ENERGY'] = dmp2 + Decimal('0.500000000000') * dmp3 psivar['MP2.5 TOTAL ENERGY'] = psivar['MP2.5 CORRELATION ENERGY'] + psivar['SCF TOTAL ENERGY'] mobj = re.search( r'^\s+' + r'(?:S-MBPT\(2\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'(?:D-MBPT\(2\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'(?:S-MBPT\(3\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'(?:D-MBPT\(3\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*$', outtext, re.MULTILINE | re.DOTALL) if mobj: print('matched mp3ro') dmp2 = Decimal(mobj.group(1)) + Decimal(mobj.group(3)) dmp3 = Decimal(mobj.group(5)) + Decimal(mobj.group(7)) psivar['MP3 CORRELATION ENERGY'] = dmp2 + dmp3 psivar['MP3 TOTAL ENERGY'] = mobj.group(8) psivar['MP2.5 CORRELATION ENERGY'] = dmp2 + Decimal('0.500000000000') * dmp3 
psivar['MP2.5 TOTAL ENERGY'] = psivar['MP2.5 CORRELATION ENERGY'] + psivar['SCF TOTAL ENERGY'] # Process MP4 mobj = re.search( r'^\s+' + r'(?:D-MBPT\(2\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'(?:D-MBPT\(3\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'(?:D-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'(?:Q-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'(?:S-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*$', outtext, re.MULTILINE | re.DOTALL) if mobj: print('matched mp4r') dmp2 = Decimal(mobj.group(1)) dmp3 = Decimal(mobj.group(3)) dmp4sdq = Decimal(mobj.group(5)) + Decimal(mobj.group(7)) + Decimal(mobj.group(9)) psivar['MP2 CORRELATION ENERGY'] = dmp2 psivar['MP2 TOTAL ENERGY'] = mobj.group(2) psivar['MP3 CORRELATION ENERGY'] = dmp2 + dmp3 psivar['MP3 TOTAL ENERGY'] = mobj.group(4) psivar['MP2.5 CORRELATION ENERGY'] = dmp2 + Decimal('0.500000000000') * dmp3 psivar['MP2.5 TOTAL ENERGY'] = psivar['MP2.5 CORRELATION ENERGY'] + psivar['SCF TOTAL ENERGY'] psivar['MP4(SDQ) CORRELATION ENERGY'] = dmp2 + dmp3 + dmp4sdq psivar['MP4(SDQ) TOTAL ENERGY'] = mobj.group(10) mobj = re.search( r'^\s+' + r'(?:S-MBPT\(2\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'(?:D-MBPT\(2\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'(?:S-MBPT\(3\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'(?:D-MBPT\(3\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'(?:L-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'(?:NL-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*$', outtext, re.MULTILINE | re.DOTALL) if mobj: print('matched mp4ro') dmp2 = Decimal(mobj.group(1)) + Decimal(mobj.group(3)) dmp3 = Decimal(mobj.group(5)) + Decimal(mobj.group(7)) dmp4sdq = Decimal(mobj.group(9)) + Decimal(mobj.group(11)) psivar['MP2 CORRELATION ENERGY'] = dmp2 psivar['MP2 TOTAL ENERGY'] = mobj.group(4) psivar['MP3 CORRELATION ENERGY'] = dmp2 + dmp3 psivar['MP3 TOTAL ENERGY'] = mobj.group(8) psivar['MP2.5 CORRELATION ENERGY'] = dmp2 + Decimal('0.500000000000') * dmp3 psivar['MP2.5 TOTAL ENERGY'] = psivar['MP2.5 CORRELATION ENERGY'] + psivar['SCF TOTAL ENERGY'] psivar['MP4(SDQ) CORRELATION ENERGY'] = dmp2 + dmp3 + dmp4sdq psivar['MP4(SDQ) TOTAL ENERGY'] = mobj.group(12) mobj = re.search( r'^\s+' + r'(?:D-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'(?:Q-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'(?:S-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'(?:T-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*$', outtext, re.MULTILINE | re.DOTALL) if mobj: print('matched mp4tr') dmp4sdq = Decimal(mobj.group(1)) + Decimal(mobj.group(3)) + Decimal(mobj.group(5)) dmp4t = Decimal(mobj.group(7)) psivar['MP4(SDQ) CORRELATION ENERGY'] = psivar['MP3 CORRELATION ENERGY'] + dmp4sdq psivar['MP4(SDQ) TOTAL ENERGY'] = mobj.group(6) psivar['MP4(T) CORRECTION ENERGY'] = dmp4t psivar['MP4(SDTQ) CORRELATION ENERGY'] = psivar['MP3 CORRELATION ENERGY'] + dmp4sdq + dmp4t psivar['MP4(SDTQ) TOTAL ENERGY'] = mobj.group(8) psivar['MP4 CORRELATION ENERGY'] = psivar['MP4(SDTQ) CORRELATION ENERGY'] psivar['MP4 TOTAL ENERGY'] = psivar['MP4(SDTQ) TOTAL ENERGY'] mobj = re.search( r'^\s+' + r'(?:L-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'(?:NL-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' + r'^\s+' + r'(?:WT12-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*' + r'^\s+' + 
r'(?:T-MBPT\(4\))' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*$', outtext, re.MULTILINE | re.DOTALL) if mobj: print('matched mp4tro') dmp4sdq = Decimal(mobj.group(1)) + Decimal(mobj.group(3)) dmp4t = Decimal(mobj.group(5)) + Decimal(mobj.group(7)) # TODO: WT12 with T, not SDQ? psivar['MP4(SDQ) CORRELATION ENERGY'] = psivar['MP3 CORRELATION ENERGY'] + dmp4sdq psivar['MP4(SDQ) TOTAL ENERGY'] = mobj.group(4) psivar['MP4(T) CORRECTION ENERGY'] = dmp4t psivar['MP4(SDTQ) CORRELATION ENERGY'] = psivar['MP3 CORRELATION ENERGY'] + dmp4sdq + dmp4t psivar['MP4(SDTQ) TOTAL ENERGY'] = mobj.group(8) psivar['MP4 CORRELATION ENERGY'] = psivar['MP4(SDTQ) CORRELATION ENERGY'] psivar['MP4 TOTAL ENERGY'] = psivar['MP4(SDTQ) TOTAL ENERGY'] # Process CC Iterations mobj = re.search( r'^\s+' + r'(?P<fullCC>(?P<iterCC>CC(?:\w+))(?:\(T\))?)' + r'\s+(?:energy will be calculated.)\s*' + r'(?:.*?)' + r'^\s+' + r'(?:\d+)' + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s+DIIS\s*' + r'^\s*(?:-+)\s*' + r'^\s*(?:A miracle (?:has come|come) to pass. The CC iterations have converged.)\s*$', outtext, re.MULTILINE | re.DOTALL) if mobj: print('matched cc with full %s iterating %s' % (mobj.group('fullCC'), mobj.group('iterCC'))) psivar['%s CORRELATION ENERGY' % (mobj.group('iterCC'))] = mobj.group(3) psivar['%s TOTAL ENERGY' % (mobj.group('iterCC'))] = mobj.group(4) # Process CC(T) mobj = re.search( r'^\s+' + r'(?:E\(SCF\))' + r'\s+=\s+' + NUMBER + r'\s+a\.u\.\s*' + r'(?:.*?)' + r'^\s+' + r'(?:E\(CCSD\))' + r'\s+=\s+' + NUMBER + r'\s*' + r'(?:.*?)' + r'^\s+' + r'(?:E\(CCSD\(T\)\))' + r'\s+=\s+' + NUMBER + r'\s*$', outtext, re.MULTILINE | re.DOTALL) if mobj: print('matched ccsd(t) vcc') psivar['SCF TOTAL ENERGY'] = mobj.group(1) psivar['CCSD TOTAL ENERGY'] = mobj.group(2) psivar['(T) CORRECTION ENERGY'] = Decimal(mobj.group(3)) - Decimal(mobj.group(2)) psivar['CCSD(T) CORRELATION ENERGY'] = Decimal(mobj.group(3)) - Decimal(mobj.group(1)) psivar['CCSD(T) TOTAL ENERGY'] = mobj.group(3) mobj = re.search( r'^\s+' + r'(?:E\(SCF\))' + r'\s+=\s*' + NUMBER + r'\s+a\.u\.\s*' + r'(?:.*?)' + r'^\s+' + r'(?:CCSD energy)' + r'\s+' + NUMBER + r'\s*' + r'(?:.*?)' + r'^\s+' + r'(?:Total perturbative triples energy:)' + r'\s+' + NUMBER + r'\s*' + r'^\s*(?:-+)\s*' + r'^\s+' + r'(?:CCSD\(T\) energy)' + r'\s+' + NUMBER + r'\s*$', outtext, re.MULTILINE | re.DOTALL) if mobj: print('matched ccsd(t) ecc') psivar['SCF TOTAL ENERGY'] = mobj.group(1) psivar['CCSD TOTAL ENERGY'] = mobj.group(2) psivar['(T) CORRECTION ENERGY'] = mobj.group(3) psivar['CCSD(T) CORRELATION ENERGY'] = Decimal(mobj.group(4)) - Decimal(mobj.group(1)) psivar['CCSD(T) TOTAL ENERGY'] = mobj.group(4) mobj = re.search( r'^\s+' + r'(?:CCSD energy)' + r'\s+' + NUMBER + r'\s*' + r'^\s*(?:-+)\s*' + r'^\s+' + r'(?:CCSD\(T\) energy)' + r'\s+' + NUMBER + r'\s*$', outtext, re.MULTILINE | re.DOTALL) if mobj: print('matched ccsd(t) lamb') psivar['CCSD TOTAL ENERGY'] = mobj.group(1) psivar['(T) CORRECTION ENERGY'] = Decimal(mobj.group(2)) - Decimal(mobj.group(1)) psivar['CCSD(T) CORRELATION ENERGY'] = Decimal(mobj.group(2)) - psivar['SCF TOTAL ENERGY'] psivar['CCSD(T) TOTAL ENERGY'] = mobj.group(2) # Process SCS-CC mobj = re.search( r'^\s+' + r'(?P<fullCC>(?P<iterCC>CC(?:\w+))(?:\(T\))?)' + r'\s+(?:energy will be calculated.)\s*' + r'(?:.*?)' + r'^\s*' + r'(?:@CCENRG-I, Correlation energies.)' + r'\s+(?:ECCAA)\s+' + NUMBER + r'\s*' + r'^\s+(?:ECCBB)\s+' + NUMBER + '\s*' + r'^\s+(?:ECCAB)\s+' + NUMBER + '\s*' + r'^\s+(?:Total)\s+' + NUMBER + '\s*', outtext, re.MULTILINE | re.DOTALL) if mobj: # PRINT=2 
to get SCS-CC components print('matched scscc') psivar['%s SAME-SPIN CORRELATION ENERGY' % (mobj.group('iterCC'))] = Decimal(mobj.group(3)) + Decimal(mobj.group(4)) psivar['%s OPPOSITE-SPIN CORRELATION ENERGY' % (mobj.group('iterCC'))] = mobj.group(5) psivar['%s CORRELATION ENERGY' % (mobj.group('iterCC'))] = mobj.group(6) mobj = re.search( r'^\s+' + r'(?P<fullCC>(?P<iterCC>CC(?:\w+))(?:\(T\))?)' + r'\s+(?:energy will be calculated.)\s*' + r'(?:.*?)' + r'^\s+' + r'Amplitude equations converged in' + r'\s*\d+\s*' + r'iterations.\s*' + r'^\s+' + r'The AA contribution to the correlation energy is:\s+' + NUMBER + r'\s+a.u.\s*' + r'^\s+' + r'The BB contribution to the correlation energy is:\s+' + NUMBER + r'\s+a.u.\s*' + r'^\s+' + r'The AB contribution to the correlation energy is:\s+' + NUMBER + r'\s+a.u.\s*' + r'^\s+' + r'The total correlation energy is\s+' + NUMBER + r'\s+a.u.\s*' + r'(?:.*?)' + #r'^\s+' + r'The CC iterations have converged.' + r'\s*$', r'^\s+' + r'(?:A miracle come to pass. )?' + r'The CC iterations have converged.' + r'\s*$', outtext, re.MULTILINE | re.DOTALL) if mobj: # PRINT=2 to get SCS components print('matched scscc2') psivar['%s SAME-SPIN CORRELATION ENERGY' % (mobj.group('iterCC'))] = Decimal(mobj.group(3)) + Decimal(mobj.group(4)) psivar['%s OPPOSITE-SPIN CORRELATION ENERGY' % (mobj.group('iterCC'))] = mobj.group(5) psivar['%s CORRELATION ENERGY' % (mobj.group('iterCC'))] = mobj.group(6) # Process gradient mobj = re.search( r'\s+' + r'Molecular gradient' + r'\s*' + r'\s+' + r'------------------' + r'\s*' + r'\s+' + r'\n' + r'(?:(?:\s+[A-Z]+\s*#\d+\s+[xyz]\s+[-+]?\d+\.\d+\s*\n)+)' + # optional, it seems r'\n\n' + # optional, it seems r'((?:\s+[A-Z]+\s*#\d+\s+\d?\s+[-+]?\d+\.\d+\s+[-+]?\d+\.\d+\s+[-+]?\d+\.\d+\s*\n)+)' + r'\n\n' + r'\s+' + 'Molecular gradient norm', outtext, re.MULTILINE) if mobj: print('matched molgrad') atoms = [] psivar_grad = [] for line in mobj.group(1).splitlines(): lline = line.split() atoms.append(lline[0]) #psivar_gradient.append([Decimal(lline[-3]), Decimal(lline[-2]), Decimal(lline[-1])]) psivar_grad.append([float(lline[-3]), float(lline[-2]), float(lline[-1])]) # Process geometry mobj = re.search( # r'\s+(?:-+)\s*' + # r'^\s+' + r'Z-matrix Atomic Coordinates (in bohr)' + r'\s*' + r'^\s+' + r'Symbol Number X Y Z' + r'\s*' + r'^\s+(?:-+)\s*' + r'((?:\s+[A-Z]+\s+[0-9]+\s+[-+]?\d+\.\d+\s+[-+]?\d+\.\d+\s+[-+]?\d+\.\d+\s*\n)+)' + r'^\s+(?:-+)\s*', outtext, re.MULTILINE) if mobj: print('matched geom') molxyz = '%d bohr\n\n' % len(mobj.group(1).splitlines()) for line in mobj.group(1).splitlines(): lline = line.split() molxyz += '%s %16s %16s %16s\n' % (lline[0], lline[-3], lline[-2], lline[-1]) # Rather a dinky Molecule as no ghost, charge, or multiplicity psivar_coord = Molecule.init_with_xyz(molxyz, no_com=True, no_reorient=True, contentsNotFilename=True) # Process atom geometry mobj = re.search( r'^\s+' + r'@GETXYZ-I, 1 atoms read from ZMAT.' 
+ r'\s*' + r'^\s+' + r'[0-9]+\s+([A-Z]+)\s+[0-9]+\s+' + NUMBER + r'\s*', outtext, re.MULTILINE) if mobj: print('matched atom') # Dinky Molecule molxyz = '1 bohr\n\n%s 0.0 0.0 0.0\n' % (mobj.group(1)) psivar_coord = Molecule.init_with_xyz(molxyz, no_com=True, no_reorient=True, contentsNotFilename=True) # Process error codes mobj = re.search( r'^\s*' + r'--executable ' + r'(\w+)' + r' finished with status' + r'\s+' + r'([1-9][0-9]*)', outtext, re.MULTILINE) if mobj: print('matched error') psivar['CFOUR ERROR CODE'] = mobj.group(2) # Process CURRENT energies (TODO: needs better way) if 'SCF TOTAL ENERGY' in psivar: psivar['CURRENT REFERENCE ENERGY'] = psivar['SCF TOTAL ENERGY'] psivar['CURRENT ENERGY'] = psivar['SCF TOTAL ENERGY'] if 'MP2 TOTAL ENERGY' in psivar and 'MP2 CORRELATION ENERGY' in psivar: psivar['CURRENT CORRELATION ENERGY'] = psivar['MP2 CORRELATION ENERGY'] psivar['CURRENT ENERGY'] = psivar['MP2 TOTAL ENERGY'] if 'MP3 TOTAL ENERGY' in psivar and 'MP3 CORRELATION ENERGY' in psivar: psivar['CURRENT CORRELATION ENERGY'] = psivar['MP3 CORRELATION ENERGY'] psivar['CURRENT ENERGY'] = psivar['MP3 TOTAL ENERGY'] if 'MP4 TOTAL ENERGY' in psivar and 'MP4 CORRELATION ENERGY' in psivar: psivar['CURRENT CORRELATION ENERGY'] = psivar['MP4 CORRELATION ENERGY'] psivar['CURRENT ENERGY'] = psivar['MP4 TOTAL ENERGY'] # if ('%s TOTAL ENERGY' % (mobj.group('fullCC')) in psivar) and \ # ('%s CORRELATION ENERGY' % (mobj.group('fullCC')) in psivar): # psivar['CURRENT CORRELATION ENERGY'] = psivar['%s CORRELATION ENERGY' % (mobj.group('fullCC')] # psivar['CURRENT ENERGY'] = psivar['%s TOTAL ENERGY' % (mobj.group('fullCC')] if 'CC2 TOTAL ENERGY' in psivar and 'CC2 CORRELATION ENERGY' in psivar: psivar['CURRENT CORRELATION ENERGY'] = psivar['CC2 CORRELATION ENERGY'] psivar['CURRENT ENERGY'] = psivar['CC2 TOTAL ENERGY'] if 'CCSD TOTAL ENERGY' in psivar and 'CCSD CORRELATION ENERGY' in psivar: psivar['CURRENT CORRELATION ENERGY'] = psivar['CCSD CORRELATION ENERGY'] psivar['CURRENT ENERGY'] = psivar['CCSD TOTAL ENERGY'] if 'CCSD(T) TOTAL ENERGY' in psivar and 'CCSD(T) CORRELATION ENERGY' in psivar: psivar['CURRENT CORRELATION ENERGY'] = psivar['CCSD(T) CORRELATION ENERGY'] psivar['CURRENT ENERGY'] = psivar['CCSD(T) TOTAL ENERGY'] if 'CC3 TOTAL ENERGY' in psivar and 'CC3 CORRELATION ENERGY' in psivar: psivar['CURRENT CORRELATION ENERGY'] = psivar['CC3 CORRELATION ENERGY'] psivar['CURRENT ENERGY'] = psivar['CC3 TOTAL ENERGY'] if 'CCSDT TOTAL ENERGY' in psivar and 'CCSDT CORRELATION ENERGY' in psivar: psivar['CURRENT CORRELATION ENERGY'] = psivar['CCSDT CORRELATION ENERGY'] psivar['CURRENT ENERGY'] = psivar['CCSDT TOTAL ENERGY'] return psivar, psivar_coord, psivar_grad def harvest(p4Mol, c4out, **largs): """Parses all the pieces of output from Cfour: the stdout in *c4out* and the contents of various scratch files like GRD stored in their namesake keys in *largs*. Since all Cfour output uses its own orientation and atom ordering for the given molecule, a qcdb.Molecule *p4Mol*, if supplied, is used to transform the Cfour output back into consistency with *p4Mol*. 
""" # Collect results from output file and subsidiary files outPsivar, outMol, outGrad = harvest_output(c4out) if 'GRD' in largs: grdMol, grdGrad = harvest_GRD(largs['GRD']) else: grdMol, grdGrad = None, None if 'FCMFINAL' in largs: fcmHess = harvest_FCM(largs['FCMFINAL']) else: fcmHess = None if 'DIPOL' in largs: dipolDip = harvest_DIPOL(largs['DIPOL']) else: dipolDip = None # Reconcile the coordinate information: several cases # Case p4Mol GRD Check consistency Apply orientation? ReturnMol (1-19-2014) # sp with mol thru cfour {} None None outMol N.C. outMol # opt with mol thru cfour {} None grdMol outMol && grdMol N.C. grdMol # sp with mol thru molecule {} p4Mol None p4Mol && outMol p4Mol <-- outMol p4Mol (same as input arg) # opt with mol thru molecule {} p4Mol grdMol p4Mol && outMol && grdMol p4Mol <-- grdMol p4Mol (same as input arg) if outMol: if grdMol: if abs(outMol.nuclear_repulsion_energy() - grdMol.nuclear_repulsion_energy()) > 1.0e-3: raise ValidationError("""Cfour outfile (NRE: %f) inconsistent with Cfour GRD (NRE: %f).""" % \ (outMol.nuclear_repulsion_energy(), grdMol.nuclear_repulsion_energy())) if p4Mol: if abs(outMol.nuclear_repulsion_energy() - p4Mol.nuclear_repulsion_energy()) > 1.0e-3: raise ValidationError("""Cfour outfile (NRE: %f) inconsistent with Psi4 input (NRE: %f).""" % \ (outMol.nuclear_repulsion_energy(), p4Mol.nuclear_repulsion_energy())) else: raise ValidationError("""No coordinate information extracted from Cfour output.""") # print ' <<< [1] P4-MOL >>>' # if p4Mol: # p4Mol.print_out_in_bohr() # print ' <<< [2] C4-OUT-MOL >>>' # if outMol: # outMol.print_out_in_bohr() # print ' <<< [3] C4-GRD-MOL >>>' # if grdMol: # grdMol.print_out_in_bohr() # Set up array reorientation object if p4Mol and grdMol: p4c4 = OrientMols(p4Mol, grdMol) oriCoord = p4c4.transform_coordinates2(grdMol) oriGrad = p4c4.transform_gradient(grdGrad) oriDip = None if dipolDip is None else p4c4.transform_vector(dipolDip) elif p4Mol and outMol: p4c4 = OrientMols(p4Mol, outMol) oriCoord = p4c4.transform_coordinates2(outMol) oriGrad = None oriDip = None if dipolDip is None else p4c4.transform_vector(dipolDip) elif outMol: oriCoord = None oriGrad = None oriDip = None if dipolDip is None else dipolDip # print p4c4 # print ' <<< [4] C4-ORI-MOL >>>' # if oriCoord is not None: # for item in oriCoord: # print(' %16.8f %16.8f %16.8f' % (item[0], item[1], item[2])) # # print ' <<< [1] C4-GRD-GRAD >>>' # if grdGrad is not None: # for item in grdGrad: # print(' %16.8f %16.8f %16.8f' % (item[0], item[1], item[2])) # print ' <<< [2] C4-ORI-GRAD >>>' # if oriGrad is not None: # for item in oriGrad: # print(' %16.8f %16.8f %16.8f' % (item[0], item[1], item[2])) retMol = None if p4Mol else grdMol if oriDip: outPsivar['CURRENT DIPOLE X'] = str(oriDip[0] * psi_dipmom_au2debye) outPsivar['CURRENT DIPOLE Y'] = str(oriDip[1] * psi_dipmom_au2debye) outPsivar['CURRENT DIPOLE Z'] = str(oriDip[2] * psi_dipmom_au2debye) if oriGrad: retGrad = oriGrad elif grdGrad: retGrad = grdGrad else: retGrad = None return outPsivar, retGrad, retMol def harvest_GRD(grd): """Parses the contents *grd* of the Cfour GRD file into the gradient array and coordinate information. The coordinate info is converted into a rather dinky Molecule (no charge, multiplicity, or fragment), but this is these coordinates that govern the reading of molecule orientation by Cfour. Return qcdb.Molecule and gradient array. 
""" grd = grd.splitlines() Nat = int(grd[0].split()[0]) molxyz = '%d bohr\n\n' % (Nat) grad = [] for at in range(Nat): mline = grd[at + 1].split() el = 'GH' if int(float(mline[0])) == 0 else z2el[int(float(mline[0]))] molxyz += '%s %16s %16s %16s\n' % (el, mline[-3], mline[-2], mline[-1]) lline = grd[at + 1 + Nat].split() grad.append([float(lline[-3]), float(lline[-2]), float(lline[-1])]) mol = Molecule.init_with_xyz(molxyz, no_com=True, no_reorient=True, contentsNotFilename=True) return mol, grad def harvest_zmat(zmat): """Parses the contents of the Cfour ZMAT file into array and coordinate information. The coordinate info is converted into a rather dinky Molecule (no fragment, but does read charge, mult, unit). Return qcdb.Molecule. Written for findif zmat* where geometry always Cartesian and Bohr. """ zmat = zmat.splitlines()[1:] # skip comment line Nat = 0 readCoord = True isBohr = '' charge = 0 mult = 1 molxyz = '' cgeom = [] for line in zmat: if line.strip() == '': readCoord = False elif readCoord: lline = line.split() molxyz += line + '\n' Nat += 1 else: if line.find('CHARGE') > -1: idx = line.find('CHARGE') charge = line[idx + 7:] idxc = charge.find(',') if idxc > -1: charge = charge[:idxc] charge = int(charge) if line.find('MULTIPLICITY') > -1: idx = line.find('MULTIPLICITY') mult = line[idx + 13:] idxc = mult.find(',') if idxc > -1: mult = mult[:idxc] mult = int(mult) if line.find('UNITS=BOHR') > -1: isBohr = ' bohr' molxyz = '%d%s\n%d %d\n' % (Nat, isBohr, charge, mult) + molxyz mol = Molecule.init_with_xyz(molxyz, no_com=True, no_reorient=True, contentsNotFilename=True) return mol def harvest_FCM(fcm): """Parses the contents *fcm* of the Cfour FCMFINAL file into a hessian array. """ fcm = fcm.splitlines() Nat = int(fcm[0].split()[0]) Ndof = int(fcm[0].split()[1]) empty = True hess = [] for df in range(Ndof): for at in range(Nat): lline = fcm[Ndof * at + at + 1].split() if empty: if (abs(float(lline[0])) > 1.0e-8) or \ (abs(float(lline[1])) > 1.0e-8) or \ (abs(float(lline[2])) > 1.0e-8): empty = False fcm.append([float(lline[0]), float(lline[1]), float(lline[2])]) return None if empty else hess def harvest_DIPOL(dipol): """Parses the contents *dipol* of the Cfour DIPOL file into a dipol vector. """ dipol = dipol.splitlines() lline = dipol[0].split() dip = [float(lline[0]), float(lline[1]), float(lline[2])] #return None if empty else dip return dip def muster_memory(mem): """Transform input *mem* in MB into psi4-type options for cfour. 
""" text = '' # prepare memory keywords to be set as c-side keywords options = defaultdict(lambda: defaultdict(dict)) options['CFOUR']['CFOUR_MEMORY_SIZE']['value'] = int(mem) options['CFOUR']['CFOUR_MEM_UNIT']['value'] = 'MB' for item in options['CFOUR']: options['CFOUR'][item]['clobber'] = True return text, options # Ways of modifying a computation # global: set global c-side option # local: set local c-side option # kwarg: set kwarg # i-local: set global=local c-side option to an interface module # ro-def: code uses default entirely specified by read_options # module-def: code uses default that is complex mixture of read_options settings # i-def: interfaced code uses defaults not entirely expressed in read_options # driver-def: driver code sets complex defaults # # Pure psi4 operation # kwarg ~= local > global > driver-def > module-def > ro-def # # Interfaced psi4 operation # kwarg ~= i-local > local > global > driver-def > i-def # P4 infrastructure replacing interfaced infrastructure (mol, basis, mem) where unavoidable overlap in how things are specified (mult in mol{} vs keyword) is treated as a clobber & complain if conflict VS P4 infrastructure as an aliased/convenient leak into interfaced infrastructure (psi) and is strictly no clobber or complain. def muster_psi4options(opt): """Translate psi4 keywords *opt* that have been explicitly set into their Cfour counterparts. Since explicitly set Cfour module keyword values will always be used preferentially to these inferred from psi4, the 'clobber' property is set to False. """ text = '' options = defaultdict(lambda: defaultdict(dict)) if 'GLOBALS' in opt: if 'PUREAM' in opt['GLOBALS']: options['CFOUR']['CFOUR_SPHERICAL']['value'] = \ opt['MINTS']['PUREAM']['value'] if 'SCF' in opt: if 'REFERENCE' in opt['SCF']: options['CFOUR']['CFOUR_REFERENCE']['value'] = \ {'RHF': 'RHF', 'UHF': 'UHF', 'ROHF': 'ROHF'}[opt['SCF']['REFERENCE']['value']] if 'D_CONVERGENCE' in opt['SCF']: options['CFOUR']['CFOUR_SCF_CONV']['value'] = \ conv_float2negexp(opt['SCF']['D_CONVERGENCE']['value']) if 'MAXITER' in opt['SCF']: options['CFOUR']['CFOUR_SCF_MAXCYC']['value'] = \ opt['SCF']['MAXITER']['value'] if 'DAMPING_PERCENTAGE' in opt['SCF']: options['CFOUR']['CFOUR_SCF_DAMPING']['value'] = \ int(10 * opt['SCF']['DAMPING_PERCENTAGE']['value']) for item in options['CFOUR']: options['CFOUR'][item]['clobber'] = False return text, options # Philosophy break: # Specification options # Massaging options # * No program's defaults should be tampered with w/o provokation # want all defaults applied to all programs, so p4 scf_conv is 5 and c4 scf_conv is 5 # want separate regimes, so conv 6 covers all the p4 parts and cfour_conv = 8 covers the c4 parts # want mixture, so basis gets applied to c4 but others don't # first case, when options specified explicitly # [scf][d_convergence] [cfour][cfour_scf_conv] what happens? # 8 from opt() 7 by default # 6 from set {...} 7 by default 6 (guideline that psi4 format converts when clear) # 8 from opt() 5 from set {...} 5 (local trumps) # 6 from set {...} 5 from set {...} 5 (local trumps) # # energy(name) [cfour][cfour_calc_level] # c4-scf SCF by default # c4-scf CCSD from set {...} def muster_modelchem(name, dertype): """Transform calculation method *name* and derivative level *dertype* into options for cfour. 
While deliberately requested pieces, generally |cfour__cfour_deriv_level| and |cfour__cfour_calc_level|, are set to complain if contradicted ('clobber' set to True), other 'recommended' settings, like |cfour__cfour_cc_program|, can be countermanded by keywords in input file ('clobber' set to False). Occasionally, want these pieces to actually overcome keywords in input file ('superclobber' set to True). """ text = '' lowername = name.lower() options = defaultdict(lambda: defaultdict(dict)) if dertype == 0: if lowername == 'cfour': pass # permit clean operation of sandwich mode else: options['CFOUR']['CFOUR_DERIV_LEVEL']['value'] = 'ZERO' elif dertype == 1: options['CFOUR']['CFOUR_DERIV_LEVEL']['value'] = 'FIRST' elif dertype == 2: options['CFOUR']['CFOUR_DERIV_LEVEL']['value'] = 'SECOND' else: raise ValidationError("""Requested Cfour dertype %d is not available.""" % (dertype)) if lowername == 'cfour': pass elif lowername == 'c4-scf': options['CFOUR']['CFOUR_CALC_LEVEL']['value'] = 'SCF' elif lowername == 'c4-mp2': options['CFOUR']['CFOUR_CALC_LEVEL']['value'] = 'MP2' elif lowername == 'c4-mp3': options['CFOUR']['CFOUR_CALC_LEVEL']['value'] = 'MP3' elif lowername == 'c4-mp4(sdq)': options['CFOUR']['CFOUR_CALC_LEVEL']['value'] = 'SDQ-MP4' elif lowername == 'c4-mp4': options['CFOUR']['CFOUR_CALC_LEVEL']['value'] = 'MP4' elif lowername == 'c4-cc2': options['CFOUR']['CFOUR_CALC_LEVEL']['value'] = 'CC2' elif lowername == 'c4-ccsd': options['CFOUR']['CFOUR_CALC_LEVEL']['value'] = 'CCSD' options['CFOUR']['CFOUR_CC_PROGRAM']['value'] = 'ECC' elif lowername == 'c4-cc3': options['CFOUR']['CFOUR_CALC_LEVEL']['value'] = 'CC3' elif lowername == 'c4-ccsd(t)': # Can't use (T) b/c bug in xsymcor lops it off #options['CFOUR']['CFOUR_CALC_LEVEL']['value'] = 'CCSD(T)' options['CFOUR']['CFOUR_CALC_LEVEL']['value'] = 'CCSD[T]' options['CFOUR']['CFOUR_CC_PROGRAM']['value'] = 'ECC' elif lowername == 'c4-ccsdt': options['CFOUR']['CFOUR_CALC_LEVEL']['value'] = 'CCSDT' options['CFOUR']['CFOUR_CC_PROGRAM']['value'] = 'ECC' else: raise ValidationError("""Requested Cfour computational methods %d is not available.""" % (lowername)) # Set clobbering if 'CFOUR_DERIV_LEVEL' in options['CFOUR']: options['CFOUR']['CFOUR_DERIV_LEVEL']['clobber'] = True options['CFOUR']['CFOUR_DERIV_LEVEL']['superclobber'] = True if 'CFOUR_CALC_LEVEL' in options['CFOUR']: options['CFOUR']['CFOUR_CALC_LEVEL']['clobber'] = True options['CFOUR']['CFOUR_CALC_LEVEL']['superclobber'] = True if 'CFOUR_CC_PROGRAM' in options['CFOUR']: options['CFOUR']['CFOUR_CC_PROGRAM']['clobber'] = False return text, options def cfour_list(): """Return an array of Cfour methods with energies. Appended to procedures['energy']. """ val = [] val.append('cfour') val.append('c4-scf') val.append('c4-mp2') val.append('c4-mp3') val.append('c4-mp4(sdq)') val.append('c4-mp4') val.append('c4-cc2') val.append('c4-ccsd') val.append('c4-cc3') val.append('c4-ccsd(t)') val.append('c4-ccsdt') return val def cfour_gradient_list(): """Return an array of Cfour methods with analytical gradients. Appended to procedures['gradient']. """ val = [] val.append('cfour') val.append('c4-scf') val.append('c4-mp2') val.append('c4-mp3') val.append('c4-mp4(sdq)') val.append('c4-mp4') val.append('c4-cc2') val.append('c4-ccsd') val.append('c4-cc3') val.append('c4-ccsd(t)') val.append('c4-ccsdt') return val def cfour_psivar_list(): """Return a dict with keys of most Cfour methods and values of dicts with the PSI Variables returned by those methods. 
Used by cbs() wrapper to avoid unnecessary computations in compound methods. Result is appended to ``VARH``. """ VARH = {} VARH['c4-scf'] = { 'c4-scftot': 'SCF TOTAL ENERGY'} VARH['c4-mp2'] = { 'c4-scftot': 'SCF TOTAL ENERGY', 'c4-mp2corl': 'MP2 CORRELATION ENERGY'} VARH['c4-mp3'] = { 'c4-scftot': 'SCF TOTAL ENERGY', 'c4-mp2corl': 'MP2 CORRELATION ENERGY', 'c4-mp2.5corl': 'MP2.5 CORRELATION ENERGY', 'c4-mp3corl': 'MP3 CORRELATION ENERGY'} VARH['c4-mp4(sdq)'] = { 'c4-scftot': 'SCF TOTAL ENERGY', 'c4-mp2corl': 'MP2 CORRELATION ENERGY', 'c4-mp2.5corl': 'MP2.5 CORRELATION ENERGY', 'c4-mp3corl': 'MP3 CORRELATION ENERGY', 'c4-mp4(sdq)corl': 'MP4(SDQ) CORRELATION ENERGY'} VARH['c4-mp4'] = { 'c4-scftot': 'SCF TOTAL ENERGY', 'c4-mp2corl': 'MP2 CORRELATION ENERGY', 'c4-mp2.5corl': 'MP2.5 CORRELATION ENERGY', 'c4-mp3corl': 'MP3 CORRELATION ENERGY', 'c4-mp4(sdq)corl': 'MP4(SDQ) CORRELATION ENERGY', 'c4-mp4corl': 'MP4(SDTQ) CORRELATION ENERGY'} VARH['c4-cc2'] = { 'c4-scftot': 'SCF TOTAL ENERGY', 'c4-mp2corl': 'MP2 CORRELATION ENERGY', 'c4-cc2corl': 'CC2 CORRELATION ENERGY'} VARH['c4-ccsd'] = { 'c4-scftot': 'SCF TOTAL ENERGY', 'c4-mp2corl': 'MP2 CORRELATION ENERGY', 'c4-ccsdcorl': 'CCSD CORRELATION ENERGY'} VARH['c4-cc3'] = { 'c4-scftot': 'SCF TOTAL ENERGY', 'c4-mp2corl': 'MP2 CORRELATION ENERGY', 'c4-cc3corl': 'CC3 CORRELATION ENERGY'} VARH['c4-ccsd(t)'] = { 'c4-scftot': 'SCF TOTAL ENERGY', 'c4-mp2corl': 'MP2 CORRELATION ENERGY', 'c4-ccsdcorl': 'CCSD CORRELATION ENERGY', 'c4-ccsd(t)corl': 'CCSD(T) CORRELATION ENERGY'} VARH['c4-ccsdt'] = { 'c4-scftot': 'SCF TOTAL ENERGY', 'c4-mp2corl': 'MP2 CORRELATION ENERGY', 'c4-ccsdcorl': 'CCSD CORRELATION ENERGY', 'c4-ccsdtcorl': 'CCSDT CORRELATION ENERGY'} return VARH #def backtransform(chgeMol, permMol, chgeGrad=None, chgeDip=None): #def format_fjobarc(fje, fjelem, fjcoord, fjgrd, map, fjdip): def format_fjobarc(energy, map, elem, coordinates, gradient, dipole): """Takes the key results from a gradient computation (*energy*, element Z list *elem*, *coordinates*, *gradient*, *dipole*, and atom ordering *map*) and writes a string *fja* that exactly mimics the contents of a Cfour FJOBARC file. """ fja = 'TOTENERG\n' fja += '%15d%15d\n' % (struct.unpack("ii", struct.pack("d", energy))) fja += 'COORD\n' Nat = len(coordinates) flatcoord = [] for at in range(Nat): for xyz in range(3): flatcoord.append(coordinates[map[at]][xyz]) for idx in range(len(flatcoord)): if abs(flatcoord[idx]) < 1.0E-14: # TODO flatcoord[idx] = 0.0 fja += '%15d%15d' % (struct.unpack("ii", struct.pack("d", flatcoord[idx]))) if idx % 2 == 1: fja += '\n' if len(flatcoord) % 2 == 1: fja += '\n' fja += 'MAP2ZMAT\n' for idx in range(Nat): fja += '%15d%15d' % (struct.unpack("ii", struct.pack("l", map[idx] + 1))) if idx % 2 == 1: fja += '\n' if Nat % 2 == 1: fja += '\n' fja += 'GRD FILE\n' fja += '%5d%20.10f\n' % (Nat, 0.0) for at in range(Nat): fja += '%20.10f%20.10f%20.10f%20.10f\n' % (elem[at], coordinates[at][0], coordinates[at][1], coordinates[at][2]) for at in range(Nat): fja += '%20.10f%20.10f%20.10f%20.10f\n' % (elem[at], gradient[at][0], gradient[at][1], gradient[at][2]) fja += 'DIPOL FILE\n' fja += '%20.10f%20.10f%20.10f\n' % (dipole[0], dipole[1], dipole[2]) return fja def backtransform(chgeMol, permMol, chgeGrad=None, chgeDip=None): """Here, *chgeMol* and *chgeGrd* need to be turned into the native Cfour orientation embodied by *permMol*. Currently for vpt2. 
""" # Set up array reorientation object p4c4 = OrientMols(permMol, chgeMol) # opposite than usual oriCoord = p4c4.transform_coordinates2(chgeMol) p4Elem = [] for at in range(chgeMol.natom()): p4Elem.append(chgeMol.Z(at)) oriElem = p4c4.transform_elementlist(p4Elem) oriElemMap = p4c4.Catommap oriGrad = None if chgeGrad is None else p4c4.transform_gradient(chgeGrad) oriDip = None if chgeDip is None else p4c4.transform_vector(chgeDip) if chgeGrad and chgeDip: return oriElemMap, oriElem, oriCoord, oriGrad, oriDip else: return oriElemMap, oriElem, oriCoord #def backtransform_grad(p4Mol, c4Mol, p4Grd, p4Dip): # """Here, p4Mol and p4Grd need to be turned into the native Cfour # orientation embodied by c4Mol. Currently for vpt2. # # """ # # Set up array reorientation object # p4c4 = OrientMols(c4Mol, p4Mol) # opposite than usual # oriCoord = p4c4.transform_coordinates2(p4Mol) # oriGrad = p4c4.transform_gradient(p4Grd) # p4Elem = [] # for at in range(p4Mol.natom()): # p4Elem.append(p4Mol.Z(at)) # oriElem = p4c4.transform_elementlist(p4Elem) # oriElemMap = p4c4.Catommap # oriDip = p4c4.transform_vector(p4Dip) # # #print p4c4 # #print ' <<< Input C4 Mol >>>' # #c4Mol.print_out() # #print ' <<< Input P4 Mol >>>' # #p4Mol.print_out() # #print ' <<< Input P4 Grad >>>' # #if p4Grd is not None: # # for item in p4Grd: # # print(' %16.8f %16.8f %16.8f' % (item[0], item[1], item[2])) # #print ' <<< Rotated P4 Coord >>>' # #if oriCoord is not None: # # for item in oriCoord: # # print(' %16.8f %16.8f %16.8f' % (item[0], item[1], item[2])) # #print ' <<< Rotated P4 Elem >>>' # #if oriElem is not None: # # for item in oriElem : # # print(' %16.8f' % (item)) # #print ' <<< Rotated P4 Dip >>>' # #if oriDip is not None: # # print(' %16.8f %16.8f %16.8f' % (oriDip[0], oriDip[1], oriDip[2])) # #print ' <<< Rotated P4 Grad >>>' # #if oriGrad is not None: # # for item in oriGrad: # # print(' %16.8f %16.8f %16.8f' % (item[0], item[1], item[2])) # # return oriElemMap, oriElem, oriCoord, oriGrad, oriDip # #return oriElem, oriCoord, oriGrad, oriElemMap, oriDip def jajo2mol(jajodic): """Returns a Molecule from entries in dictionary *jajodic* extracted from JAINDX and JOBARC. """ map = jajodic['MAP2ZMAT'] elem = jajodic['ATOMCHRG'] coord = jajodic['COORD '] Nat = len(elem) molxyz = '%d bohr\n\n' % (Nat) # TODO chgmult, though not really necessary for reorientation for at in range(Nat): posn = map[at] - 1 el = 'GH' if elem[posn] == 0 else z2el[elem[posn]] posn *= 3 molxyz += '%s %21.15f %21.15f %21.15f\n' % (el, coord[posn], coord[posn + 1], coord[posn + 2]) mol = Molecule.init_with_xyz(molxyz, no_com=True, no_reorient=True, contentsNotFilename=True) return mol
kannon92/psi4
psi4/driver/qcdb/cfour.py
Python
gpl-2.0
46,066
[ "CFOUR", "Psi4" ]
b80e7f7b29a0097adef7e3f8f6df26c318b216fefd5c48c13547ceb0b83bc78b
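The qcdb/cfour.py record above harvests energies from CFOUR text output by pairing a reusable NUMBER regex with re.search in MULTILINE mode, one anchored pattern per quantity. Below is a hedged, self-contained sketch of that idiom: the NUMBER pattern is copied from the record, but the function name harvest_scf_energy and the sample output string are made up for illustration and are not part of CFOUR or Psi4.

```python
# Illustrative sketch of the regex-harvesting idiom used in
# harvest_outfile_pass above; the sample text is invented.
import re
from decimal import Decimal

# Same number pattern as the record: fixed or exponential notation.
NUMBER = r"((?:[-+]?\d*\.\d+(?:[DdEe][-+]?\d+)?)|(?:[-+]?\d+\.\d*(?:[DdEe][-+]?\d+)?))"


def harvest_scf_energy(outtext):
    """Return the SCF total energy as a Decimal, or None if not found."""
    mobj = re.search(
        r"^\s+E\(SCF\)\s+=\s+" + NUMBER + r"\s+a\.u\.\s*$",
        outtext,
        re.MULTILINE,
    )
    return Decimal(mobj.group(1)) if mobj else None


if __name__ == "__main__":
    sample = "  Some header line\n  E(SCF)        =   -76.0266327341 a.u.\n"
    print(harvest_scf_energy(sample))  # -76.0266327341
```

Keeping each quantity behind its own anchored pattern, as the record does, makes it straightforward to add new parsers without touching the ones that already work; the PreservingDict in the original then guards against precision loss when the same quantity is matched more than once.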
""" SystemAdministrator service is a tool to control and monitor the DIRAC services and agents """ import socket import os import re import time import getpass import importlib import shutil import platform import psutil import tempfile import subprocess import requests from packaging.version import Version, InvalidVersion import subprocess as commands from datetime import datetime, timedelta from diraccfg import CFG from DIRAC import S_OK, S_ERROR, gConfig, rootPath, gLogger, convertToPy3VersionNumber from DIRAC.Core.DISET.RequestHandler import RequestHandler from DIRAC.Core.Utilities import Os from DIRAC.Core.Utilities.Extensions import extensionsByPriority, getExtensionMetadata from DIRAC.Core.Utilities.File import mkLink from DIRAC.Core.Utilities.Time import dateTime, fromString, hour, day from DIRAC.Core.Utilities.Subprocess import shellCall from DIRAC.Core.Utilities.ThreadScheduler import gThreadScheduler from DIRAC.Core.Security.Locations import getHostCertificateAndKeyLocation from DIRAC.Core.Security.X509Chain import X509Chain # pylint: disable=import-error from DIRAC.ConfigurationSystem.Client import PathFinder from DIRAC.FrameworkSystem.Client.ComponentInstaller import gComponentInstaller from DIRAC.FrameworkSystem.Client.ComponentMonitoringClient import ComponentMonitoringClient gProfilers = {} # pylint: disable=no-self-use def loadDIRACCFG(): installPath = gConfig.getValue("/LocalInstallation/TargetPath", gConfig.getValue("/LocalInstallation/RootPath", "")) if not installPath: installPath = rootPath cfgPath = os.path.join(installPath, "etc", "dirac.cfg") try: diracCFG = CFG().loadFromFile(cfgPath) except Exception as excp: return S_ERROR("Could not load dirac.cfg: %s" % repr(excp)) return S_OK((cfgPath, diracCFG)) class SystemAdministratorHandler(RequestHandler): @classmethod def initializeHandler(cls, serviceInfo): """ Handler class initialization """ # Check the flag for monitoring of the state of the host hostMonitoring = cls.srv_getCSOption("HostMonitoring", True) if hostMonitoring: gThreadScheduler.addPeriodicTask(60, cls.__storeHostInfo) # the SystemAdministrator service does not has to use the client to report data about the host. keepSoftwareVersions = cls.srv_getCSOption("KeepSoftwareVersions", 0) if keepSoftwareVersions > 0: gLogger.info( "The last %s software version will be kept and the rest will be deleted!" 
% keepSoftwareVersions ) gThreadScheduler.addPeriodicTask( 600, cls.__deleteOldSoftware, (keepSoftwareVersions,), executions=2 ) # it is enough to try 2 times return S_OK("Initialization went well") types_getInfo = [] def export_getInfo(self): """Get versions of the installed DIRAC software and extensions, setup of the local installation """ return gComponentInstaller.getInfo() types_getSoftwareComponents = [] def export_getSoftwareComponents(self): """Get the list of all the components ( services and agents ) for which the software is installed on the system """ return gComponentInstaller.getSoftwareComponents(extensionsByPriority()) types_getInstalledComponents = [] def export_getInstalledComponents(self): """Get the list of all the components ( services and agents ) installed on the system in the runit directory """ return gComponentInstaller.getInstalledComponents() types_getSetupComponents = [] def export_getSetupComponents(self): """Get the list of all the components ( services and agents ) set up for running with runsvdir in /opt/dirac/startup directory """ return gComponentInstaller.getSetupComponents() types_getOverallStatus = [] def export_getOverallStatus(self): """Get the complete status information of all components.""" result = gComponentInstaller.getOverallStatus(extensionsByPriority()) if not result["OK"]: return result statusDict = result["Value"] for compType in statusDict: for system in statusDict[compType]: for component in statusDict[compType][system]: result = gComponentInstaller.getComponentModule(system, component, compType) if not result["OK"]: statusDict[compType][system][component]["Module"] = "Unknown" else: statusDict[compType][system][component]["Module"] = result["Value"] return S_OK(statusDict) types_getStartupComponentStatus = [list] def export_getStartupComponentStatus(self, componentTupleList): """Get the list of all the components ( services and agents ) set up for running with runsvdir in startup directory """ return gComponentInstaller.getStartupComponentStatus(componentTupleList) types_installComponent = [str, str, str] def export_installComponent(self, componentType, system, component, componentModule=""): """Install runit directory for the specified component""" return gComponentInstaller.installComponent( componentType, system, component, extensionsByPriority(), componentModule ) types_setupComponent = [str, str, str] def export_setupComponent(self, componentType, system, component, componentModule=""): """Setup the specified component for running with the runsvdir daemon It implies installComponent """ result = gComponentInstaller.setupComponent( componentType, system, component, extensionsByPriority(), componentModule ) gConfig.forceRefresh() return result types_addDefaultOptionsToComponentCfg = [str, str] def export_addDefaultOptionsToComponentCfg(self, componentType, system, component): """Add default component options local component cfg""" return gComponentInstaller.addDefaultOptionsToComponentCfg( componentType, system, component, extensionsByPriority() ) types_unsetupComponent = [str, str] def export_unsetupComponent(self, system, component): """Removed the specified component from running with the runsvdir daemon""" return gComponentInstaller.unsetupComponent(system, component) types_uninstallComponent = [str, str, bool] def export_uninstallComponent(self, system, component, removeLogs): """Remove runit directory for the specified component It implies unsetupComponent """ return gComponentInstaller.uninstallComponent(system, component, 
removeLogs) types_startComponent = [str, str] def export_startComponent(self, system, component): """Start the specified component, running with the runsv daemon""" return gComponentInstaller.runsvctrlComponent(system, component, "u") types_restartComponent = [str, str] def export_restartComponent(self, system, component): """Restart the specified component, running with the runsv daemon""" return gComponentInstaller.runsvctrlComponent(system, component, "t") types_stopComponent = [str, str] def export_stopComponent(self, system, component): """Stop the specified component, running with the runsv daemon""" return gComponentInstaller.runsvctrlComponent(system, component, "d") types_getLogTail = [str, str] def export_getLogTail(self, system, component, length=100): """Get the tail of the component log file""" return gComponentInstaller.getLogTail(system, component, length) ###################################################################################### # Database related methods # types_getMySQLStatus = [] def export_getMySQLStatus(self): """Get the status of the MySQL database installation""" return gComponentInstaller.getMySQLStatus() types_getDatabases = [] def export_getDatabases(self, mysqlPassword=None): """Get the list of installed databases""" if mysqlPassword: gComponentInstaller.setMySQLPasswords(mysqlPassword) return gComponentInstaller.getDatabases() types_getAvailableDatabases = [] def export_getAvailableDatabases(self): """Get the list of databases which software is installed in the system""" return gComponentInstaller.getAvailableDatabases() types_installDatabase = [str] def export_installDatabase(self, dbName, mysqlPassword=None): """Install a DIRAC database named dbName""" if mysqlPassword: gComponentInstaller.setMySQLPasswords(mysqlPassword) return gComponentInstaller.installDatabase(dbName) types_uninstallDatabase = [str] def export_uninstallDatabase(self, dbName, mysqlPassword=None): """Uninstall a DIRAC database named dbName""" if mysqlPassword: gComponentInstaller.setMySQLPasswords(mysqlPassword) return gComponentInstaller.uninstallDatabase(gConfig, dbName) types_addDatabaseOptionsToCS = [str, str] def export_addDatabaseOptionsToCS(self, system, database, overwrite=False): """Add the section with the database options to the CS""" return gComponentInstaller.addDatabaseOptionsToCS(gConfig, system, database, overwrite=overwrite) types_addDefaultOptionsToCS = [str, str, str] def export_addDefaultOptionsToCS(self, componentType, system, component, overwrite=False): """Add default component options to the global CS or to the local options""" return gComponentInstaller.addDefaultOptionsToCS( gConfig, componentType, system, component, extensionsByPriority(), overwrite=overwrite ) ####################################################################################### # General purpose methods # types_updateSoftware = [str] def export_updateSoftware(self, version): # Validate and normalise the requested version primaryExtension = None if "==" in version: primaryExtension, version = version.split("==") try: version = Version(version) except InvalidVersion: self.log.exception("Invalid version passed", version) return S_ERROR("Invalid version passed %r" % version) isPrerelease = version.is_prerelease version = "v%s" % version # Find what to install otherExtensions = [] for extension in extensionsByPriority(): if primaryExtension is None and getExtensionMetadata(extension).get("primary_extension", False): primaryExtension = extension else: otherExtensions.append(extension) 
self.log.info("Installing Python 3 based", "%s %s" % (primaryExtension, version)) self.log.info("Will also install", repr(otherExtensions)) # Install DIRACOS installer_url = ( "https://github.com/DIRACGrid/DIRACOS2/releases/latest/download/DIRACOS-Linux-%s.sh" % platform.machine() ) self.log.info("Downloading DIRACOS2 installer from", installer_url) with tempfile.NamedTemporaryFile(suffix=".sh", mode="wb") as installer: with requests.get(installer_url, stream=True) as r: if not r.ok: return S_ERROR("Failed to download %s" % installer_url) for chunk in r.iter_content(chunk_size=1024 ** 2): installer.write(chunk) installer.flush() self.log.info("Downloaded DIRACOS installer to", installer.name) newProPrefix = os.path.join( rootPath, "versions", "%s-%s" % (version, datetime.utcnow().strftime("%s")), ) installPrefix = os.path.join(newProPrefix, "%s-%s" % (platform.system(), platform.machine())) self.log.info("Running DIRACOS installer for prefix", installPrefix) r = subprocess.run( ["bash", installer.name, "-p", installPrefix], stderr=subprocess.PIPE, universal_newlines=True, check=False, timeout=600, ) if r.returncode != 0: stderr = [x for x in r.stderr.split("\n") if not x.startswith("Extracting : ")] self.log.error( "Installing DIRACOS2 failed with returncode", "%s and stdout: %s" % (r.returncode, stderr) ) return S_ERROR("Failed to install DIRACOS2 %s" % stderr) # Install DIRAC cmd = ["%s/bin/pip" % installPrefix, "install", "--no-color", "-v"] if isPrerelease: cmd += ["--pre"] cmd += ["%s[server]==%s" % (primaryExtension, version)] cmd += ["%s[server]" % e for e in otherExtensions] r = subprocess.run( cmd, stderr=subprocess.PIPE, universal_newlines=True, check=False, timeout=600, ) if r.returncode != 0: self.log.error("Installing DIRACOS2 failed with returncode", "%s and stdout: %s" % (r.returncode, r.stderr)) return S_ERROR("Failed to install DIRACOS2 with message %s" % r.stderr) # Update the pro link oldLink = os.path.join(gComponentInstaller.instancePath, "old") proLink = os.path.join(gComponentInstaller.instancePath, "pro") if os.path.exists(oldLink): os.remove(oldLink) os.rename(proLink, oldLink) mkLink(newProPrefix, proLink) return S_OK() types_revertSoftware = [] def export_revertSoftware(self): """Revert the last installed version of software to the previous one""" oldLink = os.path.join(gComponentInstaller.instancePath, "old") oldPath = os.readlink(oldLink) proLink = os.path.join(gComponentInstaller.instancePath, "pro") os.remove(proLink) mkLink(oldPath, proLink) return S_OK(oldPath) types_setProject = [str] def export_setProject(self, projectName): result = loadDIRACCFG() if not result["OK"]: return result cfgPath, diracCFG = result["Value"] gLogger.notice("Setting project to %s" % projectName) diracCFG.setOption("/LocalInstallation/Project", projectName, "Project to install") try: with open(cfgPath, "w") as fd: fd.write(str(diracCFG)) except IOError as excp: return S_ERROR("Could not write dirac.cfg: %s" % str(excp)) return S_OK() types_getProject = [] def export_getProject(self): result = loadDIRACCFG() if not result["OK"]: return result _cfgPath, diracCFG = result["Value"] return S_OK(diracCFG.getOption("/LocalInstallation/Project", "DIRAC")) types_addOptionToDiracCfg = [str, str] def export_addOptionToDiracCfg(self, option, value): """Set option in the local configuration file""" return gComponentInstaller.addOptionToDiracCfg(option, value) types_executeCommand = [str] def export_executeCommand(self, command): """Execute a command locally and return its output""" result = 
shellCall(60, command) return result types_checkComponentLog = [[str, list]] def export_checkComponentLog(self, component): """Check component log for errors""" componentList = [] if "*" in component: if component == "*": result = gComponentInstaller.getSetupComponents() if result["OK"]: for ctype in ["Services", "Agents", "Executors"]: if ctype in result["Value"]: for sname in result["Value"][ctype]: for cname in result["Value"][ctype][sname]: componentList.append("/".join([sname, cname])) elif isinstance(component, str): componentList = [component] else: componentList = component resultDict = {} for comp in componentList: if "/" not in comp: continue system, cname = comp.split("/") startDir = gComponentInstaller.startDir currentLog = startDir + "/" + system + "_" + cname + "/log/current" try: with open(currentLog, "r") as logFile: logLines = logFile.readlines() except IOError as err: gLogger.error("File does not exists:", currentLog) resultDict[comp] = {"ErrorsHour": -1, "ErrorsDay": -1, "LastError": currentLog + "::" + repr(err)} continue errors_1 = 0 errors_24 = 0 now = dateTime() lastError = "" for line in logLines: if "ERROR:" in line: fields = line.split() recent = False if len(fields) < 2: # if the line contains only one word lastError = line.split("ERROR:")[-1].strip() continue timeStamp = fromString(fields[0] + " " + fields[1]) if not timeStamp: # if the timestamp is missing in the log lastError = line.split("ERROR:")[-1].strip() continue if (now - timeStamp) < hour: errors_1 += 1 recent = True if (now - timeStamp) < day: errors_24 += 1 recent = True if recent: lastError = line.split("ERROR:")[-1].strip() resultDict[comp] = {"ErrorsHour": errors_1, "ErrorsDay": errors_24, "LastError": lastError} return S_OK(resultDict) @staticmethod def __readHostInfo(): """Get host current loads, memory, etc""" result = dict() # Memory info re_parser = re.compile(r"^(?P<key>\S*):\s*(?P<value>\d*)\s*kB") for line in open("/proc/meminfo"): match = re_parser.match(line) if not match: continue key, value = match.groups(["key", "value"]) result[key] = int(value) for mtype in ["Mem", "Swap"]: memory = int(result.get(mtype + "Total")) mfree = int(result.get(mtype + "Free")) if memory > 0: percentage = float(memory - mfree) / float(memory) * 100.0 else: percentage = 0 name = "Memory" if mtype == "Swap": name = "Swap" result[name] = "%.1f%%/%.1fMB" % (percentage, memory / 1024.0) # Loads l1, l5, l15 = (str(lx) for lx in os.getloadavg()) result["Load1"] = l1 result["Load5"] = l5 result["Load15"] = l15 result["Load"] = "/".join([l1, l5, l15]) # CPU info with open("/proc/cpuinfo", "r") as fd: lines = fd.readlines() processors = 0 physCores = {} for line in lines: if line.strip(): parameter, value = line.split(":") parameter = parameter.strip() value = value.strip() if parameter.startswith("processor"): processors += 1 if parameter.startswith("physical id"): physCores[value] = parameter if parameter.startswith("model name"): result["CPUModel"] = value if parameter.startswith("cpu MHz"): result["CPUClock"] = value result["Cores"] = processors result["PhysicalCores"] = len(physCores) # Disk occupancy summary = "" _status, output = commands.getstatusoutput("df") lines = output.split("\n") for i in range(len(lines)): if lines[i].startswith("/dev"): fields = lines[i].split() if len(fields) == 1: fields += lines[i + 1].split() partition = fields[5] occupancy = fields[4] summary += ",%s:%s" % (partition, occupancy) result["DiskOccupancy"] = summary[1:] result["RootDiskSpace"] = Os.getDiskSpace(rootPath) # Open 
files puser = getpass.getuser() _status, output = commands.getstatusoutput("lsof") pipes = 0 files = 0 sockets = 0 lines = output.split("\n") for line in lines: fType = line.split()[4] user = line.split()[2] if user == puser: if fType in ["REG"]: files += 1 elif fType in ["unix", "IPv4"]: sockets += 1 elif fType in ["FIFO"]: pipes += 1 result["OpenSockets"] = sockets result["OpenFiles"] = files result["OpenPipes"] = pipes infoResult = gComponentInstaller.getInfo() if infoResult["OK"]: result.update(infoResult["Value"]) # the infoResult value is {"Extensions":{'a1':'v1',a2:'v2'}; we convert to a string result.update( { "Extensions": ";".join( ["%s:%s" % (key, value) for (key, value) in infoResult["Value"].get("Extensions").items()] ) } ) # Host certificate properties certFile, _keyFile = getHostCertificateAndKeyLocation() chain = X509Chain() chain.loadChainFromFile(certFile) resultCert = chain.getCredentials() if resultCert["OK"]: result["SecondsLeft"] = resultCert["Value"]["secondsLeft"] result["CertificateValidity"] = str(timedelta(seconds=resultCert["Value"]["secondsLeft"])) result["CertificateDN"] = resultCert["Value"]["subject"] result["CertificateIssuer"] = resultCert["Value"]["issuer"] # Host uptime result["Uptime"] = str(timedelta(seconds=(time.time() - psutil.boot_time()))) return S_OK(result) types_getHostInfo = [] def export_getHostInfo(self): """ Retrieve host parameters """ client = ComponentMonitoringClient() result = client.getLog(socket.getfqdn()) if result["OK"]: return S_OK(result["Value"][0]) return self.__readHostInfo() types_getUsedPorts = [] def export_getUsedPorts(self): """ Retrieve the ports in use by services on this host :return: Returns a dictionary containing, for each system, which port is being used by which service """ result = gComponentInstaller.getSetupComponents() if not result["OK"]: return result services = result["Value"]["Services"] ports = {} for system in services: ports[system] = {} for service in services[system]: url = PathFinder.getServiceURL("%s/%s" % (system, service)) port = re.search(r":(\d{4,5})/", url) if port: ports[system][service] = port.group(1) else: ports[system][service] = "None" return S_OK(ports) types_getComponentDocumentation = [str, str, str] def export_getComponentDocumentation(self, cType, system, module): if cType == "service": module = "%sHandler" % module # Look for the component in extensions for extension in extensionsByPriority(): moduleName = [extension, system + "System", cType.capitalize(), module] try: importedModule = importlib.import_module(moduleName) return S_OK(importedModule.__doc__) except Exception: pass return S_ERROR("No documentation was found") @staticmethod def __storeHostInfo(): """ Retrieves and stores into a MySQL database information about the host """ result = SystemAdministratorHandler.__readHostInfo() if not result["OK"]: gLogger.error(result["Message"]) return result fields = result["Value"] fields["Timestamp"] = datetime.utcnow() fields["Extension"] = fields["Extensions"] client = ComponentMonitoringClient() result = client.updateLog(socket.getfqdn(), fields) if not result["OK"]: gLogger.error(result["Message"]) return result return S_OK("Profiling information logged correctly") @staticmethod def __deleteOldSoftware(keepLast): """ It removes all versions except the last x :param int keepLast: the number of the software version, what we keep """ versionsDirectory = os.path.join(rootPath, "versions") if not os.path.isdir(versionsDirectory): gLogger.error("The DIRAC.rootPath is not correct:", 
versionsDirectory) return softwareDirs = {} for dirName in os.listdir(versionsDirectory): try: # Python 3 uses dashes while Python 2 uses underscores so replace and split # v10.3.1-1637142594, v10r2p10_1629962176 # TODO: This can be simplified eventually but it's better to leave it for now version, timestamp = dirName.replace("_", "-").split("-") version = Version(convertToPy3VersionNumber(version)) timestamp = int(timestamp) except Exception: gLogger.exception("Failed to extract version info from", "%r in %r" % (dirName, versionsDirectory)) continue softwareDirs[dirName] = (version, timestamp) softwareDirs = sorted(softwareDirs, key=softwareDirs.__getitem__, reverse=False) try: for directoryName in softwareDirs[: -1 * int(keepLast)]: fullPath = os.path.join(versionsDirectory, directoryName) gLogger.info("Removing %s directory." % fullPath) shutil.rmtree(fullPath) except Exception as e: gLogger.error("Can not delete old DIRAC versions from the file system", repr(e))
ic-hep/DIRAC
src/DIRAC/FrameworkSystem/Service/SystemAdministratorHandler.py
Python
gpl-3.0
26833
[ "DIRAC" ]
479f69e0924849abec0b1d1189725b3de17cecd677fdb876180d9d11dd5bccf3
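The SystemAdministratorHandler record above prunes old installed software versions in `__deleteOldSoftware`, keeping only the most recent directories. Below is a minimal standalone sketch of that pruning idea, assuming directory names of the form `v10.3.1-1637142594` (version plus install timestamp) under some `versions/` directory; it is illustrative only and does not use DIRAC itself.

# Standalone sketch of the "keep the last N software versions" pruning logic.
# Directory names such as "v10.3.1-1637142594" are an assumption for illustration.
import os
import shutil


def parse_version_dir(dir_name):
    """Split 'v10.3.1-1637142594' into ((10, 3, 1), 1637142594)."""
    version, timestamp = dir_name.replace("_", "-").split("-")
    version_tuple = tuple(int(part) for part in version.lstrip("v").split("."))
    return version_tuple, int(timestamp)


def delete_old_software(versions_directory, keep_last):
    """Remove every version directory except the newest keep_last ones."""
    software_dirs = {}
    for dir_name in os.listdir(versions_directory):
        try:
            software_dirs[dir_name] = parse_version_dir(dir_name)
        except (ValueError, IndexError):
            continue  # skip directories that do not follow the naming scheme
    # Sort oldest first, so the slice below drops everything but the last keep_last
    ordered = sorted(software_dirs, key=software_dirs.__getitem__)
    for dir_name in ordered[:-keep_last]:
        shutil.rmtree(os.path.join(versions_directory, dir_name))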
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk

class vtkSLCReader(SimpleVTKClassModuleBase):
    def __init__(self, module_manager):
        SimpleVTKClassModuleBase.__init__(
            self, module_manager, vtk.vtkSLCReader(),
            'Reading vtkSLC.',
            (), ('vtkSLC',),
            replaceDoc=True,
            inputFunctions=None, outputFunctions=None)
nagyistoce/devide
modules/vtk_basic/vtkSLCReader.py
Python
bsd-3-clause
464
[ "VTK" ]
6fe36824faf327819f59d756d8c256f5b2948f5e8412bc3a1cd6988c82df5d80
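The record above is a thin DeVIDE wrapper around VTK's `vtkSLCReader`. The short sketch below shows the underlying VTK reader used directly, outside DeVIDE; the `volume.slc` path is a hypothetical placeholder.

# Minimal sketch of using vtk.vtkSLCReader directly (no DeVIDE);
# "volume.slc" is a hypothetical input file used only for illustration.
import vtk

reader = vtk.vtkSLCReader()
reader.SetFileName("volume.slc")
reader.Update()

image_data = reader.GetOutput()      # vtkImageData holding the volume
print(image_data.GetDimensions())    # x, y, z extent of the dataset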
# ---------------------------------------------------------------------------- # Copyright (c) 2013--, scikit-bio development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. # ---------------------------------------------------------------------------- import io from unittest import TestCase, main import numpy as np import pandas as pd import numpy.testing as npt from skbio import OrdinationResults from skbio.io import OrdinationFormatError from skbio.io.format.ordination import ( _ordination_to_ordination_results, _ordination_results_to_ordination, _ordination_sniffer) from skbio.util import get_data_path, assert_ordination_results_equal class OrdinationTestData(TestCase): def setUp(self): self.valid_fps = map( get_data_path, ['ordination_L&L_CA_data_scores', 'ordination_example3_scores', 'ordination_PCoA_sample_data_3_scores', 'ordination_example2_scores']) # Store filepath, regex for matching the error message that should be # raised when reading the file, and whether the file should be matched # by the sniffer (True) or not (False). self.invalid_fps = map(lambda e: (get_data_path(e[0]), e[1], e[2]), [ ('empty', 'end of file.*Eigvals header', False), ('whitespace_only', 'Eigvals header not found', False), ('ordination_error1', 'Eigvals header not found', False), ('ordination_error2', 'Proportion explained header not found', False), ('ordination_error3', 'Species header not found', True), ('ordination_error4', 'Site header not found', True), ('ordination_error5', 'Biplot header not found', True), ('ordination_error6', 'Site constraints header not found', True), ('ordination_error7', 'empty line', False), ('ordination_error8', '9.*Proportion explained.*8', True), ('ordination_error9', '2 values.*1 in row 1', True), ('ordination_error10', '2 values.*1 in row 1', True), ('ordination_error11', 'Site constraints ids and site ids', True), ('ordination_error12', '9.*Eigvals.*8', True), ('ordination_error13', '9.*Proportion explained.*8', True), ('ordination_error14', 'Site is 0: 9 x 0', True), ('ordination_error15', '9 values.*8 in row 1', True), ('ordination_error16', 'Biplot is 0: 3 x 0', True), ('ordination_error17', '3 values.*2 in row 1', True), ('ordination_error18', 'proportion explained.*eigvals: 8 != 9', True), ('ordination_error19', 'coordinates.*species.*eigvals: 1 != 2', True), ('ordination_error20', 'coordinates.*site.*eigvals: 1 != 2', True), ('ordination_error21', 'one eigval', False), ('ordination_error22', 'end of file.*blank line', False), ('ordination_error23', 'end of file.*Proportion explained section', True), ('ordination_error24', 'end of file.*row 2.*Species section', True) ]) class OrdinationResultsReaderWriterTests(OrdinationTestData): def setUp(self): super(OrdinationResultsReaderWriterTests, self).setUp() # define in-memory results, one for each of the valid files in # self.valid_fps # CA results axes_ids = ['CA1', 'CA2'] species_ids = ['Species1', 'Species2', 'Species3'] site_ids = ['Site1', 'Site2', 'Site3'] eigvals = pd.Series([0.0961330159181, 0.0409418140138], axes_ids) species = pd.DataFrame([[0.408869425742, 0.0695518116298], [-0.1153860437, -0.299767683538], [-0.309967102571, 0.187391917117]], index=species_ids, columns=axes_ids) site = pd.DataFrame([[-0.848956053187, 0.882764759014], [-0.220458650578, -1.34482000302], [1.66697179591, 0.470324389808]], index=site_ids, columns=axes_ids) biplot = None site_constraints = None prop_explained = None ca_scores = 
OrdinationResults( 'CA', 'Correspondence Analysis', eigvals=eigvals, features=species, samples=site, biplot_scores=biplot, sample_constraints=site_constraints, proportion_explained=prop_explained) # CCA results axes_ids = ['CCA%d' % i for i in range(1, 10)] species_ids = ['Species0', 'Species1', 'Species2', 'Species3', 'Species4', 'Species5', 'Species6', 'Species7', 'Species8'] site_ids = ['Site0', 'Site1', 'Site2', 'Site3', 'Site4', 'Site5', 'Site6', 'Site7', 'Site8', 'Site9'] eigvals = pd.Series([0.366135830393, 0.186887643052, 0.0788466514249, 0.082287840501, 0.0351348475787, 0.0233265839374, 0.0099048981912, 0.00122461669234, 0.000417454724117], axes_ids) species = pd.DataFrame(np.loadtxt( get_data_path('ordination_exp_Ordination_CCA_species')), index=species_ids, columns=axes_ids) site = pd.DataFrame( np.loadtxt(get_data_path('ordination_exp_Ordination_CCA_site')), index=site_ids, columns=axes_ids) biplot = pd.DataFrame( [[-0.169746767979, 0.63069090084, 0.760769036049], [-0.994016563505, 0.0609533148724, -0.0449369418179], [0.184352565909, -0.974867543612, 0.0309865007541]], columns=axes_ids[:3]) site_constraints = pd.DataFrame(np.loadtxt( get_data_path('ordination_exp_Ordination_CCA_site_constraints')), index=site_ids, columns=axes_ids) prop_explained = None cca_scores = OrdinationResults('CCA', 'Canonical Correspondence Analysis', eigvals=eigvals, features=species, samples=site, biplot_scores=biplot, sample_constraints=site_constraints, proportion_explained=prop_explained) # PCoA results axes_ids = ['PC%d' % i for i in range(1, 10)] species_ids = None site_ids = ['PC.636', 'PC.635', 'PC.356', 'PC.481', 'PC.354', 'PC.593', 'PC.355', 'PC.607', 'PC.634'] eigvals = pd.Series([0.512367260461, 0.300719094427, 0.267912066004, 0.208988681078, 0.19169895326, 0.16054234528, 0.15017695712, 0.122457748167, 0.0], axes_ids) species = None site = pd.DataFrame( np.loadtxt(get_data_path('ordination_exp_Ordination_PCoA_site')), index=site_ids, columns=axes_ids) biplot = None site_constraints = None prop_explained = pd.Series([0.267573832777, 0.15704469605, 0.139911863774, 0.109140272454, 0.100111048503, 0.0838401161912, 0.0784269939011, 0.0639511763509, 0.0], axes_ids) pcoa_scores = OrdinationResults('PCoA', 'Principal Coordinate Analysis', eigvals=eigvals, features=species, samples=site, biplot_scores=biplot, sample_constraints=site_constraints, proportion_explained=prop_explained) # RDA results axes_ids = ['RDA%d' % i for i in range(1, 8)] species_ids = ['Species0', 'Species1', 'Species2', 'Species3', 'Species4', 'Species5'] site_ids = ['Site0', 'Site1', 'Site2', 'Site3', 'Site4', 'Site5', 'Site6', 'Site7', 'Site8', 'Site9'] eigvals = pd.Series([25.8979540892, 14.9825779819, 8.93784077262, 6.13995623072, 1.68070536498, 0.57735026919, 0.275983624351], axes_ids) species = pd.DataFrame(np.loadtxt( get_data_path('ordination_exp_Ordination_RDA_species')), index=species_ids, columns=axes_ids) site = pd.DataFrame( np.loadtxt(get_data_path('ordination_exp_Ordination_RDA_site')), index=site_ids, columns=axes_ids) biplot = pd.DataFrame( [[0.422650019179, -0.559142585857, -0.713250678211], [0.988495963777, 0.150787422017, -0.0117848614073], [-0.556516618887, 0.817599992718, 0.147714267459], [-0.404079676685, -0.9058434809, -0.127150316558]], columns=axes_ids[:3]) site_constraints = pd.DataFrame(np.loadtxt( get_data_path('ordination_exp_Ordination_RDA_site_constraints')), index=site_ids, columns=axes_ids) prop_explained = None rda_scores = OrdinationResults( 'RDA', 'Redundancy Analysis', eigvals=eigvals, 
features=species, samples=site, biplot_scores=biplot, sample_constraints=site_constraints, proportion_explained=prop_explained) self.ordination_results_objs = [ca_scores, cca_scores, pcoa_scores, rda_scores] def test_read_valid_files(self): for fp, obj in zip(self.valid_fps, self.ordination_results_objs): obs = _ordination_to_ordination_results(fp) assert_ordination_results_equal( obs, obj, ignore_method_names=True, ignore_axis_labels=True) def test_read_invalid_files(self): for invalid_fp, error_msg_regexp, _ in self.invalid_fps: with self.assertRaisesRegex(OrdinationFormatError, error_msg_regexp): _ordination_to_ordination_results(invalid_fp) def test_write(self): for fp, obj in zip(self.valid_fps, self.ordination_results_objs): fh = io.StringIO() _ordination_results_to_ordination(obj, fh) obs = fh.getvalue() fh.close() with io.open(fp) as fh: exp = fh.read() npt.assert_equal(obs, exp) def test_roundtrip_read_write(self): for fp in self.valid_fps: # Read. obj1 = _ordination_to_ordination_results(fp) # Write. fh = io.StringIO() _ordination_results_to_ordination(obj1, fh) fh.seek(0) # Read. obj2 = _ordination_to_ordination_results(fh) fh.close() assert_ordination_results_equal(obj1, obj2) class SnifferTests(OrdinationTestData): def setUp(self): super(SnifferTests, self).setUp() def test_matches_and_nonmatches(self): # Sniffer should match all valid files, and will match some invalid # ones too because it doesn't exhaustively check the entire file. for fp in self.valid_fps: self.assertEqual(_ordination_sniffer(fp), (True, {})) for fp, _, expected_sniffer_match in self.invalid_fps: self.assertEqual(_ordination_sniffer(fp), (expected_sniffer_match, {})) if __name__ == '__main__': main()
kdmurray91/scikit-bio
skbio/io/format/tests/test_ordination.py
Python
bsd-3-clause
11672
[ "scikit-bio" ]
dca988630c195191649d234b52223909ddc97316b784f249e7ec39d535f5facb
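The test module above builds `OrdinationResults` objects and round-trips them through the private reader/writer pair of the ordination format. The sketch below mirrors that pattern with a tiny, made-up CA result; the eigenvalues, sample ids, and coordinates are invented purely for illustration.

# Minimal round-trip sketch mirroring test_roundtrip_read_write; the numbers
# and ids below are illustrative only.
import io

import pandas as pd
from skbio import OrdinationResults
from skbio.io.format.ordination import (
    _ordination_results_to_ordination, _ordination_to_ordination_results)

axes = ['CA1', 'CA2']
eigvals = pd.Series([0.7, 0.3], index=axes)
samples = pd.DataFrame([[0.1, -0.2], [-0.3, 0.4]],
                       index=['Site1', 'Site2'], columns=axes)

original = OrdinationResults('CA', 'Correspondence Analysis',
                             eigvals=eigvals, samples=samples)

fh = io.StringIO()
_ordination_results_to_ordination(original, fh)        # write
fh.seek(0)
roundtripped = _ordination_to_ordination_results(fh)   # read back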
""" perform exon-level counting using DEXSeq """ import sys import os from bcbio.utils import R_package_path, file_exists, safe_makedir from bcbio.distributed.transaction import file_transaction from bcbio.provenance import do from bcbio import bam from bcbio.log import logger import bcbio.pipeline.datadict as dd def bcbio_run(data): out_dir = os.path.join(dd.get_work_dir(data), "dexseq") safe_makedir(out_dir) sample_name = dd.get_sample_name(data) out_file = os.path.join(out_dir, sample_name + ".dexseq") bam_file = dd.get_work_bam(data) dexseq_gff = dd.get_dexseq_gff(data) stranded = dd.get_strandedness(data) counts = run_count(bam_file, dexseq_gff, stranded, out_file, data) data = dd.set_dexseq_counts(data, counts) return data def run_count(bam_file, dexseq_gff, stranded, out_file, data): """ run dexseq_count on a BAM file """ assert file_exists(bam_file), "%s does not exist." % bam_file sort_order = bam._get_sort_order(bam_file, {}) assert sort_order, "Cannot determine sort order of %s." % bam_file strand_flag = _strand_flag(stranded) assert strand_flag, "%s is not a valid strandedness value." % stranded if not dexseq_gff: logger.info("No DEXSeq GFF file was found, skipping exon-level counting.") return None elif not file_exists(dexseq_gff): logger.info("%s was not found, so exon-level counting is being " "skipped." % dexseq_gff) return None dexseq_count = _dexseq_count_path() if not dexseq_count: logger.info("DEXseq is not installed, skipping exon-level counting.") return None if dd.get_aligner(data) == "bwa": logger.info("Can't use DEXSeq with bwa alignments, skipping exon-level counting.") return None sort_flag = "name" if sort_order == "queryname" else "pos" is_paired = bam.is_paired(bam_file) paired_flag = "yes" if is_paired else "no" bcbio_python = sys.executable if file_exists(out_file): return out_file cmd = ("{bcbio_python} {dexseq_count} -f bam -r {sort_flag} -p {paired_flag} " "-s {strand_flag} {dexseq_gff} {bam_file} {tx_out_file}") message = "Counting exon-level counts with %s and %s." % (bam_file, dexseq_gff) with file_transaction(data, out_file) as tx_out_file: do.run(cmd.format(**locals()), message) return out_file def _strand_flag(stranded): strand_flag = {"unstranded": "no", "firststrand": "reverse", "secondstrand": "yes"} return strand_flag.get(stranded, None) def _dexseq_count_path(): package_path = R_package_path("DEXSeq") if not package_path: return None return os.path.join(package_path, "python_scripts", "dexseq_count.py") def _dexseq_gtf_path(genome_dir): return os.path.join(genome_dir, "rnaseq", "ref-transcripts.dexseq.gff")
hjanime/bcbio-nextgen
bcbio/rnaseq/dexseq.py
Python
mit
2919
[ "BWA" ]
7d0092966d9c03387d649b6aa09cf159321458ada9597f5803a5d7d238f61209
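`run_count` in the record above shells out to DEXSeq's `dexseq_count.py` with flags derived from strandedness, sort order, and pairing. The sketch below only assembles that same command string from plain inputs, so the construction can be seen without a bcbio installation; every path in it is a hypothetical placeholder and nothing is executed.

# Sketch of the dexseq_count command assembled by run_count; all paths are
# hypothetical placeholders and no command is actually run.
import sys

STRAND_FLAG = {"unstranded": "no", "firststrand": "reverse", "secondstrand": "yes"}


def build_dexseq_cmd(dexseq_count, dexseq_gff, bam_file, out_file,
                     stranded="unstranded", sort_order="queryname", paired=True):
    strand_flag = STRAND_FLAG[stranded]
    sort_flag = "name" if sort_order == "queryname" else "pos"
    paired_flag = "yes" if paired else "no"
    return ("{python} {dexseq_count} -f bam -r {sort_flag} -p {paired_flag} "
            "-s {strand_flag} {dexseq_gff} {bam_file} {out_file}").format(
                python=sys.executable, dexseq_count=dexseq_count,
                sort_flag=sort_flag, paired_flag=paired_flag,
                strand_flag=strand_flag, dexseq_gff=dexseq_gff,
                bam_file=bam_file, out_file=out_file)


print(build_dexseq_cmd("dexseq_count.py", "ref.dexseq.gff",
                       "sample.bam", "sample.dexseq"))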
#!/usr/bin/python # Malwina Prater, mn367@cam.ac.uk, 2017, Copyright # Centre for Trophoblast Research, University of Cambridge # # Script version: v01. # # Script to calculate the percent of transcripts mapping to rRNA # # INPUTS : # 1. HTseq_counts file # 2. Original reference transcriptome alignned to # # USAGE : # For producing table(s) with rRNA and MT counts for each sample use commands like that: # # ./rRNA_MT_count.py --gtf /Users/malwina/Documents/CTR-Data/genomes/Mus_musculus/mm10/Mus_musculus.GRCm38.84.gtf --htseq C17_3_S20_Aligned.out.srt.bam_htseq_combined_counts.txt # # import modules: import os,sys from optparse import OptionParser import re # parse in the user options: parser = OptionParser(usage="%prog [-x Excel [-i imagefile] [-s squares]", version="%prog 0.1") parser.add_option("--htseq", dest="FileName", type="string", action="store") parser.add_option("--gtf", dest="GTF", type="string", action="store") (options, args) = parser.parse_args() #files = sys.argv[] HTSEQ_COUNTS = options.FileName GTF = options.GTF # check if files supplied exist: try: handle = open(GTF, "rU") handle.close() except: print "\nError->\tGTF File: %s does not exist\n" % GTF sys.exit() try: handle = open(HTSEQ_COUNTS, "rU") handle.close() except: print "\nError->\tFile: %s does not exist\n" % HTSEQ_COUNTS sys.exit() # # First job is to extract all the identifiers of genes/transcripts mapping to the rRNA and MT genes and store in 2 arrays # rRNA_identifiers = {} MT_identifiers = {} with open(GTF, "rU") as handle: #line = handle.readline() for line in handle: line.rstrip('\n') if 'gene_biotype "rRNA"' in line: identifier = line identifier = re.sub('.*gene_id "', '', identifier) identifier = re.sub('"; gene_version.*\n', '', identifier) rRNA_identifiers[identifier] = 1 if 'MT' in line: identifier = line identifier = re.sub('.*gene_id "', '', identifier) identifier = re.sub('"; gene_version.*\n', '', identifier) MT_identifiers[identifier] = 1 handle.close() #print("rRNA:") #print(rRNA_identifiers.keys()) #print("MT:") #print(MT_identifiers.keys()) # # Second job is to go through the HTSEQ-couts and count reads matching the rRNA identifiers # Cummulative_rRNA_Count = 0 rRNA_genes = 0 ReadCount = 0 line_number = 0 MT_genes = 0; Cummulative_MT_Count = 0; with open(HTSEQ_COUNTS, "rU") as handle: for line in handle: line.rstrip('\n') split_line = line.split("\t") if line_number > 0: if split_line[0] in rRNA_identifiers.keys(): # if rRNA_identifiers[gene_id] rRNA_genes += 1 Cummulative_rRNA_Count += int(split_line[1]) if split_line[0] in MT_identifiers.keys(): MT_genes += 1 Cummulative_MT_Count += int(split_line[1]) ReadCount += int(split_line[1]) line_number += 1 handle.close() #print(Cummulative_MT_Count) #print(Cummulative_rRNA_Count) # # wiritng the output files: # out = HTSEQ_COUNTS + '_rRNAmtRNACounts.txt'; out = re.sub('.txt_', '_', out) print "Summary output file: ", out, "\n" OUT = open(out, "w") OUT.write('HT-SEQ file name: \t' + HTSEQ_COUNTS + '\n\n') OUT.write('GTF file name: \t\t' + GTF + '\n\n\n') OUT.write('---------------------------------------------------------------------------------' + '\n') OUT.write(' rRNA and MT identifiers\n') OUT.write('---------------------------------------------------------------------------------' + '\n') OUT.write('No. of rRNA identifiers: ' + str(len(rRNA_identifiers.keys())) + '\n') # PRINT size of this hash OUT.write('No. 
of MT identifiers: ' + str(len(MT_identifiers.keys())) + '\n') # PRINT size of this hash OUT.write('\n\n') OUT.write('---------------------------------------------------------------------------------' + '\n') OUT.write(' HTSEQ mapping summary\n') OUT.write('---------------------------------------------------------------------------------' + '\n') OUT.write('ReadCount: ' + str(ReadCount) + '\n\n') #OUT.write(' Number of rRNA genes: ' + str(rRNA_genes) + '\n') OUT.write('Total no. of rRNA transcripts: ' + str(Cummulative_rRNA_Count) + '\n') perc_rRNA = 100*float(Cummulative_rRNA_Count)/float(ReadCount) perc_rRNA = str(round(perc_rRNA, 3)) OUT.write('Percent rRNA mapped reads: ' + str(Cummulative_rRNA_Count) + ' / ' + str(ReadCount) + ' * 100 = ' + perc_rRNA + '%\n\n') #OUT.write('\n Number of MT genes: ' + str(MT_genes) + '\n') OUT.write('Total no. of MT transcripts: ' + str(Cummulative_MT_Count) + '\n') perc_MT = 100*float(Cummulative_MT_Count)/float(ReadCount) perc_MT = str(round(perc_MT, 3)) OUT.write('Percent MT mapped reads: ' + str(Cummulative_MT_Count) + ' / ' + str(ReadCount) + ' * 100 = ' + perc_MT + '%\n\n') OUT.close()
CTR-BFX/CambridgeHackathon
rRNA_MT_count.py
Python
gpl-3.0
4956
[ "HTSeq" ]
6ce25d5cdf128bfb2cb377dfd6bbb32399f5a2561b23255eb6aaaa0d1779cdcc
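The script above extracts `gene_id` values from GTF lines with chained `re.sub` calls and then reports rRNA/MT read percentages. A compact Python 3 sketch of the same extraction and percentage step is shown below; it assumes Ensembl-style `gene_id "..."` attributes, and the example GTF line and counts are illustrative only.

# Python 3 sketch of the gene_id extraction and percentage summary used in
# rRNA_MT_count.py; the example GTF line and counts are made up for illustration.
import re

GENE_ID_RE = re.compile(r'gene_id "([^"]+)"')


def extract_gene_id(gtf_line):
    """Return the gene_id attribute from a GTF line, or None if absent."""
    match = GENE_ID_RE.search(gtf_line)
    return match.group(1) if match else None


def percent(count, total):
    """Percentage of `count` reads relative to `total`, rounded to 3 decimals."""
    return round(100.0 * count / total, 3) if total else 0.0


line = '1\thavana\tgene\t1\t100\t.\t+\t.\tgene_id "ENSMUSG00000064337"; gene_biotype "rRNA";'
print(extract_gene_id(line))      # ENSMUSG00000064337
print(percent(1234, 1000000))     # 0.123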
import logging, datetime, tempfile from time import time from dirac.lib.base import BaseController, render, c, jsonify, request, response from dirac.lib.diset import getRPCClient, getTransferClient from DIRAC import gConfig, gLogger, S_OK from DIRAC.Core.Utilities.List import sortList, uniqueElements from DIRAC.Core.Utilities import Time from DIRAC.AccountingSystem.Client.ReportsClient import ReportsClient from DIRAC.Core.Utilities.DictCache import DictCache from DIRAC.WorkloadManagementSystem.Service.JobPolicy import JobPolicy, RIGHT_GET_INFO from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations from DIRAC.RequestManagementSystem.Client.Request import Request import dirac.lib.credentials as credentials #from DIRAC.Interfaces.API.Dirac import Dirac log = logging.getLogger(__name__) numberOfJobs = 25 pageNumber = 0 globalSort = [] globalSort = [["JobID","DESC"]] class JobmonitorController(BaseController): __imgCache = DictCache() ################################################################################ def display(self): pagestart = time() group = credentials.getSelectedGroup() if group == "visitor" and credentials.getUserDN() == "": return render("/login.mako") c.select = self.__getSelectionData() if not c.select.has_key("extra"): groupProperty = credentials.getProperties(group) if "JobAdministrator" not in groupProperty and "JobSharing" not in groupProperty: c.select["extra"] = {"owner":credentials.getUsername()} return render("jobs/JobMonitor.mako") ################################################################################ def __getEligibleOwners( self, userDN, group ): """ Get users which jobs can be in principle shown in the page """ owners = [] allInfo = Operations( group = group ).getValue('/Services/JobMonitoring/GlobalJobsInfo', False ) jobPolicy = JobPolicy( userDN, group, allInfo = allInfo ) result = jobPolicy.getControlledUsers( RIGHT_GET_INFO ) if not result['OK']: return result elif result['Value']: allowedUsers = [] if result['Value'] != "ALL": for aUser, aGroup in result['Value']: allowedUsers.append( aUser ) allowedUsers = list( set( allowedUsers ) ) RPC = getRPCClient("WorkloadManagement/JobMonitoring") result = RPC.getOwners() if result["OK"]: if len(result["Value"])>0: owners.append( str("All") ) for owner in result["Value"]: if allowedUsers and owner in allowedUsers: owners.append( str( owner ) ) elif not allowedUsers: owners.append( str( owner ) ) else: gLogger.error( "RPC.getOwners() return error:", result["Message"] ) return result return S_OK( owners ) ################################################################################ def __getJobSummary(self,jobs,head): valueList = [] for i in jobs: valueList.append({"id":str(i[2]),"status":str(i[6]),"minorStatus":str(i[10]),"applicationStatus":str(i[11]),"site":str(i[26]),"jobname":str(i[22]),"lastUpdate":str(i[25]),"owner":str(i[31]),"submissionTime":str(i[12]),"signTime":str(i[3])}) return valueList ################################################################################ @jsonify def submit(self): pagestart = time() RPC = getRPCClient("WorkloadManagement/JobMonitoring") user = str(credentials.getUsername()) userDN = str(credentials.getUserDN()) group = str(credentials.getSelectedGroup()) result = RPC.getOwners() haveJobsToDisplay = False if result["OK"]: resultEligible = self.__getEligibleOwners( userDN, group ) if resultEligible['OK']: for own in resultEligible['Value']: if own in result["Value"]: # There is something to display probably haveJobsToDisplay = True break if 
not haveJobsToDisplay: c.result = {"success":"false","error":"You don't have any jobs eligible to display"} return c.result else: c.result = {"success":"false","error":"Failed to evaluate eligible users"} return c.result else: c.result = {"success":"false","error":result["Message"]} return c.result req = self.__request() gLogger.always("getJobPageSummaryWeb(%s,%s,%s,%s)" % (req,globalSort,pageNumber,numberOfJobs)) result = RPC.getJobPageSummaryWeb(req,globalSort,pageNumber,numberOfJobs) gLogger.always(" - REZ: " %result) if result["OK"]: result = result["Value"] gLogger.info("ReS",result) if result.has_key("TotalRecords"): if result["TotalRecords"] > 0: if result.has_key("ParameterNames") and result.has_key("Records"): if len(result["ParameterNames"]) > 0: if len(result["Records"]) > 0: c.result = [] jobs = result["Records"] head = result["ParameterNames"] headLength = len(head) for i in jobs: tmp = {} for j in range(0,headLength): tmp[head[j]] = i[j] c.result.append(tmp) total = result["TotalRecords"] timestamp = Time.dateTime().strftime("%Y-%m-%d %H:%M [UTC]") if result.has_key("Extras"): st = self.__dict2string(req) extra = result["Extras"] c.result = {"success":"true","result":c.result,"total":total,"extra":extra,"request":st,"date":timestamp} else: c.result = {"success":"true","result":c.result,"total":total,"date":timestamp} else: c.result = {"success":"false","result":"","error":"There are no data to display"} else: c.result = {"success":"false","result":"","error":"ParameterNames field is missing"} else: c.result = {"success":"false","result":"","error":"Data structure is corrupted"} else: c.result = {"success":"false","result":"","error":"There were no data matching your selection"} else: c.result = {"success":"false","result":"","error":"Data structure is corrupted"} else: c.result = {"success":"false","error":result["Message"]} gLogger.info("\033[0;31mJOB SUBMIT REQUEST:\033[0m %s" % (time() - pagestart)) return c.result ################################################################################ def __dict2string(self,req): result = "" try: for key,value in req.iteritems(): result = result + str(key) + ": " + ", ".join(value) + "; " except Exception, x: gLogger.info("\033[0;31m Exception: \033[0m %s" % x) result = result.strip() result = result[:-1] return result ################################################################################ def __getSelectionData(self): callback = {} group = credentials.getSelectedGroup() user = str(credentials.getUsername()) if len(request.params) > 0: tmp = {} for i in request.params: tmp[i] = str(request.params[i]) callback["extra"] = tmp if callback["extra"].has_key("prod"): callback["extra"]["prod"] = callback["extra"]["prod"].zfill(8) if callback["extra"]["prod"] == "00000000": callback["extra"]["prod"] = "" gLogger.info(" - ",callback["extra"]) if user == "Anonymous": callback["prod"] = [["Insufficient rights"]] else: RPC = getRPCClient("WorkloadManagement/JobMonitoring") result = RPC.getProductionIds() if result["OK"]: prod = [] prods = result["Value"] if len(prods)>0: prod.append([str("All")]) tmp = [] for keys in prods: try: id = str(int(keys)).zfill(8) except: id = str(keys) tmp.append(str(id)) tmp.sort(reverse=True) for i in tmp: prod.append([str(i)]) else: prod = [["Nothing to display"]] else: gLogger.error("RPC.getProductionIds() return error: %s" % result["Message"]) prod = [["Error happened on service side"]] callback["prod"] = prod ### RPC = getRPCClient("WorkloadManagement/JobMonitoring") result = RPC.getSites() if 
result["OK"]: tier1 = gConfig.getValue("/Website/PreferredSites",[]) # Always return a list site = [] if len(result["Value"])>0: s = list(result["Value"]) site.append([str("All")]) for i in tier1: site.append([str(i)]) for i in s: if i not in tier1: site.append([str(i)]) else: site = [["Nothing to display"]] else: gLogger.error("RPC.getSites() return error: %s" % result["Message"]) site = [["Error happened on service side"]] callback["site"] = site ### result = RPC.getStates() if result["OK"]: stat = [] if len(result["Value"])>0: stat.append([str("All")]) for i in result["Value"]: stat.append([str(i)]) else: stat = [["Nothing to display"]] else: gLogger.error("RPC.getStates() return error: %s" % result["Message"]) stat = [["Error happened on service side"]] callback["status"] = stat ### result = RPC.getMinorStates() if result["OK"]: stat = [] if len(result["Value"])>0: stat.append([str("All")]) for i in result["Value"]: i = i.replace(",",";") stat.append([i]) else: stat = [["Nothing to display"]] else: gLogger.error("RPC.getMinorStates() return error: %s" % result["Message"]) stat = [["Error happened on service side"]] callback["minorstat"] = stat ### result = RPC.getApplicationStates() if result["OK"]: app = [] if len(result["Value"])>0: app.append([str("All")]) for i in result["Value"]: i = i.replace(",",";") app.append([i]) else: app = [["Nothing to display"]] else: gLogger.error("RPC.getApplicationstates() return error: %s" % result["Message"]) app = [["Error happened on service side"]] callback["app"] = app ### result = RPC.getJobTypes() if result["OK"]: types = [] if len(result["Value"])>0: types.append([str("All")]) for i in result["Value"]: i = i.replace(",",";") types.append([i]) else: types = [["Nothing to display"]] else: gLogger.error("RPC.getJobTypes() return error: %s" % result["Message"]) types = [["Error happened on service side"]] callback["types"] = types ### userDN = credentials.getUserDN() result = self.__getEligibleOwners( userDN, group ) if not result['OK']: callback["owner"] = [["Failed to evaluate access rights"]] else: eligibleOwners = result['Value'] if not eligibleOwners: callback["owner"] = [["Nothing to display"]] else: callback["owner"] = [ [str( own )] for own in eligibleOwners ] return callback ################################################################################ def __request(self): gLogger.always("!!! 
PARAMS: ",str(request.params)) req = {} group = credentials.getSelectedGroup() user = str(credentials.getUsername()) global pageNumber global numberOfJobs global globalSort if request.params.has_key("limit") and len(request.params["limit"]) > 0: numberOfJobs = int(request.params["limit"]) if request.params.has_key("start") and len(request.params["start"]) > 0: pageNumber = int(request.params["start"]) else: pageNumber = 0 else: numberOfJobs = 25 pageNumber = 0 if request.params.has_key("id") and len(request.params["id"]) > 0: testString = str(request.params["id"]) testString = testString.strip(';, ') testString = testString.split(', ') if len(testString) == 1: testString = testString[0].split('; ') if len(testString) == 1: testString = testString[0].split(' ') if len(testString) == 1: testString = testString[0].split(',') if len(testString) == 1: testString = testString[0].split(';') if len(testString) == 1: req["JobID"] = testString[0] else: req["JobID"] = testString else: req["JobID"] = testString else: req["JobID"] = testString else: req["JobID"] = testString else: req["JobID"] = testString for i in req["JobID"]: testI = i.split('-') if len(testI) == 2: testI[0] = testI[0].strip(' ') testI[1] = testI[1].strip(' ') rangeID = range(testI[0],testI[1]) gLogger.info("RANGE:",rangeID) else: groupProperty = credentials.getProperties(group) gLogger.always("### groupProperty: ",str(groupProperty)) result = gConfig.getOption("/Website/ListSeparator") if result["OK"]: separator = result["Value"] else: separator = ":::" if request.params.has_key("prod") and len(request.params["prod"]) > 0: if str(request.params["prod"]) != "All": req["JobGroup"] = str(request.params["prod"]).split(separator) if request.params.has_key("site") and len(request.params["site"]) > 0: if str(request.params["site"]) != "All": req["Site"] = [x.strip() for x in str(request.params["site"]).split(separator)] if request.params.has_key("status") and len(request.params["status"]) > 0: if str(request.params["status"]) != "All": req["Status"] = str(request.params["status"]).split(separator) if request.params.has_key("minorstat") and len(request.params["minorstat"]) > 0: if str(request.params["minorstat"]) != "All": req["MinorStatus"] = str(request.params["minorstat"]).split(separator) if request.params.has_key("app") and len(request.params["app"]) > 0: if str(request.params["app"]) != "All": req["ApplicationStatus"] = str(request.params["app"]).split(separator) if request.params.has_key("types") and len(request.params["types"]) > 0: if str(request.params["types"]) != "All": req["JobType"] = str(request.params["types"]).split(separator) if not "JobAdministrator" in groupProperty and not "JobSharing" in groupProperty: if not request.params.has_key("globalStat"): req["Owner"] = str(user) else: if request.params.has_key("owner") and len(request.params["owner"]) > 0: if str(request.params["owner"]) != "All": req["Owner"] = str(request.params["owner"]).split(separator) if request.params.has_key("startDate") and len(request.params["startDate"]) > 0: if str(request.params["startDate"]) != "YYYY-mm-dd": if request.params.has_key("startTime") and len(request.params["startTime"]) > 0: req["FromDate"] = str(request.params["startDate"] + " " + request.params["startTime"]) else: req["FromDate"] = str(request.params["startDate"]) if request.params.has_key("endDate") and len(request.params["endDate"]) > 0: if str(request.params["endDate"]) != "YYYY-mm-dd": if request.params.has_key("endTime") and len(request.params["endTime"]) > 0: req["ToDate"] = 
str(request.params["endDate"] + " " + request.params["endTime"]) else: req["ToDate"] = str(request.params["endDate"]) if request.params.has_key("date") and len(request.params["date"]) > 0: if str(request.params["date"]) != "YYYY-mm-dd": req["LastUpdate"] = str(request.params["date"]) if request.params.has_key("sort") and len(request.params["sort"]) > 0: globalSort = str(request.params["sort"]) key,value = globalSort.split(" ") globalSort = [[str(key),str(value)]] else: globalSort = [["JobID","DESC"]] gLogger.always("REQUEST:",req) return req ################################################################################ @jsonify def action(self): pagestart = time() if request.params.has_key("getJDL") and len(request.params["getJDL"]) > 0: id = int(request.params["getJDL"]) return self.__getJdl(id) elif request.params.has_key("getStandardOutput") and len(request.params["getStandardOutput"]) > 0: id = int(request.params["getStandardOutput"]) return self.__getStandardOutput(id) elif request.params.has_key("getBasicInfo") and len(request.params["getBasicInfo"]) > 0: id = int(request.params["getBasicInfo"]) return self.__getBasicInfo(id) elif request.params.has_key("LoggingInfo") and len(request.params["LoggingInfo"]) > 0: id = int(request.params["LoggingInfo"]) return self.__getLoggingInfo(id) elif request.params.has_key("getParams") and len(request.params["getParams"]) > 0: id = int(request.params["getParams"]) return self.__getParams(id) elif request.params.has_key("delete") and len(request.params["delete"]) > 0: id = request.params["delete"] id = id.split(",") id = [int(i) for i in id ] return self.__delJobs(id) elif request.params.has_key("kill") and len(request.params["kill"]) > 0: id = request.params["kill"] id = id.split(",") id = [int(i) for i in id ] return self.__killJobs(id) elif request.params.has_key("reschedule") and len(request.params["reschedule"]) > 0: id = request.params["reschedule"] id = id.split(",") id = [int(i) for i in id ] return self.__rescheduleJobs(id) elif request.params.has_key("reset") and len(request.params["reset"]) > 0: id = request.params["reset"] id = id.split(",") id = [int(i) for i in id ] return self.__resetJobs(id) elif request.params.has_key("pilotStdOut") and len(request.params["pilotStdOut"]) > 0: id = request.params["pilotStdOut"] return self.__pilotGetOutput("out",int(id)) elif request.params.has_key("pilotStdErr") and len(request.params["pilotStdErr"]) > 0: id = request.params["pilotStdErr"] return self.__pilotGetOutput("err",int(id)) elif request.params.has_key("LogURL") and len(request.params["LogURL"]) > 0: id = request.params["LogURL"] return self.__pilotGetURL(int(id)) elif request.params.has_key("getStagerReport") and len(request.params["getStagerReport"]) > 0: id = request.params["getStagerReport"] return self.__getStagerReport(int(id)) elif request.params.has_key("getSandBox") and len(request.params["getSandBox"]) > 0: id = request.params["getSandBox"] return self.__getSandBox(int(id)) elif request.params.has_key("refreshSelection") and len(request.params["refreshSelection"]) > 0: return self.__getSelectionData() elif request.params.has_key("getStat") and len(request.params["getStat"]) > 0: selector = str(request.params["getStat"]) return self.__getStats(selector) elif request.params.has_key("globalStat"): return self.__globalStat() elif request.params.has_key("getPageOptions") and len(request.params["getPageOptions"]) > 0: return self.__getPageOptions() elif request.params.has_key("getPlotSrc") and len(request.params["getPlotSrc"]) > 0: 
id = request.params["getPlotSrc"] if request.params.has_key("type") and len(request.params["type"]) > 0: type = request.params["type"] else: type = "jobsBySite" if request.params.has_key("time") and len(request.params["time"]) > 0: timeToSet = request.params["time"] else: timeToSet = "week" if request.params.has_key("img") and len(request.params["img"]) > 0: img = request.params["img"] else: img = "False" return self.__getPlotSrc( type, id, timeToSet, img ) elif request.params.has_key( "getPending" ) and len( request.params["getPending"] ) > 0: return self.__getPending( request.params["getPending"] ) elif request.params.has_key( "canRunJobs" ) and request.params["canRunJobs"]: return self.__canRunJobs() elif request.params.has_key("getProxyStatus") and len(request.params["getProxyStatus"]) > 0: return self.__getProxyStatus() elif request.params.has_key("getLaunchpadOpts") and len(request.params["getLaunchpadOpts"]) > 0: return self.__getLaunchpadOpts() else: c.result = {"success":"false","error":"The request parameters can not be recognized or they are not defined"} return c.result ################################################################################ def __getPageOptions( self ): gLogger.info( "start __getPageOptions" ) callback = dict() for i in [ "ShowRequest" , "ShowStagerReport" , "ShowLogFile" ]: value = gConfig.getValue( "/Website/JobMonitor/Context/%s" % i , 'false' ) callback[ i ] = value gLogger.debug( "Page options: %s" % callback ) gLogger.info( "end __getPageOptions" ) return { "success" : "true" , "result" : callback } ################################################################################ def __getPlatform( self ): gLogger.info( "start __getPlatform" ) path = "/Resources/Computing/OSCompatibility" result = gConfig.getOptionsDict( path ) gLogger.debug( result ) if not result[ "OK" ]: return False platformDict = result[ "Value" ] platform = platformDict.keys() gLogger.debug( "platform: %s" % platform ) gLogger.info( "end __getPlatform" ) return platform ################################################################################ def __getOptionsFromCS( self , path = "/Website/Launchpad/Options" , delimiter = "," ): gLogger.info( "start __getOptionsFromCS" ) result = gConfig.getOptionsDict( path ) gLogger.always( result ) if not result["OK"]: return False options = result["Value"] for i in options.keys(): options[ i ] = options[ i ].split( delimiter ) result = gConfig.getSections(path) if result["OK"]: sections = result["Value"] if len(sections) > 0: for i in sections: options[ i ] = self.__getOptionsFromCS( path + '/' + i , delimiter ) gLogger.always( "options: %s" % options ) gLogger.info( "end __getOptionsFromCS" ) return options ################################################################################ def __getLaunchpadOpts(self): gLogger.info( "start __getLaunchpadOpts" ) delimiter = gConfig.getValue( "/Website/Launchpad/ListSeparator" , ',' ) options = self.__getOptionsFromCS( delimiter = delimiter) platform = self.__getPlatform() if platform and options: if not options.has_key( "Platform" ): options[ "Platform" ] = platform else: csPlatform = list( options[ "Platform" ] ) allPlatforms = csPlatform + platform platform = uniqueElements( allPlatforms ) options[ "Platform" ] = platform gLogger.debug( "Combined options from CS: %s" % options ) override = gConfig.getValue( "/Website/Launchpad/OptionsOverride" , False) gLogger.info( "end __getLaunchpadOpts" ) return {"success":"true","result":options,"override":override,"separator":delimiter} 
################################################################################ def __getStats(self,selector): gLogger.always(" --- selector : %s" % selector) # import sys # sys.stdout.flush() req = self.__request() selector = str(selector) RPC = getRPCClient("WorkloadManagement/JobMonitoring") if selector == "Minor status": selector = "MinorStatus" elif selector == "Application status": selector = "ApplicationStatus" gLogger.always(" --- getJobStats(%s,%s) : " % (str(selector),str(req))) result = RPC.getJobStats(selector,req) if result["OK"]: c.result = [] result = dict(result["Value"]) keylist = result.keys() keylist.sort() if selector == "Site": tier1 = gConfig.getValue("/Website/PreferredSites",[]) if len(tier1) > 0: tier1.sort() for i in tier1: if result.has_key(i): countryCode = i.rsplit(".",1)[1] c.result.append({"Key":i,"Value":result[i],"Code":countryCode}) for key in keylist: if selector == "Site" and tier1: if key not in tier1: try: countryCode = key.rsplit(".",1)[1] except: countryCode = "Unknown" c.result.append({"Key":key,"Value":result[key],"Code":countryCode}) elif selector == "Site" and not tier1: try: countryCode = key.rsplit(".",1)[1] except: countryCode = "Unknown" c.result.append({"Key":key,"Value":result[key],"Code":countryCode}) else: c.result.append({"Key":key,"Value":result[key]}) c.result = {"success":"true","result":c.result} else: c.result = {"success":"false","error":result["Message"]} return c.result ################################################################################ def __canRunJobs(self): groupPropertie = credentials.getProperties( credentials.getSelectedGroup() ) if "NormalUser" in groupPropertie: return True else: return False ################################################################################ def __getProxyStatus(self,secondsOverride = None): from DIRAC.FrameworkSystem.Client.ProxyManagerClient import ProxyManagerClient proxyManager = ProxyManagerClient() group = str(credentials.getSelectedGroup()) if group == "visitor": return {"success":"false","error":"User is anonymous or is not registered in the system"} userDN = str(credentials.getUserDN()) if secondsOverride and str(secondsOverride).isdigit(): validSeconds = int(secondsOverride) else: defaultSeconds = 24 * 3600 + 60 # 24H + 1min validSeconds = gConfig.getValue("/Registry/DefaultProxyLifeTime",defaultSeconds) gLogger.info("\033[0;31m userHasProxy(%s, %s, %s) \033[0m" % (userDN,group,validSeconds)) result = proxyManager.userHasProxy(userDN,group,validSeconds) if result["OK"]: if result["Value"]: c.result = {"success":"true","result":"true"} else: c.result = {"success":"true","result":"false"} else: c.result = {"success":"false","error":"false"} gLogger.info("\033[0;31m PROXY: \033[0m",result) return c.result ################################################################################ def __getJdl(self,id): RPC = getRPCClient("WorkloadManagement/JobMonitoring") result = RPC.getJobJDL( id, False ) if result["OK"]: c.result = result["Value"] c.result = {"success":"true","result":c.result} else: c.result = {"success":"false","error":result["Message"]} gLogger.info("JDL:",id) return c.result ################################################################################ def __getBasicInfo(self,id): RPC = getRPCClient("WorkloadManagement/JobMonitoring") result = RPC.getJobSummary(id) if result["OK"]: itemList = result["Value"] c.result = [] for key,value in itemList.items(): c.result.append([key,value]) c.result = {"success":"true","result":c.result} else: c.result = 
{"success":"false","error":result["Message"]} gLogger.info("BasicInfo:",id) return c.result ################################################################################ def __getLoggingInfo(self,id): RPC = getRPCClient("WorkloadManagement/JobMonitoring") result = RPC.getJobLoggingInfo(id) if result["OK"]: c.result = result["Value"] c.result = {"success":"true","result":c.result} else: c.result = {"success":"false","error":result["Message"]} gLogger.info("LoggingInfo:",id) return c.result ################################################################################ def __getPending(self,id): try: id = int(id) except Exception,x: c.result = {"success":"false","error":"%s" % str(x)} return c.result RPC = getRPCClient( "RequestManagement/ReqManager" ) result = RPC.readRequestsForJobs( [id] ) if result["OK"]: c.result = [] if id in result['Value']['Successful']: req = Request(result['Value']['Successful'][id]).getDigest()['Value'] c.result.append( ["PendingRequest", req] ) c.result = {"success":"true", "result":c.result} elif id in result['Value']['Failed']: # when no request associated to the job c.result = {"success":"false", "error":result['Value']["Failed"][id]} else: c.result = {"success":"false", "error":"No request found with unknown reason"} else: c.result = {"success":"false","error":result["Message"]} gLogger.info("Params:",id) return c.result ################################################################################ def __getParams(self,id): try: id = int(id) except Exception,x: c.result = {"success":"false","error":"%s" % str(x)} return c.result RPC = getRPCClient("WorkloadManagement/JobMonitoring") result = RPC.getJobParameters(id) if result["OK"]: attr = result["Value"] c.result = [] for i in attr.items(): if i[0] != "StandardOutput": c.result.append([i[0],i[1]]) c.result = {"success":"true","result":c.result} else: c.result = {"success":"false","error":result["Message"]} gLogger.info("Params:",id) return c.result ################################################################################ def __getStandardOutput(self,id): RPC = getRPCClient("WorkloadManagement/JobMonitoring") result = RPC.getJobParameters(id) if result["OK"]: attr = result["Value"] if attr.has_key("StandardOutput"): c.result = attr["StandardOutput"] c.result = {"success":"true","result":c.result} else: c.result = "Not accessible yet" c.result = {"success":"false","error":c.result} else: c.result = {"success":"false","error":result["Message"]} gLogger.info("StandardOutput:",id) return c.result ################################################################################ def __delJobs(self,id): MANAGERRPC = getRPCClient("WorkloadManagement/JobManager") result = MANAGERRPC.deleteJob(id) if result["OK"]: c.result = "" c.result = {"success":"true","result":c.result} else: if result.has_key("InvalidJobIDs"): c.result = "Invalid JobIDs: %s" % result["InvalidJobIDs"] c.result = {"success":"false","error":c.result} elif result.has_key("NonauthorizedJobIDs"): c.result = "You are nonauthorized to delete jobs with JobID: %s" % result["NonauthorizedJobIDs"] c.result = {"success":"false","error":c.result} else: c.result = {"success":"false","error":result["Message"]} gLogger.info("DELETE:",id) return c.result ################################################################################ def __killJobs(self,id): MANAGERRPC = getRPCClient("WorkloadManagement/JobManager") result = MANAGERRPC.killJob(id) if result["OK"]: c.result = "" c.result = {"success":"true","result":c.result} else: if 
result.has_key("InvalidJobIDs"): c.result = "Invalid JobIDs: %s" % result["InvalidJobIDs"] c.result = {"success":"false","error":c.result} elif result.has_key("NonauthorizedJobIDs"): c.result = "You are nonauthorized to delete jobs with JobID: %s" % result["NonauthorizedJobIDs"] c.result = {"success":"false","error":c.result} else: c.result = {"success":"false","error":result["Message"]} gLogger.info("KILL:",id) return c.result ################################################################################ def __resetJobs(self,id): MANAGERRPC = getRPCClient("WorkloadManagement/JobManager") result = MANAGERRPC.resetJob(id) if result["OK"]: c.result = "" c.result = {"success":"true","result":c.result} else: if result.has_key("InvalidJobIDs"): c.result = "Invalid JobIDs: %s" % result["InvalidJobIDs"] c.result = {"success":"false","error":c.result} elif result.has_key("NonauthorizedJobIDs"): c.result = "You are nonauthorized to delete jobs with JobID: %s" % result["NonauthorizedJobIDs"] c.result = {"success":"false","error":c.result} else: c.result = {"success":"false","error":result["Message"]} gLogger.info("RESET:",id) return c.result ################################################################################ def __rescheduleJobs(self,id): MANAGERRPC = getRPCClient("WorkloadManagement/JobManager") result = MANAGERRPC.rescheduleJob(id) if result["OK"]: c.result = "" c.result = {"success":"true","result":c.result} else: if result.has_key("InvalidJobIDs"): c.result = "Invalid JobIDs: %s" % result["InvalidJobIDs"] c.result = {"success":"false","error":c.result} elif result.has_key("NonauthorizedJobIDs"): c.result = "You are nonauthorized to delete jobs with JobID: %s" % result["NonauthorizedJobIDs"] c.result = {"success":"false","error":c.result} else: c.result = {"success":"false","error":result["Message"]} gLogger.info("RESET:",id) return c.result ################################################################################ def __pilotGetOutput(self,mode,id): print "PilotOutput:",id PILOTRPC = getRPCClient("WorkloadManagement/WMSAdministrator") result = PILOTRPC.getJobPilotOutput(id) if result["OK"]: output = result["Value"] if mode == "out" and output.has_key("StdOut"): c.result = output["StdOut"] c.result = {"success":"true","result":c.result} elif mode == "err" and output.has_key("StdErr"): c.result = output["StdErr"] c.result = {"success":"true","result":c.result} else: c.result = {"success":"false","error":result["Message"]} gLogger.info("pilotGetOutput:",id) return c.result ################################################################################ def __pilotGetURL(self,id): print "LogFile:",id RPC = getRPCClient("WorkloadManagement/JobMonitoring") result = RPC.getJobParameters(id) if result["OK"]: attr = result["Value"] if attr.has_key("Log URL"): url = attr["Log URL"] url = url.split('"') c.result = url[1] c.result = {"success":"true","result":c.result} else: c.result = "No URL found" c.result = {"success":"false","error":c.result} else: c.result = {"success":"false","error":result["Message"]} gLogger.info("pilotGetURL:",id) return c.result ################################################################################ def __getStagerReport(self,id): RPC = getRPCClient("WorkloadManagement/JobMonitoring") result = RPC.getJobParameters(id) if result["OK"]: attr = result["Value"] c.result = [] if attr.has_key("StagerReport"): c.result = attr["StagerReport"] c.result = {"success":"true","result":c.result} else: c.result = {"success":"false","error":"StagerReport not 
available"} else: c.result = {"success":"false","error":result["Message"]} gLogger.info("getStagerReport:",id) return c.result ################################################################################ def __globalStat(self): RPC = getRPCClient("WorkloadManagement/JobMonitoring") result = RPC.getJobPageSummaryWeb({},globalSort,0,1,False) gLogger.info(" - result - :",result) if result["OK"]: result = result["Value"] if result.has_key("Extras"): extra = result["Extras"] back = [] for i in sortList(extra.keys()): back.append([i,extra[i]]) return back ################################################################################ def __getSandBox(self,id): return {"success":"false","error":"Not ready yet"} ################################################################################ def __getPlotSrc(self,type,args,timeToSet,img): rc = ReportsClient() type = str(type) args = str(args) name = type + args if args == "All": args = {} else: args = args.split(",") args = {"Site":args} time = str(timeToSet) now = datetime.datetime.utcnow() if timeToSet == 'day': timeSpan = now - datetime.timedelta( seconds = 86400 ) elif timeToSet == 'week': timeSpan = now - datetime.timedelta( seconds = 86400 * 7 ) elif timeToSet == 'month': timeSpan = now - datetime.timedelta( seconds = 86400 * 30 ) elif timeToSet == 'year': timeSpan = now - datetime.timedelta( seconds = 86400 * 360 ) else: timeSpan = now - datetime.timedelta( seconds = 86400 * 7 ) if len(name) < 1: c.result = {"success":"false","error":"Recived empty value"} else: result = self.__imgCache.get(name) if not result: result = rc.listReports("Job") if result["OK"]: plots = result["Value"] if type == 'jobsBySite': if img == 'True': result = rc.generatePlot("Job",plots[8],timeSpan,now,args,"Site") else: result = rc.generatePlot("Job",plots[8],timeSpan,now,args,"Site",{'thumbnail':True,'widh':800,'height':600,'thb_width':190,'thb_height':125}) elif type == 'jobCPUbySite': if img == 'True': result = rc.generatePlot("Job",plots[0],timeSpan,now,args,"Site") else: result = rc.generatePlot("Job",plots[0],timeSpan,now,args,"Site",{'thumbnail':True,'widh':800,'height':600,'thb_width':196,'thb_height':125}) elif type == 'CPUUsedBySite': if img == 'True': result = rc.generatePlot("Job",plots[2],timeSpan,now,args,"Site") else: result = rc.generatePlot("Job",plots[2],timeSpan,now,args,"Site",{'thumbnail':True,'widh':800,'height':600,'thb_width':196,'thb_height':125}) else: if img == 'True': result = rc.generatePlot("Job",plots[8],timeSpan,now,args,"Site") else: result = rc.generatePlot("Job",plots[8],timeSpan,now,{},"Site",{'thumbnail':True,'widh':800,'height':600,'thb_width':196,'thb_height':125}) gLogger.info("-RES:",result) if result["OK"]: result = result["Value"] if img == 'True': result = result["plot"] else: result = result["thumbnail"] c.result = {"success":"true","result":result} self.__imgCache.add(name, 600, result) else: c.result = {"success":"false","error":result["Message"]} else: c.result = {"success":"false","error":result["Message"]} else: c.result = {"success":"true","result":result} gLogger.info("getPlotSrc:",c.result) return c.result ################################################################################ @jsonify def jobSubmit(self): response.headers['Content-type'] = "text/html" # Otherwise the browser would offer you to download a JobSubmit file if not self.__canRunJobs(): return {"success":"false","error":"You are not allowed to run the jobs"} proxy = self.__getProxyStatus(86460) if proxy["success"] == "false" or 
proxy["result"] == "false": return {"success":"false","error":"You can not run a job: your proxy is valid less then 24 hours"} jdl = "" params = {} for tmp in request.params: try: if len(request.params[tmp]) > 0: params[tmp] = request.params[tmp] except: pass for item in params: if item == "OutputSandbox": jdl = jdl + str(item) + " = {" + str(params[item]) + "};" if item == "Parameters": try: parameters = int(params[item]) jdl = jdl + str(item) + " = \"" + str(parameters) + "\";" except: parameters = str(params[item]) if parameters.find("{") >= 0 and parameters.find("}") >= 0: parameters = parameters.rstrip("}") parameters = parameters.lstrip("{") if len(parameters) > 0: jdl = jdl + str(item) + " = {" + parameters + "};" else: return {"success":"false","error":"Parameters vector has zero length"} else: return {"success":"false","error":"Parameters must be an integer or a vector. Example: 4 or {1,2,3,4}"} else: jdl = jdl + str(item) + " = \"" + str(params[item]) + "\";" store = [] for key in request.params.keys(): try: if request.params[key].filename: gLogger.info("\033[0;31m file - %s \033[0m " % request.params[key].filename) store.append(request.params[key]) except: pass gLogger.info("\033[0;31m *** %s \033[0m " % params) clearFS = False # Clear directory flag fileNameList = [] exception_counter = 0 if len(store) > 0: # If there is a file(s) in sandbox clearFS = True import shutil import os storePath = tempfile.mkdtemp(prefix='DIRAC_') try: for file in store: name = os.path.join( storePath , file.filename.lstrip(os.sep) ) tFile = open( name , 'w' ) shutil.copyfileobj(file.file, tFile) file.file.close() tFile.close() fileNameList.append(name) except Exception,x: exception_counter = 1 c.result = {"success":"false","error":"An EXCEPTION happens during saving your sandbox file(s): %s" % str(x)} if len(fileNameList) > 0 and exception_counter == 0: sndBox = "InputSandbox = {\"" + "\",\"".join(fileNameList) + "\"};" else: sndBox = "" if exception_counter == 0: jdl = jdl + sndBox from DIRAC.WorkloadManagementSystem.Client.WMSClient import WMSClient jobManager = WMSClient(getRPCClient("WorkloadManagement/JobManager"), getRPCClient("WorkloadManagement/SandboxStore"), getTransferClient("WorkloadManagement/SandboxStore")) jdl = str(jdl) gLogger.info("J D L : ",jdl) try: result = jobManager.submitJob(jdl) if result["OK"]: c.result = {"success":"true","result":result["Value"]} else: c.result = {"success":"false","error":result["Message"]} except Exception,x: c.result = {"success":"false","error":"An EXCEPTION happens during job submittion: %s" % str(x)} if clearFS: shutil.rmtree(storePath) return c.result ################################################################################
DIRACGrid/DIRACWeb
dirac/controllers/jobs/JobMonitor.py
Python
gpl-3.0
43,384
[ "DIRAC" ]
e403f9b7faeee17cbc754b49f19370826982f54a54336edb5909d61f2ddf6f38
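Every private helper in the JobMonitor controller above repeats the same envelope: call an RPC client, then wrap result["Value"] into {"success": "true", "result": ...} or result["Message"] into {"success": "false", "error": ...}. A minimal sketch of factoring that pattern out — not part of DIRACWeb, and the helper name wrap_rpc_result is hypothetical:

def wrap_rpc_result(result, transform=None):
    """Turn a DIRAC-style {'OK': ..., 'Value'/'Message': ...} return value into
    the {'success': 'true'/'false', ...} envelope used by the web layer."""
    if result.get("OK"):
        value = result["Value"]
        if transform is not None:
            value = transform(value)
        return {"success": "true", "result": value}
    return {"success": "false", "error": result.get("Message", "Unknown error")}

# Hypothetical usage mirroring __getParams above: keep every job parameter
# except StandardOutput.
# c.result = wrap_rpc_result(
#     RPC.getJobParameters(id),
#     transform=lambda attr: [[k, v] for k, v in attr.items() if k != "StandardOutput"])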
""" Tools for reading Ampliconnoise outputs """ import itertools import os.path import re import shutil import tempfile from Bio.Alphabet import generic_dna from Bio.Seq import Seq from Bio.SeqRecord import SeqRecord from anoisetools import sff class AnoiseRawReader(object): """ Reader for AmpliconNoise .raw files, which consist of a row with: record_count key_barcode file_name >id flow_count flow1 [flow2 [flow3...]] This function skips the first record, only returning the flow. Yields BioPython SeqRecords """ def __init__(self, fp): self._fp = (i.rstrip('\n') for i in fp) header = next(self._fp) count, barcode, fname = header.split(None, 2) self.count = int(count) self.barcode = barcode self.fname = fname def __iter__(self): while True: header = next(self._fp)[1:] split_line = next(self._fp).split() flow_length = int(split_line[0]) flows = split_line[1:] flows = map(float, flows) bases = sff.flow_to_seq(flows) sequence = Seq(bases, generic_dna) record = SeqRecord(sequence, id=header) # Add flows and clip as annotations record.annotations['flow_values'] = [int(i * 100) for i in flows] record.annotations['clip_flow_right'] = flow_length yield record def _record_to_anoise_raw(seq_record): """ Generates a string suitable for using as input to AmpliconNoise, consisting of the identifier, a newline, the integer length of the flow, a space, and the float flow readings. """ l, r = sff.find_clip(seq_record) return '>{identifier}\n{length} {flow}'.format( identifier=seq_record.id, length=r, flow=sff.flows_to_string(seq_record)) class AnoiseRawWriter(object): """ Writer for AmpliconNoise .raw files .raw files require a row at the top with a count of records. To get around that, this class writes to a temporary file, then copies the results to fp when closed. """ def __init__(self, fp, identifier): self.identifier = identifier self._fp = fp self.file_name = getattr(fp, 'name', "Unknown File") self._temp = tempfile.TemporaryFile() self.count = 0 def _header(self): return '{0.count} {0.identifier} {0.file_name}'.format(self) def write(self, record): print >> self._temp, _record_to_anoise_raw(record) self.count += 1 def write_records(self, records): for record in records: self.write(record) def _copy_to_output(self): """ Add a header to _fp, writes all the records in _temp """ # Rewind self._temp.seek(0) # Write header print >> self._fp, self._header() # Copy contents shutil.copyfileobj(self._temp, self._fp) def close(self): """ Close the writer Copies all records into the destination file, closes the temp handle """ # No action if already closed if self._temp.closed and self._fp.closed: return try: self._copy_to_output() finally: self._temp.close() self._fp.close() def _read_count(read_name): m = re.search(r'noise_\d+_(\d+)$', read_name) if m: return int(m.group(1)) return 1 def read_mapping(fp): """ Read an ampliconnoise .mapping file """ # Base name bp = os.path.splitext(os.path.basename(fp.name))[0] indexes = itertools.count() lines = (i.strip() for i in fp) for line in lines: used_sequence, seqs = line.split(None, 1) seqs = seqs.split(',') total_weight = sum(_read_count(i) for i in seqs) read_name = '{0}_{1}_{2}'.format(bp, next(indexes), total_weight) yield read_name, seqs def merge_mapping(snoise_map, pnoise_map): """ Merge two mapping files, from SeqNoise and PyroNoise, ending with a mapping from seqnoise_id -> list of original sequence ids """ pnoise_map = dict(pnoise_map) for i, seqs in snoise_map: orig_seqs = [o for s in seqs for o in pnoise_map[s]] yield i, orig_seqs
fhcrc/ampliconnoise
anoisetools/anoiseio.py
Python
gpl-3.0
4,311
[ "Biopython" ]
ccd19395c2c83f9864593eb7f8a58e353a6b244afe8637fc5dcc7ab137a5258a
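The weight bookkeeping in read_mapping() above hinges on the read-name convention parsed by _read_count(): names ending in "noise_<index>_<count>" carry a cluster size, and anything else counts as a single read. A small self-contained illustration of that convention (not taken from the package; the example names are made up):

import re

def read_count(read_name):
    # Same regex as _read_count above: a trailing "noise_<idx>_<count>" suffix.
    m = re.search(r'noise_\d+_(\d+)$', read_name)
    return int(m.group(1)) if m else 1

names = ["sample_noise_0_12", "sample_noise_3_4", "plain_read"]
assert [read_count(n) for n in names] == [12, 4, 1]
assert sum(read_count(n) for n in names) == 17   # the total_weight computed per mapping line

read_mapping() then emits names of the form <basename>_<index>_<total_weight>, so the cluster weight survives into the next processing stage.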
# ast.py # Copyright (C) 2006, 2007, 2008, 2009, 2010 Michael Bayer mike_mp@zzzcomputing.com # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """utilities for analyzing expressions and blocks of Python code, as well as generating Python from AST nodes""" from mako import exceptions, pyparser, util import re class PythonCode(object): """represents information about a string containing Python code""" def __init__(self, code, **exception_kwargs): self.code = code # represents all identifiers which are assigned to at some point in the code self.declared_identifiers = util.Set() # represents all identifiers which are referenced before their assignment, if any self.undeclared_identifiers = util.Set() # note that an identifier can be in both the undeclared and declared lists. # using AST to parse instead of using code.co_varnames, code.co_names has several advantages: # - we can locate an identifier as "undeclared" even if its declared later in the same block of code # - AST is less likely to break with version changes (for example, the behavior of co_names changed a little bit # in python version 2.5) if isinstance(code, basestring): expr = pyparser.parse(code.lstrip(), "exec", **exception_kwargs) else: expr = code f = pyparser.FindIdentifiers(self, **exception_kwargs) f.visit(expr) class ArgumentList(object): """parses a fragment of code as a comma-separated list of expressions""" def __init__(self, code, **exception_kwargs): self.codeargs = [] self.args = [] self.declared_identifiers = util.Set() self.undeclared_identifiers = util.Set() if isinstance(code, basestring): if re.match(r"\S", code) and not re.match(r",\s*$", code): # if theres text and no trailing comma, insure its parsed # as a tuple by adding a trailing comma code += "," expr = pyparser.parse(code, "exec", **exception_kwargs) else: expr = code f = pyparser.FindTuple(self, PythonCode, **exception_kwargs) f.visit(expr) class PythonFragment(PythonCode): """extends PythonCode to provide identifier lookups in partial control statements e.g. for x in 5: elif y==9: except (MyException, e): etc. 
""" def __init__(self, code, **exception_kwargs): m = re.match(r'^(\w+)(?:\s+(.*?))?:\s*(#|$)', code.strip(), re.S) if not m: raise exceptions.CompileException("Fragment '%s' is not a partial control statement" % code, **exception_kwargs) if m.group(3): code = code[:m.start(3)] (keyword, expr) = m.group(1,2) if keyword in ['for','if', 'while']: code = code + "pass" elif keyword == 'try': code = code + "pass\nexcept:pass" elif keyword == 'elif' or keyword == 'else': code = "if False:pass\n" + code + "pass" elif keyword == 'except': code = "try:pass\n" + code + "pass" else: raise exceptions.CompileException("Unsupported control keyword: '%s'" % keyword, **exception_kwargs) super(PythonFragment, self).__init__(code, **exception_kwargs) class FunctionDecl(object): """function declaration""" def __init__(self, code, allow_kwargs=True, **exception_kwargs): self.code = code expr = pyparser.parse(code, "exec", **exception_kwargs) f = pyparser.ParseFunc(self, **exception_kwargs) f.visit(expr) if not hasattr(self, 'funcname'): raise exceptions.CompileException("Code '%s' is not a function declaration" % code, **exception_kwargs) if not allow_kwargs and self.kwargs: raise exceptions.CompileException("'**%s' keyword argument not allowed here" % self.argnames[-1], **exception_kwargs) def get_argument_expressions(self, include_defaults=True): """return the argument declarations of this FunctionDecl as a printable list.""" namedecls = [] defaults = [d for d in self.defaults] kwargs = self.kwargs varargs = self.varargs argnames = [f for f in self.argnames] argnames.reverse() for arg in argnames: default = None if kwargs: arg = "**" + arg kwargs = False elif varargs: arg = "*" + arg varargs = False else: default = len(defaults) and defaults.pop() or None if include_defaults and default: namedecls.insert(0, "%s=%s" % (arg, pyparser.ExpressionGenerator(default).value())) else: namedecls.insert(0, arg) return namedecls class FunctionArgs(FunctionDecl): """the argument portion of a function declaration""" def __init__(self, code, **kwargs): super(FunctionArgs, self).__init__("def ANON(%s):pass" % code, **kwargs)
kindy61/mako
mako/ast.py
Python
mit
5,251
[ "VisIt" ]
306ca5ed498897e3e02df87027edb8425c5c07011074f0a3835255941cf8756a
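PythonCode's declared/undeclared split is what lets Mako work out which names a block of template code defines locally and which must come from the rendering context. A rough standard-library analogue using Python's own ast module — Mako uses its pyparser visitors instead, and its rule is "referenced before assignment" rather than the simplification "referenced but never assigned" used here:

import ast

code = "total = base + offset\nresult = total * 2"
tree = ast.parse(code)
stores = {n.id for n in ast.walk(tree)
          if isinstance(n, ast.Name) and isinstance(n.ctx, ast.Store)}
loads = {n.id for n in ast.walk(tree)
         if isinstance(n, ast.Name) and isinstance(n.ctx, ast.Load)}
declared = stores              # assigned somewhere in the block
undeclared = loads - stores    # simplified stand-in for "referenced before assignment"
assert declared == {"total", "result"}
assert undeclared == {"base", "offset"}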
""" vtkImageExportToArray - a NumPy front-end to vtkImageExport This class converts a VTK image to a numpy array. The output array will always have 3 dimensions (or 4, if the image had multiple scalar components). To use this class, you must have numpy installed (http://numpy.scipy.org) Methods SetInputConnection(vtkAlgorithmOutput) -- connect to VTK image pipeline SetInput(vtkImageData) -- set an vtkImageData to export GetArray() -- execute pipeline and return a numpy array Methods from vtkImageExport GetDataExtent() GetDataSpacing() GetDataOrigin() """ import umath import numpy from vtk import vtkImageExport from vtk import VTK_SIGNED_CHAR from vtk import VTK_UNSIGNED_CHAR from vtk import VTK_SHORT from vtk import VTK_UNSIGNED_SHORT from vtk import VTK_INT from vtk import VTK_UNSIGNED_INT from vtk import VTK_LONG from vtk import VTK_UNSIGNED_LONG from vtk import VTK_FLOAT from vtk import VTK_DOUBLE class vtkImageExportToArray: def __init__(self): self.__export = vtkImageExport() self.__ConvertUnsignedShortToInt = False # type dictionary __typeDict = { VTK_SIGNED_CHAR:'b', VTK_UNSIGNED_CHAR:'B', VTK_SHORT:'h', VTK_UNSIGNED_SHORT:'H', VTK_INT:'i', VTK_UNSIGNED_INT:'I', VTK_FLOAT:'f', VTK_DOUBLE:'d'} __sizeDict = { VTK_SIGNED_CHAR:1, VTK_UNSIGNED_CHAR:1, VTK_SHORT:2, VTK_UNSIGNED_SHORT:2, VTK_INT:4, VTK_UNSIGNED_INT:4, VTK_FLOAT:4, VTK_DOUBLE:8 } # convert unsigned shorts to ints, to avoid sign problems def SetConvertUnsignedShortToInt(self,yesno): self.__ConvertUnsignedShortToInt = yesno def GetConvertUnsignedShortToInt(self): return self.__ConvertUnsignedShortToInt def ConvertUnsignedShortToIntOn(self): self.__ConvertUnsignedShortToInt = True def ConvertUnsignedShortToIntOff(self): self.__ConvertUnsignedShortToInt = False # set the input def SetInputConnection(self,input): return self.__export.SetInputConnection(input) def SetInput(self,input): return self.__export.SetInput(input) def GetInput(self): return self.__export.GetInput() def GetArray(self): input = self.__export.GetInput() input.UpdateInformation() type = input.GetScalarType() extent = input.GetWholeExtent() numComponents = input.GetNumberOfScalarComponents() dim = (extent[5]-extent[4]+1, extent[3]-extent[2]+1, extent[1]-extent[0]+1) if (numComponents > 1): dim = dim + (numComponents,) imArray = numpy.zeros(dim, self.__typeDict[type]) self.__export.Export(imArray) # convert unsigned short to int to avoid sign issues if (type == VTK_UNSIGNED_SHORT and self.__ConvertUnsignedShortToInt): imArray = umath.bitwise_and(imArray.astype('i'),0xffff) return imArray def GetDataExtent(self): return self.__export.GetDataExtent() def GetDataSpacing(self): return self.__export.GetDataSpacing() def GetDataOrigin(self): return self.__export.GetDataOrigin()
b3c/VTK-5.8
Wrapping/Python/vtk/util/vtkImageExportToArray.py
Python
bsd-3-clause
3,370
[ "VTK" ]
fb3f37a58462722ad31f274b591abad0e1a189e36252ac237da640018d305f15
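Typical use of the exporter follows the docstring above: connect an image pipeline, call GetArray(), and receive a numpy array indexed (z, y, x[, component]). A usage sketch, assuming a VTK 5.x era environment in which this module's imports (including the legacy umath module) resolve and vtkImageEllipsoidSource is available:

from vtk import vtkImageEllipsoidSource
from vtk.util.vtkImageExportToArray import vtkImageExportToArray

# Build a small synthetic image (64 x 64, single slice) and export it to numpy.
source = vtkImageEllipsoidSource()
source.SetWholeExtent(0, 63, 0, 63, 0, 0)

exporter = vtkImageExportToArray()
exporter.SetInputConnection(source.GetOutputPort())
array = exporter.GetArray()
print(array.shape)  # (1, 64, 64): ordered z, y, x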
# Copyright 2017 the GPflow authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # -*- coding: utf-8 -*- from unittest.mock import patch import numpy as np import pytest import tensorflow as tf from numpy.testing import assert_allclose, assert_almost_equal import gpflow from gpflow import Parameter, default_float, default_jitter from gpflow.base import AnyNDArray, TensorType from gpflow.inducing_variables import InducingPoints from gpflow.kernels import Kernel from gpflow.kullback_leiblers import gauss_kl, prior_kl from gpflow.utilities.bijectors import triangular rng = np.random.RandomState(0) # ------------------------------------------ # Fixtures # ------------------------------------------ Ln = 2 Nn = 10 Mn = 50 @pytest.fixture(scope="module") def kernel() -> Kernel: k = gpflow.kernels.Matern32() + gpflow.kernels.White() k.kernels[1].variance.assign(0.01) return k @pytest.fixture(scope="module") def inducing_points() -> InducingPoints: return InducingPoints(rng.randn(Nn, 1)) @pytest.fixture(scope="module") def mu() -> Parameter: return Parameter(rng.randn(Nn, Ln)) # ------------------------------------------ # Helpers # ------------------------------------------ def make_sqrt(N: int, M: int) -> TensorType: return np.array([np.tril(rng.randn(M, M)) for _ in range(N)]) # [N, M, M] def make_K_batch(N: int, M: int) -> TensorType: K_np = rng.randn(N, M, M) beye = np.array([np.eye(M) for _ in range(N)]) return 0.1 * (K_np + np.transpose(K_np, (0, 2, 1))) + beye def compute_kl_1d(q_mu: TensorType, q_sigma: TensorType, p_var: TensorType = 1.0) -> TensorType: p_var = tf.ones_like(q_sigma) if p_var is None else p_var q_var = tf.square(q_sigma) kl = 0.5 * (q_var / p_var + tf.square(q_mu) / p_var - 1 + tf.math.log(p_var / q_var)) return tf.reduce_sum(kl) # ------------------------------------------ # Data classes: storing constants # ------------------------------------------ class Datum: M, N = 5, 4 mu = rng.randn(M, N) # [M, N] A = rng.randn(M, M) I = np.eye(M) # [M, M] K = A @ A.T + default_jitter() * I # [M, M] sqrt = make_sqrt(N, M) # [N, M, M] sqrt_diag = rng.randn(M, N) # [M, N] K_batch = make_K_batch(N, M) K_cholesky = np.linalg.cholesky(K) @pytest.mark.parametrize("diag", [True, False]) def test_kl_k_cholesky(diag: bool) -> None: """ Test that passing K or K_cholesky yield the same answer """ q_mu = Datum.mu q_sqrt = Datum.sqrt_diag if diag else Datum.sqrt kl_K = gauss_kl(q_mu, q_sqrt, K=Datum.K) kl_K_chol = gauss_kl(q_mu, q_sqrt, K_cholesky=Datum.K_cholesky) np.testing.assert_allclose(kl_K.numpy(), kl_K_chol.numpy()) @pytest.mark.parametrize("white", [True, False]) def test_diags(white: bool) -> None: """ The covariance of q(x) can be Cholesky matrices or diagonal matrices. Here we make sure the behaviours overlap. """ # the chols are diagonal matrices, with the same entries as the diag representation. 
chol_from_diag = tf.stack( [tf.linalg.diag(Datum.sqrt_diag[:, i]) for i in range(Datum.N)] # [N, M, M] ) kl_diag = gauss_kl(Datum.mu, Datum.sqrt_diag, Datum.K if white else None) kl_dense = gauss_kl(Datum.mu, chol_from_diag, Datum.K if white else None) np.testing.assert_allclose(kl_diag, kl_dense) @pytest.mark.parametrize("diag", [True, False]) def test_whitened(diag: bool) -> None: """ Check that K=Identity and K=None give same answer """ chol_from_diag = tf.stack( [tf.linalg.diag(Datum.sqrt_diag[:, i]) for i in range(Datum.N)] # [N, M, M] ) s = Datum.sqrt_diag if diag else chol_from_diag kl_white = gauss_kl(Datum.mu, s) kl_nonwhite = gauss_kl(Datum.mu, s, Datum.I) np.testing.assert_allclose(kl_white, kl_nonwhite) @pytest.mark.parametrize("shared_k", [True, False]) @pytest.mark.parametrize("diag", [True, False]) def test_sumkl_equals_batchkl(shared_k: bool, diag: bool) -> None: """ gauss_kl implicitely performs a sum of KL divergences This test checks that doing the sum outside of the function is equivalent For q(X)=prod q(x_l) and p(X)=prod p(x_l), check that sum KL(q(x_l)||p(x_l)) = KL(q(X)||p(X)) Here, q(X) has covariance [L, M, M] p(X) has covariance [L, M, M] ( or [M, M] ) Here, q(x_i) has covariance [1, M, M] p(x_i) has covariance [M, M] """ s = Datum.sqrt_diag if diag else Datum.sqrt kl_batch = gauss_kl(Datum.mu, s, Datum.K if shared_k else Datum.K_batch) kl_sum = [] for n in range(Datum.N): q_mu_n = Datum.mu[:, n][:, None] # [M, 1] q_sqrt_n = ( Datum.sqrt_diag[:, n][:, None] if diag else Datum.sqrt[n, :, :][None, :, :] ) # [1, M, M] or [M, 1] K_n = Datum.K if shared_k else Datum.K_batch[n, :, :][None, :, :] # [1, M, M] or [M, M] kl_n = gauss_kl(q_mu_n, q_sqrt_n, K=K_n) kl_sum.append(kl_n) kl_sum = tf.reduce_sum(kl_sum) assert_almost_equal(kl_sum, kl_batch) @patch("tensorflow.__version__", "2.1.0") def test_sumkl_equals_batchkl_shared_k_not_diag_mocked_tf21() -> None: """ Version of test_sumkl_equals_batchkl with shared_k=True and diag=False that tests the TensorFlow < 2.2 workaround with tiling still works. """ kl_batch = gauss_kl(Datum.mu, Datum.sqrt, Datum.K) kl_sum = [] for n in range(Datum.N): q_mu_n = Datum.mu[:, n][:, None] # [M, 1] q_sqrt_n = Datum.sqrt[n, :, :][None, :, :] # [1, M, M] or [M, 1] K_n = Datum.K # [1, M, M] or [M, M] kl_n = gauss_kl(q_mu_n, q_sqrt_n, K=K_n) kl_sum.append(kl_n) kl_sum = tf.reduce_sum(kl_sum) assert_almost_equal(kl_sum, kl_batch) @pytest.mark.parametrize("dim", [0, 1]) @pytest.mark.parametrize("white", [True, False]) def test_oned(white: bool, dim: bool) -> None: """ Check that the KL divergence matches a 1D by-hand calculation. """ mu1d = Datum.mu[dim, :][None, :] # [1, N] s1d = Datum.sqrt[:, dim, dim][:, None, None] # [N, 1, 1] K1d = Datum.K_batch[:, dim, dim][:, None, None] # [N, 1, 1] kl = gauss_kl(mu1d, s1d, K1d if not white else None) kl_1d = compute_kl_1d( tf.reshape(mu1d, (-1,)), # N tf.reshape(s1d, (-1,)), # N None if white else tf.reshape(K1d, (-1,)), ) # N np.testing.assert_allclose(kl, kl_1d) def test_unknown_size_inputs() -> None: """ Test for #725 and #734. When the shape of the Gaussian's mean had at least one unknown parameter, `gauss_kl` would blow up. This happened because `tf.size` can only output types `tf.int32` or `tf.int64`. 
""" mu: AnyNDArray = np.ones([1, 4], dtype=default_float()) sqrt: AnyNDArray = np.ones([4, 1, 1], dtype=default_float()) known_shape = gauss_kl(*map(tf.constant, [mu, sqrt])) unknown_shape = gauss_kl(mu, sqrt) np.testing.assert_allclose(known_shape, unknown_shape) @pytest.mark.parametrize("white", [True, False]) def test_q_sqrt_constraints( inducing_points: bool, kernel: Kernel, mu: AnyNDArray, white: bool ) -> None: """Test that sending in an unconstrained q_sqrt returns the same conditional evaluation and gradients. This is important to match the behaviour of the KL, which enforces q_sqrt is triangular. """ tril = np.tril(rng.randn(Ln, Nn, Nn)) q_sqrt_constrained = Parameter(tril, transform=triangular()) q_sqrt_unconstrained = Parameter(tril) diff_before_gradient_step = (q_sqrt_constrained - q_sqrt_unconstrained).numpy() assert_allclose(diff_before_gradient_step, 0) kls = [] for q_sqrt in [q_sqrt_constrained, q_sqrt_unconstrained]: with tf.GradientTape() as tape: kl = prior_kl(inducing_points, kernel, mu, q_sqrt, whiten=white) grad = tape.gradient(kl, q_sqrt.unconstrained_variable) q_sqrt.unconstrained_variable.assign_sub(grad) kls.append(kl) diff_kls_before_gradient_step = kls[0] - kls[1] assert_allclose(diff_kls_before_gradient_step, 0) diff_after_gradient_step = (q_sqrt_constrained - q_sqrt_unconstrained).numpy() assert_allclose(diff_after_gradient_step, 0)
GPflow/GPflow
tests/gpflow/test_kullback_leiblers.py
Python
apache-2.0
8,690
[ "Gaussian" ]
373dd001ef14fddd9a5287395e78dc04a21aaaeb5406ea9b5a17bc061352a699
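For a single latent function, gauss_kl evaluates the closed-form KL divergence KL(N(mu, L L^T) || N(0, K)). A by-hand numpy check of that formula — not part of GPflow's test suite, with shapes chosen to match gauss_kl's [M, L] / [L, M, M] convention:

import numpy as np

rng = np.random.RandomState(0)
M = 5
mu = rng.randn(M, 1)                          # q_mu for one latent function, [M, 1]
L = np.tril(rng.randn(M, M)) + M * np.eye(M)  # well-conditioned Cholesky factor
S = L @ L.T                                   # q covariance
A = rng.randn(M, M)
K = A @ A.T + 1e-6 * np.eye(M)                # p covariance

Kinv = np.linalg.inv(K)
kl = 0.5 * (np.trace(Kinv @ S)
            + (mu.T @ Kinv @ mu)[0, 0]
            - M
            + np.linalg.slogdet(K)[1]
            - np.linalg.slogdet(S)[1])
# gauss_kl(mu, L[None, :, :], K=K) should agree with `kl` up to numerical precision.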
# -*- coding: utf-8 -*- import sys import textwrap import pytest from _pytest import fixtures from _pytest.fixtures import FixtureLookupError from _pytest.fixtures import FixtureRequest from _pytest.pathlib import Path from _pytest.pytester import get_public_names def test_getfuncargnames(): def f(): pass assert not fixtures.getfuncargnames(f) def g(arg): pass assert fixtures.getfuncargnames(g) == ("arg",) def h(arg1, arg2="hello"): pass assert fixtures.getfuncargnames(h) == ("arg1",) def h(arg1, arg2, arg3="hello"): pass assert fixtures.getfuncargnames(h) == ("arg1", "arg2") class A(object): def f(self, arg1, arg2="hello"): pass @staticmethod def static(arg1, arg2): pass assert fixtures.getfuncargnames(A().f) == ("arg1",) assert fixtures.getfuncargnames(A.static, cls=A) == ("arg1", "arg2") @pytest.mark.pytester_example_path("fixtures/fill_fixtures") class TestFillFixtures(object): def test_fillfuncargs_exposed(self): # used by oejskit, kept for compatibility assert pytest._fillfuncargs == fixtures.fillfixtures def test_funcarg_lookupfails(self, testdir): testdir.copy_example() result = testdir.runpytest() # "--collect-only") assert result.ret != 0 result.stdout.fnmatch_lines( """ *def test_func(some)* *fixture*some*not found* *xyzsomething* """ ) def test_detect_recursive_dependency_error(self, testdir): testdir.copy_example() result = testdir.runpytest() result.stdout.fnmatch_lines( ["*recursive dependency involving fixture 'fix1' detected*"] ) def test_funcarg_basic(self, testdir): testdir.copy_example() item = testdir.getitem(Path("test_funcarg_basic.py")) fixtures.fillfixtures(item) del item.funcargs["request"] assert len(get_public_names(item.funcargs)) == 2 assert item.funcargs["some"] == "test_func" assert item.funcargs["other"] == 42 def test_funcarg_lookup_modulelevel(self, testdir): testdir.copy_example() reprec = testdir.inline_run() reprec.assertoutcome(passed=2) def test_funcarg_lookup_classlevel(self, testdir): p = testdir.copy_example() result = testdir.runpytest(p) result.stdout.fnmatch_lines(["*1 passed*"]) def test_conftest_funcargs_only_available_in_subdir(self, testdir): testdir.copy_example() result = testdir.runpytest("-v") result.assert_outcomes(passed=2) def test_extend_fixture_module_class(self, testdir): testfile = testdir.copy_example() result = testdir.runpytest() result.stdout.fnmatch_lines(["*1 passed*"]) result = testdir.runpytest(testfile) result.stdout.fnmatch_lines(["*1 passed*"]) def test_extend_fixture_conftest_module(self, testdir): p = testdir.copy_example() result = testdir.runpytest() result.stdout.fnmatch_lines(["*1 passed*"]) result = testdir.runpytest(next(p.visit("test_*.py"))) result.stdout.fnmatch_lines(["*1 passed*"]) def test_extend_fixture_conftest_conftest(self, testdir): p = testdir.copy_example() result = testdir.runpytest() result.stdout.fnmatch_lines(["*1 passed*"]) result = testdir.runpytest(next(p.visit("test_*.py"))) result.stdout.fnmatch_lines(["*1 passed*"]) def test_extend_fixture_conftest_plugin(self, testdir): testdir.makepyfile( testplugin=""" import pytest @pytest.fixture def foo(): return 7 """ ) testdir.syspathinsert() testdir.makeconftest( """ import pytest pytest_plugins = 'testplugin' @pytest.fixture def foo(foo): return foo + 7 """ ) testdir.makepyfile( """ def test_foo(foo): assert foo == 14 """ ) result = testdir.runpytest("-s") assert result.ret == 0 def test_extend_fixture_plugin_plugin(self, testdir): # Two plugins should extend each order in loading order testdir.makepyfile( testplugin0=""" import pytest 
@pytest.fixture def foo(): return 7 """ ) testdir.makepyfile( testplugin1=""" import pytest @pytest.fixture def foo(foo): return foo + 7 """ ) testdir.syspathinsert() testdir.makepyfile( """ pytest_plugins = ['testplugin0', 'testplugin1'] def test_foo(foo): assert foo == 14 """ ) result = testdir.runpytest() assert result.ret == 0 def test_override_parametrized_fixture_conftest_module(self, testdir): """Test override of the parametrized fixture with non-parametrized one on the test module level.""" testdir.makeconftest( """ import pytest @pytest.fixture(params=[1, 2, 3]) def spam(request): return request.param """ ) testfile = testdir.makepyfile( """ import pytest @pytest.fixture def spam(): return 'spam' def test_spam(spam): assert spam == 'spam' """ ) result = testdir.runpytest() result.stdout.fnmatch_lines(["*1 passed*"]) result = testdir.runpytest(testfile) result.stdout.fnmatch_lines(["*1 passed*"]) def test_override_parametrized_fixture_conftest_conftest(self, testdir): """Test override of the parametrized fixture with non-parametrized one on the conftest level.""" testdir.makeconftest( """ import pytest @pytest.fixture(params=[1, 2, 3]) def spam(request): return request.param """ ) subdir = testdir.mkpydir("subdir") subdir.join("conftest.py").write( textwrap.dedent( """\ import pytest @pytest.fixture def spam(): return 'spam' """ ) ) testfile = subdir.join("test_spam.py") testfile.write( textwrap.dedent( """\ def test_spam(spam): assert spam == "spam" """ ) ) result = testdir.runpytest() result.stdout.fnmatch_lines(["*1 passed*"]) result = testdir.runpytest(testfile) result.stdout.fnmatch_lines(["*1 passed*"]) def test_override_non_parametrized_fixture_conftest_module(self, testdir): """Test override of the non-parametrized fixture with parametrized one on the test module level.""" testdir.makeconftest( """ import pytest @pytest.fixture def spam(): return 'spam' """ ) testfile = testdir.makepyfile( """ import pytest @pytest.fixture(params=[1, 2, 3]) def spam(request): return request.param params = {'spam': 1} def test_spam(spam): assert spam == params['spam'] params['spam'] += 1 """ ) result = testdir.runpytest() result.stdout.fnmatch_lines(["*3 passed*"]) result = testdir.runpytest(testfile) result.stdout.fnmatch_lines(["*3 passed*"]) def test_override_non_parametrized_fixture_conftest_conftest(self, testdir): """Test override of the non-parametrized fixture with parametrized one on the conftest level.""" testdir.makeconftest( """ import pytest @pytest.fixture def spam(): return 'spam' """ ) subdir = testdir.mkpydir("subdir") subdir.join("conftest.py").write( textwrap.dedent( """\ import pytest @pytest.fixture(params=[1, 2, 3]) def spam(request): return request.param """ ) ) testfile = subdir.join("test_spam.py") testfile.write( textwrap.dedent( """\ params = {'spam': 1} def test_spam(spam): assert spam == params['spam'] params['spam'] += 1 """ ) ) result = testdir.runpytest() result.stdout.fnmatch_lines(["*3 passed*"]) result = testdir.runpytest(testfile) result.stdout.fnmatch_lines(["*3 passed*"]) def test_override_autouse_fixture_with_parametrized_fixture_conftest_conftest( self, testdir ): """Test override of the autouse fixture with parametrized one on the conftest level. 
This test covers the issue explained in issue 1601 """ testdir.makeconftest( """ import pytest @pytest.fixture(autouse=True) def spam(): return 'spam' """ ) subdir = testdir.mkpydir("subdir") subdir.join("conftest.py").write( textwrap.dedent( """\ import pytest @pytest.fixture(params=[1, 2, 3]) def spam(request): return request.param """ ) ) testfile = subdir.join("test_spam.py") testfile.write( textwrap.dedent( """\ params = {'spam': 1} def test_spam(spam): assert spam == params['spam'] params['spam'] += 1 """ ) ) result = testdir.runpytest() result.stdout.fnmatch_lines(["*3 passed*"]) result = testdir.runpytest(testfile) result.stdout.fnmatch_lines(["*3 passed*"]) def test_autouse_fixture_plugin(self, testdir): # A fixture from a plugin has no baseid set, which screwed up # the autouse fixture handling. testdir.makepyfile( testplugin=""" import pytest @pytest.fixture(autouse=True) def foo(request): request.function.foo = 7 """ ) testdir.syspathinsert() testdir.makepyfile( """ pytest_plugins = 'testplugin' def test_foo(request): assert request.function.foo == 7 """ ) result = testdir.runpytest() assert result.ret == 0 def test_funcarg_lookup_error(self, testdir): testdir.makeconftest( """ import pytest @pytest.fixture def a_fixture(): pass @pytest.fixture def b_fixture(): pass @pytest.fixture def c_fixture(): pass @pytest.fixture def d_fixture(): pass """ ) testdir.makepyfile( """ def test_lookup_error(unknown): pass """ ) result = testdir.runpytest() result.stdout.fnmatch_lines( [ "*ERROR at setup of test_lookup_error*", " def test_lookup_error(unknown):*", "E fixture 'unknown' not found", "> available fixtures:*a_fixture,*b_fixture,*c_fixture,*d_fixture*monkeypatch,*", # sorted "> use 'py*test --fixtures *' for help on them.", "*1 error*", ] ) assert "INTERNAL" not in result.stdout.str() def test_fixture_excinfo_leak(self, testdir): # on python2 sys.excinfo would leak into fixture executions testdir.makepyfile( """ import sys import traceback import pytest @pytest.fixture def leak(): if sys.exc_info()[0]: # python3 bug :) traceback.print_exc() #fails assert sys.exc_info() == (None, None, None) def test_leak(leak): if sys.exc_info()[0]: # python3 bug :) traceback.print_exc() assert sys.exc_info() == (None, None, None) """ ) result = testdir.runpytest() assert result.ret == 0 class TestRequestBasic(object): def test_request_attributes(self, testdir): item = testdir.getitem( """ import pytest @pytest.fixture def something(request): pass def test_func(something): pass """ ) req = fixtures.FixtureRequest(item) assert req.function == item.obj assert req.keywords == item.keywords assert hasattr(req.module, "test_func") assert req.cls is None assert req.function.__name__ == "test_func" assert req.config == item.config assert repr(req).find(req.function.__name__) != -1 def test_request_attributes_method(self, testdir): item, = testdir.getitems( """ import pytest class TestB(object): @pytest.fixture def something(self, request): return 1 def test_func(self, something): pass """ ) req = item._request assert req.cls.__name__ == "TestB" assert req.instance.__class__ == req.cls def test_request_contains_funcarg_arg2fixturedefs(self, testdir): modcol = testdir.getmodulecol( """ import pytest @pytest.fixture def something(request): pass class TestClass(object): def test_method(self, something): pass """ ) item1, = testdir.genitems([modcol]) assert item1.name == "test_method" arg2fixturedefs = fixtures.FixtureRequest(item1)._arg2fixturedefs assert len(arg2fixturedefs) == 1 assert 
arg2fixturedefs["something"][0].argname == "something" @pytest.mark.skipif( hasattr(sys, "pypy_version_info"), reason="this method of test doesn't work on pypy", ) def test_request_garbage(self, testdir): try: import xdist # noqa except ImportError: pass else: pytest.xfail("this test is flaky when executed with xdist") testdir.makepyfile( """ import sys import pytest from _pytest.fixtures import PseudoFixtureDef import gc @pytest.fixture(autouse=True) def something(request): original = gc.get_debug() gc.set_debug(gc.DEBUG_SAVEALL) gc.collect() yield try: gc.collect() leaked_types = sum(1 for _ in gc.garbage if isinstance(_, PseudoFixtureDef)) # debug leaked types if the test fails print(leaked_types) gc.garbage[:] = [] assert leaked_types == 0 finally: gc.set_debug(original) def test_func(): pass """ ) result = testdir.runpytest() result.stdout.fnmatch_lines("* 1 passed in *") def test_getfixturevalue_recursive(self, testdir): testdir.makeconftest( """ import pytest @pytest.fixture def something(request): return 1 """ ) testdir.makepyfile( """ import pytest @pytest.fixture def something(request): return request.getfixturevalue("something") + 1 def test_func(something): assert something == 2 """ ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) @pytest.mark.parametrize("getfixmethod", ("getfixturevalue", "getfuncargvalue")) def test_getfixturevalue(self, testdir, getfixmethod): item = testdir.getitem( """ import pytest values = [2] @pytest.fixture def something(request): return 1 @pytest.fixture def other(request): return values.pop() def test_func(something): pass """ ) import contextlib if getfixmethod == "getfuncargvalue": warning_expectation = pytest.warns(DeprecationWarning) else: # see #1830 for a cleaner way to accomplish this @contextlib.contextmanager def expecting_no_warning(): yield warning_expectation = expecting_no_warning() req = item._request with warning_expectation: fixture_fetcher = getattr(req, getfixmethod) with pytest.raises(FixtureLookupError): fixture_fetcher("notexists") val = fixture_fetcher("something") assert val == 1 val = fixture_fetcher("something") assert val == 1 val2 = fixture_fetcher("other") assert val2 == 2 val2 = fixture_fetcher("other") # see about caching assert val2 == 2 pytest._fillfuncargs(item) assert item.funcargs["something"] == 1 assert len(get_public_names(item.funcargs)) == 2 assert "request" in item.funcargs def test_request_addfinalizer(self, testdir): item = testdir.getitem( """ import pytest teardownlist = [] @pytest.fixture def something(request): request.addfinalizer(lambda: teardownlist.append(1)) def test_func(something): pass """ ) item.session._setupstate.prepare(item) pytest._fillfuncargs(item) # successively check finalization calls teardownlist = item.getparent(pytest.Module).obj.teardownlist ss = item.session._setupstate assert not teardownlist ss.teardown_exact(item, None) print(ss.stack) assert teardownlist == [1] def test_mark_as_fixture_with_prefix_and_decorator_fails(self, testdir): testdir.makeconftest( """ import pytest @pytest.fixture def pytest_funcarg__marked_with_prefix_and_decorator(): pass """ ) result = testdir.runpytest_subprocess() assert result.ret != 0 result.stdout.fnmatch_lines( [ "*AssertionError: fixtures cannot have*@pytest.fixture*", "*pytest_funcarg__marked_with_prefix_and_decorator*", ] ) def test_request_addfinalizer_failing_setup(self, testdir): testdir.makepyfile( """ import pytest values = [1] @pytest.fixture def myfix(request): request.addfinalizer(values.pop) assert 0 def 
test_fix(myfix): pass def test_finalizer_ran(): assert not values """ ) reprec = testdir.inline_run("-s") reprec.assertoutcome(failed=1, passed=1) def test_request_addfinalizer_failing_setup_module(self, testdir): testdir.makepyfile( """ import pytest values = [1, 2] @pytest.fixture(scope="module") def myfix(request): request.addfinalizer(values.pop) request.addfinalizer(values.pop) assert 0 def test_fix(myfix): pass """ ) reprec = testdir.inline_run("-s") mod = reprec.getcalls("pytest_runtest_setup")[0].item.module assert not mod.values def test_request_addfinalizer_partial_setup_failure(self, testdir): p = testdir.makepyfile( """ import pytest values = [] @pytest.fixture def something(request): request.addfinalizer(lambda: values.append(None)) def test_func(something, missingarg): pass def test_second(): assert len(values) == 1 """ ) result = testdir.runpytest(p) result.stdout.fnmatch_lines( ["*1 error*"] # XXX the whole module collection fails ) def test_request_subrequest_addfinalizer_exceptions(self, testdir): """ Ensure exceptions raised during teardown by a finalizer are suppressed until all finalizers are called, re-raising the first exception (#2440) """ testdir.makepyfile( """ import pytest values = [] def _excepts(where): raise Exception('Error in %s fixture' % where) @pytest.fixture def subrequest(request): return request @pytest.fixture def something(subrequest): subrequest.addfinalizer(lambda: values.append(1)) subrequest.addfinalizer(lambda: values.append(2)) subrequest.addfinalizer(lambda: _excepts('something')) @pytest.fixture def excepts(subrequest): subrequest.addfinalizer(lambda: _excepts('excepts')) subrequest.addfinalizer(lambda: values.append(3)) def test_first(something, excepts): pass def test_second(): assert values == [3, 2, 1] """ ) result = testdir.runpytest() result.stdout.fnmatch_lines( ["*Exception: Error in excepts fixture", "* 2 passed, 1 error in *"] ) def test_request_getmodulepath(self, testdir): modcol = testdir.getmodulecol("def test_somefunc(): pass") item, = testdir.genitems([modcol]) req = fixtures.FixtureRequest(item) assert req.fspath == modcol.fspath def test_request_fixturenames(self, testdir): testdir.makepyfile( """ import pytest from _pytest.pytester import get_public_names @pytest.fixture() def arg1(): pass @pytest.fixture() def farg(arg1): pass @pytest.fixture(autouse=True) def sarg(tmpdir): pass def test_function(request, farg): assert set(get_public_names(request.fixturenames)) == \ set(["tmpdir", "sarg", "arg1", "request", "farg", "tmpdir_factory"]) """ ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_request_fixturenames_dynamic_fixture(self, testdir): """Regression test for #3057""" testdir.copy_example("fixtures/test_getfixturevalue_dynamic.py") result = testdir.runpytest() result.stdout.fnmatch_lines("*1 passed*") def test_funcargnames_compatattr(self, testdir): testdir.makepyfile( """ import pytest def pytest_generate_tests(metafunc): assert metafunc.funcargnames == metafunc.fixturenames @pytest.fixture def fn(request): assert request._pyfuncitem.funcargnames == \ request._pyfuncitem.fixturenames return request.funcargnames, request.fixturenames def test_hello(fn): assert fn[0] == fn[1] """ ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_setupdecorator_and_xunit(self, testdir): testdir.makepyfile( """ import pytest values = [] @pytest.fixture(scope='module', autouse=True) def setup_module(): values.append("module") @pytest.fixture(autouse=True) def setup_function(): 
values.append("function") def test_func(): pass class TestClass(object): @pytest.fixture(scope="class", autouse=True) def setup_class(self): values.append("class") @pytest.fixture(autouse=True) def setup_method(self): values.append("method") def test_method(self): pass def test_all(): assert values == ["module", "function", "class", "function", "method", "function"] """ ) reprec = testdir.inline_run("-v") reprec.assertoutcome(passed=3) def test_fixtures_sub_subdir_normalize_sep(self, testdir): # this tests that normalization of nodeids takes place b = testdir.mkdir("tests").mkdir("unit") b.join("conftest.py").write( textwrap.dedent( """\ import pytest @pytest.fixture def arg1(): pass """ ) ) p = b.join("test_module.py") p.write("def test_func(arg1): pass") result = testdir.runpytest(p, "--fixtures") assert result.ret == 0 result.stdout.fnmatch_lines( """ *fixtures defined*conftest* *arg1* """ ) def test_show_fixtures_color_yes(self, testdir): testdir.makepyfile("def test_this(): assert 1") result = testdir.runpytest("--color=yes", "--fixtures") assert "\x1b[32mtmpdir" in result.stdout.str() def test_newstyle_with_request(self, testdir): testdir.makepyfile( """ import pytest @pytest.fixture() def arg(request): pass def test_1(arg): pass """ ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_setupcontext_no_param(self, testdir): testdir.makepyfile( """ import pytest @pytest.fixture(params=[1,2]) def arg(request): return request.param @pytest.fixture(autouse=True) def mysetup(request, arg): assert not hasattr(request, "param") def test_1(arg): assert arg in (1,2) """ ) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) class TestRequestMarking(object): def test_applymarker(self, testdir): item1, item2 = testdir.getitems( """ import pytest @pytest.fixture def something(request): pass class TestClass(object): def test_func1(self, something): pass def test_func2(self, something): pass """ ) req1 = fixtures.FixtureRequest(item1) assert "xfail" not in item1.keywords req1.applymarker(pytest.mark.xfail) assert "xfail" in item1.keywords assert "skipif" not in item1.keywords req1.applymarker(pytest.mark.skipif) assert "skipif" in item1.keywords pytest.raises(ValueError, "req1.applymarker(42)") def test_accesskeywords(self, testdir): testdir.makepyfile( """ import pytest @pytest.fixture() def keywords(request): return request.keywords @pytest.mark.XYZ def test_function(keywords): assert keywords["XYZ"] assert "abc" not in keywords """ ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_accessmarker_dynamic(self, testdir): testdir.makeconftest( """ import pytest @pytest.fixture() def keywords(request): return request.keywords @pytest.fixture(scope="class", autouse=True) def marking(request): request.applymarker(pytest.mark.XYZ("hello")) """ ) testdir.makepyfile( """ import pytest def test_fun1(keywords): assert keywords["XYZ"] is not None assert "abc" not in keywords def test_fun2(keywords): assert keywords["XYZ"] is not None assert "abc" not in keywords """ ) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) class TestRequestCachedSetup(object): def test_request_cachedsetup_defaultmodule(self, testdir): reprec = testdir.inline_runsource( """ mysetup = ["hello",].pop import pytest @pytest.fixture def something(request): return request.cached_setup(mysetup, scope="module") def test_func1(something): assert something == "hello" class TestClass(object): def test_func1a(self, something): assert something == "hello" """ ) 
reprec.assertoutcome(passed=2) def test_request_cachedsetup_class(self, testdir): reprec = testdir.inline_runsource( """ mysetup = ["hello", "hello2", "hello3"].pop import pytest @pytest.fixture def something(request): return request.cached_setup(mysetup, scope="class") def test_func1(something): assert something == "hello3" def test_func2(something): assert something == "hello2" class TestClass(object): def test_func1a(self, something): assert something == "hello" def test_func2b(self, something): assert something == "hello" """ ) reprec.assertoutcome(passed=4) @pytest.mark.filterwarnings("ignore:cached_setup is deprecated") def test_request_cachedsetup_extrakey(self, testdir): item1 = testdir.getitem("def test_func(): pass") req1 = fixtures.FixtureRequest(item1) values = ["hello", "world"] def setup(): return values.pop() ret1 = req1.cached_setup(setup, extrakey=1) ret2 = req1.cached_setup(setup, extrakey=2) assert ret2 == "hello" assert ret1 == "world" ret1b = req1.cached_setup(setup, extrakey=1) ret2b = req1.cached_setup(setup, extrakey=2) assert ret1 == ret1b assert ret2 == ret2b @pytest.mark.filterwarnings("ignore:cached_setup is deprecated") def test_request_cachedsetup_cache_deletion(self, testdir): item1 = testdir.getitem("def test_func(): pass") req1 = fixtures.FixtureRequest(item1) values = [] def setup(): values.append("setup") def teardown(val): values.append("teardown") req1.cached_setup(setup, teardown, scope="function") assert values == ["setup"] # artificial call of finalizer setupstate = req1._pyfuncitem.session._setupstate setupstate._callfinalizers(item1) assert values == ["setup", "teardown"] req1.cached_setup(setup, teardown, scope="function") assert values == ["setup", "teardown", "setup"] setupstate._callfinalizers(item1) assert values == ["setup", "teardown", "setup", "teardown"] def test_request_cached_setup_two_args(self, testdir): testdir.makepyfile( """ import pytest @pytest.fixture def arg1(request): return request.cached_setup(lambda: 42) @pytest.fixture def arg2(request): return request.cached_setup(lambda: 17) def test_two_different_setups(arg1, arg2): assert arg1 != arg2 """ ) result = testdir.runpytest("-v") result.stdout.fnmatch_lines(["*1 passed*"]) def test_request_cached_setup_getfixturevalue(self, testdir): testdir.makepyfile( """ import pytest @pytest.fixture def arg1(request): arg1 = request.getfixturevalue("arg2") return request.cached_setup(lambda: arg1 + 1) @pytest.fixture def arg2(request): return request.cached_setup(lambda: 10) def test_two_funcarg(arg1): assert arg1 == 11 """ ) result = testdir.runpytest("-v") result.stdout.fnmatch_lines(["*1 passed*"]) def test_request_cached_setup_functional(self, testdir): testdir.makepyfile( test_0=""" import pytest values = [] @pytest.fixture def something(request): val = request.cached_setup(fsetup, fteardown) return val def fsetup(mycache=[1]): values.append(mycache.pop()) return values def fteardown(something): values.remove(something[0]) values.append(2) def test_list_once(something): assert something == [1] def test_list_twice(something): assert something == [1] """ ) testdir.makepyfile( test_1=""" import test_0 # should have run already def test_check_test0_has_teardown_correct(): assert test_0.values == [2] """ ) result = testdir.runpytest("-v") result.stdout.fnmatch_lines(["*3 passed*"]) def test_issue117_sessionscopeteardown(self, testdir): testdir.makepyfile( """ import pytest @pytest.fixture def app(request): app = request.cached_setup( scope='session', setup=lambda: 0, teardown=lambda x: 
3/x) return app def test_func(app): pass """ ) result = testdir.runpytest() assert result.ret != 0 result.stdout.fnmatch_lines(["*3/x*", "*ZeroDivisionError*"]) class TestFixtureUsages(object): def test_noargfixturedec(self, testdir): testdir.makepyfile( """ import pytest @pytest.fixture def arg1(): return 1 def test_func(arg1): assert arg1 == 1 """ ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_receives_funcargs(self, testdir): testdir.makepyfile( """ import pytest @pytest.fixture() def arg1(): return 1 @pytest.fixture() def arg2(arg1): return arg1 + 1 def test_add(arg2): assert arg2 == 2 def test_all(arg1, arg2): assert arg1 == 1 assert arg2 == 2 """ ) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) def test_receives_funcargs_scope_mismatch(self, testdir): testdir.makepyfile( """ import pytest @pytest.fixture(scope="function") def arg1(): return 1 @pytest.fixture(scope="module") def arg2(arg1): return arg1 + 1 def test_add(arg2): assert arg2 == 2 """ ) result = testdir.runpytest() result.stdout.fnmatch_lines( [ "*ScopeMismatch*involved factories*", "* def arg2*", "* def arg1*", "*1 error*", ] ) def test_receives_funcargs_scope_mismatch_issue660(self, testdir): testdir.makepyfile( """ import pytest @pytest.fixture(scope="function") def arg1(): return 1 @pytest.fixture(scope="module") def arg2(arg1): return arg1 + 1 def test_add(arg1, arg2): assert arg2 == 2 """ ) result = testdir.runpytest() result.stdout.fnmatch_lines( ["*ScopeMismatch*involved factories*", "* def arg2*", "*1 error*"] ) def test_invalid_scope(self, testdir): testdir.makepyfile( """ import pytest @pytest.fixture(scope="functions") def badscope(): pass def test_nothing(badscope): pass """ ) result = testdir.runpytest_inprocess() result.stdout.fnmatch_lines( ( "*Fixture 'badscope' from test_invalid_scope.py got an unexpected scope value 'functions'" ) ) def test_funcarg_parametrized_and_used_twice(self, testdir): testdir.makepyfile( """ import pytest values = [] @pytest.fixture(params=[1,2]) def arg1(request): values.append(1) return request.param @pytest.fixture() def arg2(arg1): return arg1 + 1 def test_add(arg1, arg2): assert arg2 == arg1 + 1 assert len(values) == arg1 """ ) result = testdir.runpytest() result.stdout.fnmatch_lines(["*2 passed*"]) def test_factory_uses_unknown_funcarg_as_dependency_error(self, testdir): testdir.makepyfile( """ import pytest @pytest.fixture() def fail(missing): return @pytest.fixture() def call_fail(fail): return def test_missing(call_fail): pass """ ) result = testdir.runpytest() result.stdout.fnmatch_lines( """ *pytest.fixture()* *def call_fail(fail)* *pytest.fixture()* *def fail* *fixture*'missing'*not found* """ ) def test_factory_setup_as_classes_fails(self, testdir): testdir.makepyfile( """ import pytest class arg1(object): def __init__(self, request): self.x = 1 arg1 = pytest.fixture()(arg1) """ ) reprec = testdir.inline_run() values = reprec.getfailedcollections() assert len(values) == 1 def test_request_can_be_overridden(self, testdir): testdir.makepyfile( """ import pytest @pytest.fixture() def request(request): request.a = 1 return request def test_request(request): assert request.a == 1 """ ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_usefixtures_marker(self, testdir): testdir.makepyfile( """ import pytest values = [] @pytest.fixture(scope="class") def myfix(request): request.cls.hello = "world" values.append(1) class TestClass(object): def test_one(self): assert self.hello == "world" assert len(values) == 1 def 
test_two(self): assert self.hello == "world" assert len(values) == 1 pytest.mark.usefixtures("myfix")(TestClass) """ ) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) def test_usefixtures_ini(self, testdir): testdir.makeini( """ [pytest] usefixtures = myfix """ ) testdir.makeconftest( """ import pytest @pytest.fixture(scope="class") def myfix(request): request.cls.hello = "world" """ ) testdir.makepyfile( """ class TestClass(object): def test_one(self): assert self.hello == "world" def test_two(self): assert self.hello == "world" """ ) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) def test_usefixtures_seen_in_showmarkers(self, testdir): result = testdir.runpytest("--markers") result.stdout.fnmatch_lines( """ *usefixtures(fixturename1*mark tests*fixtures* """ ) def test_request_instance_issue203(self, testdir): testdir.makepyfile( """ import pytest class TestClass(object): @pytest.fixture def setup1(self, request): assert self == request.instance self.arg1 = 1 def test_hello(self, setup1): assert self.arg1 == 1 """ ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_fixture_parametrized_with_iterator(self, testdir): testdir.makepyfile( """ import pytest values = [] def f(): yield 1 yield 2 dec = pytest.fixture(scope="module", params=f()) @dec def arg(request): return request.param @dec def arg2(request): return request.param def test_1(arg): values.append(arg) def test_2(arg2): values.append(arg2*10) """ ) reprec = testdir.inline_run("-v") reprec.assertoutcome(passed=4) values = reprec.getcalls("pytest_runtest_call")[0].item.module.values assert values == [1, 2, 10, 20] class TestFixtureManagerParseFactories(object): @pytest.fixture def testdir(self, request): testdir = request.getfixturevalue("testdir") testdir.makeconftest( """ import pytest @pytest.fixture def hello(request): return "conftest" @pytest.fixture def fm(request): return request._fixturemanager @pytest.fixture def item(request): return request._pyfuncitem """ ) return testdir def test_parsefactories_evil_objects_issue214(self, testdir): testdir.makepyfile( """ class A(object): def __call__(self): pass def __getattr__(self, name): raise RuntimeError() a = A() def test_hello(): pass """ ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1, failed=0) def test_parsefactories_conftest(self, testdir): testdir.makepyfile( """ def test_hello(item, fm): for name in ("fm", "hello", "item"): faclist = fm.getfixturedefs(name, item.nodeid) assert len(faclist) == 1 fac = faclist[0] assert fac.func.__name__ == name """ ) reprec = testdir.inline_run("-s") reprec.assertoutcome(passed=1) def test_parsefactories_conftest_and_module_and_class(self, testdir): testdir.makepyfile( """ import pytest import six @pytest.fixture def hello(request): return "module" class TestClass(object): @pytest.fixture def hello(self, request): return "class" def test_hello(self, item, fm): faclist = fm.getfixturedefs("hello", item.nodeid) print (faclist) assert len(faclist) == 3 assert faclist[0].func(item._request) == "conftest" assert faclist[1].func(item._request) == "module" assert faclist[2].func(item._request) == "class" """ ) reprec = testdir.inline_run("-s") reprec.assertoutcome(passed=1) def test_parsefactories_relative_node_ids(self, testdir): # example mostly taken from: # https://mail.python.org/pipermail/pytest-dev/2014-September/002617.html runner = testdir.mkdir("runner") package = testdir.mkdir("package") package.join("conftest.py").write( textwrap.dedent( """\ import pytest @pytest.fixture def 
one(): return 1 """ ) ) package.join("test_x.py").write( textwrap.dedent( """\ def test_x(one): assert one == 1 """ ) ) sub = package.mkdir("sub") sub.join("__init__.py").ensure() sub.join("conftest.py").write( textwrap.dedent( """\ import pytest @pytest.fixture def one(): return 2 """ ) ) sub.join("test_y.py").write( textwrap.dedent( """\ def test_x(one): assert one == 2 """ ) ) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) with runner.as_cwd(): reprec = testdir.inline_run("..") reprec.assertoutcome(passed=2) def test_package_xunit_fixture(self, testdir): testdir.makepyfile( __init__="""\ values = [] """ ) package = testdir.mkdir("package") package.join("__init__.py").write( textwrap.dedent( """\ from .. import values def setup_module(): values.append("package") def teardown_module(): values[:] = [] """ ) ) package.join("test_x.py").write( textwrap.dedent( """\ from .. import values def test_x(): assert values == ["package"] """ ) ) package = testdir.mkdir("package2") package.join("__init__.py").write( textwrap.dedent( """\ from .. import values def setup_module(): values.append("package2") def teardown_module(): values[:] = [] """ ) ) package.join("test_x.py").write( textwrap.dedent( """\ from .. import values def test_x(): assert values == ["package2"] """ ) ) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) def test_package_fixture_complex(self, testdir): testdir.makepyfile( __init__="""\ values = [] """ ) testdir.syspathinsert(testdir.tmpdir.dirname) package = testdir.mkdir("package") package.join("__init__.py").write("") package.join("conftest.py").write( textwrap.dedent( """\ import pytest from .. import values @pytest.fixture(scope="package") def one(): values.append("package") yield values values.pop() @pytest.fixture(scope="package", autouse=True) def two(): values.append("package-auto") yield values values.pop() """ ) ) package.join("test_x.py").write( textwrap.dedent( """\ from .. 
import values def test_package_autouse(): assert values == ["package-auto"] def test_package(one): assert values == ["package-auto", "package"] """ ) ) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) def test_collect_custom_items(self, testdir): testdir.copy_example("fixtures/custom_item") result = testdir.runpytest("foo") result.stdout.fnmatch_lines("*passed*") class TestAutouseDiscovery(object): @pytest.fixture def testdir(self, testdir): testdir.makeconftest( """ import pytest @pytest.fixture(autouse=True) def perfunction(request, tmpdir): pass @pytest.fixture() def arg1(tmpdir): pass @pytest.fixture(autouse=True) def perfunction2(arg1): pass @pytest.fixture def fm(request): return request._fixturemanager @pytest.fixture def item(request): return request._pyfuncitem """ ) return testdir def test_parsefactories_conftest(self, testdir): testdir.makepyfile( """ from _pytest.pytester import get_public_names def test_check_setup(item, fm): autousenames = fm._getautousenames(item.nodeid) assert len(get_public_names(autousenames)) == 2 assert "perfunction2" in autousenames assert "perfunction" in autousenames """ ) reprec = testdir.inline_run("-s") reprec.assertoutcome(passed=1) def test_two_classes_separated_autouse(self, testdir): testdir.makepyfile( """ import pytest class TestA(object): values = [] @pytest.fixture(autouse=True) def setup1(self): self.values.append(1) def test_setup1(self): assert self.values == [1] class TestB(object): values = [] @pytest.fixture(autouse=True) def setup2(self): self.values.append(1) def test_setup2(self): assert self.values == [1] """ ) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) def test_setup_at_classlevel(self, testdir): testdir.makepyfile( """ import pytest class TestClass(object): @pytest.fixture(autouse=True) def permethod(self, request): request.instance.funcname = request.function.__name__ def test_method1(self): assert self.funcname == "test_method1" def test_method2(self): assert self.funcname == "test_method2" """ ) reprec = testdir.inline_run("-s") reprec.assertoutcome(passed=2) @pytest.mark.xfail(reason="'enabled' feature not implemented") def test_setup_enabled_functionnode(self, testdir): testdir.makepyfile( """ import pytest def enabled(parentnode, markers): return "needsdb" in markers @pytest.fixture(params=[1,2]) def db(request): return request.param @pytest.fixture(enabled=enabled, autouse=True) def createdb(db): pass def test_func1(request): assert "db" not in request.fixturenames @pytest.mark.needsdb def test_func2(request): assert "db" in request.fixturenames """ ) reprec = testdir.inline_run("-s") reprec.assertoutcome(passed=2) def test_callables_nocode(self, testdir): """ an imported mock.call would break setup/factory discovery due to it being callable and __code__ not being a code object """ testdir.makepyfile( """ class _call(tuple): def __call__(self, *k, **kw): pass def __getattr__(self, k): return self call = _call() """ ) reprec = testdir.inline_run("-s") reprec.assertoutcome(failed=0, passed=0) def test_autouse_in_conftests(self, testdir): a = testdir.mkdir("a") b = testdir.mkdir("a1") conftest = testdir.makeconftest( """ import pytest @pytest.fixture(autouse=True) def hello(): xxx """ ) conftest.move(a.join(conftest.basename)) a.join("test_something.py").write("def test_func(): pass") b.join("test_otherthing.py").write("def test_func(): pass") result = testdir.runpytest() result.stdout.fnmatch_lines( """ *1 passed*1 error* """ ) def test_autouse_in_module_and_two_classes(self, testdir): 
testdir.makepyfile( """ import pytest values = [] @pytest.fixture(autouse=True) def append1(): values.append("module") def test_x(): assert values == ["module"] class TestA(object): @pytest.fixture(autouse=True) def append2(self): values.append("A") def test_hello(self): assert values == ["module", "module", "A"], values class TestA2(object): def test_world(self): assert values == ["module", "module", "A", "module"], values """ ) reprec = testdir.inline_run() reprec.assertoutcome(passed=3) class TestAutouseManagement(object): def test_autouse_conftest_mid_directory(self, testdir): pkgdir = testdir.mkpydir("xyz123") pkgdir.join("conftest.py").write( textwrap.dedent( """\ import pytest @pytest.fixture(autouse=True) def app(): import sys sys._myapp = "hello" """ ) ) t = pkgdir.ensure("tests", "test_app.py") t.write( textwrap.dedent( """\ import sys def test_app(): assert sys._myapp == "hello" """ ) ) reprec = testdir.inline_run("-s") reprec.assertoutcome(passed=1) def test_autouse_honored_for_yield(self, testdir): testdir.makepyfile( """ import pytest @pytest.fixture(autouse=True) def tst(): global x x = 3 def test_gen(): def f(hello): assert x == abs(hello) yield f, 3 yield f, -3 """ ) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) def test_funcarg_and_setup(self, testdir): testdir.makepyfile( """ import pytest values = [] @pytest.fixture(scope="module") def arg(): values.append(1) return 0 @pytest.fixture(scope="module", autouse=True) def something(arg): values.append(2) def test_hello(arg): assert len(values) == 2 assert values == [1,2] assert arg == 0 def test_hello2(arg): assert len(values) == 2 assert values == [1,2] assert arg == 0 """ ) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) def test_uses_parametrized_resource(self, testdir): testdir.makepyfile( """ import pytest values = [] @pytest.fixture(params=[1,2]) def arg(request): return request.param @pytest.fixture(autouse=True) def something(arg): values.append(arg) def test_hello(): if len(values) == 1: assert values == [1] elif len(values) == 2: assert values == [1, 2] else: 0/0 """ ) reprec = testdir.inline_run("-s") reprec.assertoutcome(passed=2) def test_session_parametrized_function(self, testdir): testdir.makepyfile( """ import pytest values = [] @pytest.fixture(scope="session", params=[1,2]) def arg(request): return request.param @pytest.fixture(scope="function", autouse=True) def append(request, arg): if request.function.__name__ == "test_some": values.append(arg) def test_some(): pass def test_result(arg): assert len(values) == arg assert values[:arg] == [1,2][:arg] """ ) reprec = testdir.inline_run("-v", "-s") reprec.assertoutcome(passed=4) def test_class_function_parametrization_finalization(self, testdir): p = testdir.makeconftest( """ import pytest import pprint values = [] @pytest.fixture(scope="function", params=[1,2]) def farg(request): return request.param @pytest.fixture(scope="class", params=list("ab")) def carg(request): return request.param @pytest.fixture(scope="function", autouse=True) def append(request, farg, carg): def fin(): values.append("fin_%s%s" % (carg, farg)) request.addfinalizer(fin) """ ) testdir.makepyfile( """ import pytest class TestClass(object): def test_1(self): pass class TestClass2(object): def test_2(self): pass """ ) confcut = "--confcutdir={}".format(testdir.tmpdir) reprec = testdir.inline_run("-v", "-s", confcut) reprec.assertoutcome(passed=8) config = reprec.getcalls("pytest_unconfigure")[0].config values = 
config.pluginmanager._getconftestmodules(p)[0].values assert values == ["fin_a1", "fin_a2", "fin_b1", "fin_b2"] * 2 def test_scope_ordering(self, testdir): testdir.makepyfile( """ import pytest values = [] @pytest.fixture(scope="function", autouse=True) def fappend2(): values.append(2) @pytest.fixture(scope="class", autouse=True) def classappend3(): values.append(3) @pytest.fixture(scope="module", autouse=True) def mappend(): values.append(1) class TestHallo(object): def test_method(self): assert values == [1,3,2] """ ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_parametrization_setup_teardown_ordering(self, testdir): testdir.makepyfile( """ import pytest values = [] def pytest_generate_tests(metafunc): if metafunc.cls is None: assert metafunc.function is test_finish if metafunc.cls is not None: metafunc.parametrize("item", [1,2], scope="class") class TestClass(object): @pytest.fixture(scope="class", autouse=True) def addteardown(self, item, request): values.append("setup-%d" % item) request.addfinalizer(lambda: values.append("teardown-%d" % item)) def test_step1(self, item): values.append("step1-%d" % item) def test_step2(self, item): values.append("step2-%d" % item) def test_finish(): print (values) assert values == ["setup-1", "step1-1", "step2-1", "teardown-1", "setup-2", "step1-2", "step2-2", "teardown-2",] """ ) reprec = testdir.inline_run("-s") reprec.assertoutcome(passed=5) def test_ordering_autouse_before_explicit(self, testdir): testdir.makepyfile( """ import pytest values = [] @pytest.fixture(autouse=True) def fix1(): values.append(1) @pytest.fixture() def arg1(): values.append(2) def test_hello(arg1): assert values == [1,2] """ ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) @pytest.mark.issue226 @pytest.mark.parametrize("param1", ["", "params=[1]"], ids=["p00", "p01"]) @pytest.mark.parametrize("param2", ["", "params=[1]"], ids=["p10", "p11"]) def test_ordering_dependencies_torndown_first(self, testdir, param1, param2): testdir.makepyfile( """ import pytest values = [] @pytest.fixture(%(param1)s) def arg1(request): request.addfinalizer(lambda: values.append("fin1")) values.append("new1") @pytest.fixture(%(param2)s) def arg2(request, arg1): request.addfinalizer(lambda: values.append("fin2")) values.append("new2") def test_arg(arg2): pass def test_check(): assert values == ["new1", "new2", "fin2", "fin1"] """ % locals() ) reprec = testdir.inline_run("-s") reprec.assertoutcome(passed=2) class TestFixtureMarker(object): def test_parametrize(self, testdir): testdir.makepyfile( """ import pytest @pytest.fixture(params=["a", "b", "c"]) def arg(request): return request.param values = [] def test_param(arg): values.append(arg) def test_result(): assert values == list("abc") """ ) reprec = testdir.inline_run() reprec.assertoutcome(passed=4) def test_multiple_parametrization_issue_736(self, testdir): testdir.makepyfile( """ import pytest @pytest.fixture(params=[1,2,3]) def foo(request): return request.param @pytest.mark.parametrize('foobar', [4,5,6]) def test_issue(foo, foobar): assert foo in [1,2,3] assert foobar in [4,5,6] """ ) reprec = testdir.inline_run() reprec.assertoutcome(passed=9) @pytest.mark.parametrize( "param_args", ["'fixt, val'", "'fixt,val'", "['fixt', 'val']", "('fixt', 'val')"], ) def test_override_parametrized_fixture_issue_979(self, testdir, param_args): """Make sure a parametrized argument can override a parametrized fixture. This was a regression introduced in the fix for #736. 
""" testdir.makepyfile( """ import pytest @pytest.fixture(params=[1, 2]) def fixt(request): return request.param @pytest.mark.parametrize(%s, [(3, 'x'), (4, 'x')]) def test_foo(fixt, val): pass """ % param_args ) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) def test_scope_session(self, testdir): testdir.makepyfile( """ import pytest values = [] @pytest.fixture(scope="module") def arg(): values.append(1) return 1 def test_1(arg): assert arg == 1 def test_2(arg): assert arg == 1 assert len(values) == 1 class TestClass(object): def test3(self, arg): assert arg == 1 assert len(values) == 1 """ ) reprec = testdir.inline_run() reprec.assertoutcome(passed=3) def test_scope_session_exc(self, testdir): testdir.makepyfile( """ import pytest values = [] @pytest.fixture(scope="session") def fix(): values.append(1) pytest.skip('skipping') def test_1(fix): pass def test_2(fix): pass def test_last(): assert values == [1] """ ) reprec = testdir.inline_run() reprec.assertoutcome(skipped=2, passed=1) def test_scope_session_exc_two_fix(self, testdir): testdir.makepyfile( """ import pytest values = [] m = [] @pytest.fixture(scope="session") def a(): values.append(1) pytest.skip('skipping') @pytest.fixture(scope="session") def b(a): m.append(1) def test_1(b): pass def test_2(b): pass def test_last(): assert values == [1] assert m == [] """ ) reprec = testdir.inline_run() reprec.assertoutcome(skipped=2, passed=1) def test_scope_exc(self, testdir): testdir.makepyfile( test_foo=""" def test_foo(fix): pass """, test_bar=""" def test_bar(fix): pass """, conftest=""" import pytest reqs = [] @pytest.fixture(scope="session") def fix(request): reqs.append(1) pytest.skip() @pytest.fixture def req_list(): return reqs """, test_real=""" def test_last(req_list): assert req_list == [1] """, ) reprec = testdir.inline_run() reprec.assertoutcome(skipped=2, passed=1) def test_scope_module_uses_session(self, testdir): testdir.makepyfile( """ import pytest values = [] @pytest.fixture(scope="module") def arg(): values.append(1) return 1 def test_1(arg): assert arg == 1 def test_2(arg): assert arg == 1 assert len(values) == 1 class TestClass(object): def test3(self, arg): assert arg == 1 assert len(values) == 1 """ ) reprec = testdir.inline_run() reprec.assertoutcome(passed=3) def test_scope_module_and_finalizer(self, testdir): testdir.makeconftest( """ import pytest finalized_list = [] created_list = [] @pytest.fixture(scope="module") def arg(request): created_list.append(1) assert request.scope == "module" request.addfinalizer(lambda: finalized_list.append(1)) @pytest.fixture def created(request): return len(created_list) @pytest.fixture def finalized(request): return len(finalized_list) """ ) testdir.makepyfile( test_mod1=""" def test_1(arg, created, finalized): assert created == 1 assert finalized == 0 def test_2(arg, created, finalized): assert created == 1 assert finalized == 0""", test_mod2=""" def test_3(arg, created, finalized): assert created == 2 assert finalized == 1""", test_mode3=""" def test_4(arg, created, finalized): assert created == 3 assert finalized == 2 """, ) reprec = testdir.inline_run() reprec.assertoutcome(passed=4) @pytest.mark.parametrize( "method", [ 'request.getfixturevalue("arg")', 'request.cached_setup(lambda: None, scope="function")', ], ids=["getfixturevalue", "cached_setup"], ) def test_scope_mismatch_various(self, testdir, method): testdir.makeconftest( """ import pytest finalized = [] created = [] @pytest.fixture(scope="function") def arg(request): pass """ ) testdir.makepyfile( 
test_mod1=""" import pytest @pytest.fixture(scope="session") def arg(request): %s def test_1(arg): pass """ % method ) result = testdir.runpytest() assert result.ret != 0 result.stdout.fnmatch_lines( ["*ScopeMismatch*You tried*function*session*request*"] ) def test_register_only_with_mark(self, testdir): testdir.makeconftest( """ import pytest @pytest.fixture() def arg(): return 1 """ ) testdir.makepyfile( test_mod1=""" import pytest @pytest.fixture() def arg(arg): return arg + 1 def test_1(arg): assert arg == 2 """ ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_parametrize_and_scope(self, testdir): testdir.makepyfile( """ import pytest @pytest.fixture(scope="module", params=["a", "b", "c"]) def arg(request): return request.param values = [] def test_param(arg): values.append(arg) """ ) reprec = testdir.inline_run("-v") reprec.assertoutcome(passed=3) values = reprec.getcalls("pytest_runtest_call")[0].item.module.values assert len(values) == 3 assert "a" in values assert "b" in values assert "c" in values def test_scope_mismatch(self, testdir): testdir.makeconftest( """ import pytest @pytest.fixture(scope="function") def arg(request): pass """ ) testdir.makepyfile( """ import pytest @pytest.fixture(scope="session") def arg(arg): pass def test_mismatch(arg): pass """ ) result = testdir.runpytest() result.stdout.fnmatch_lines(["*ScopeMismatch*", "*1 error*"]) def test_parametrize_separated_order(self, testdir): testdir.makepyfile( """ import pytest @pytest.fixture(scope="module", params=[1, 2]) def arg(request): return request.param values = [] def test_1(arg): values.append(arg) def test_2(arg): values.append(arg) """ ) reprec = testdir.inline_run("-v") reprec.assertoutcome(passed=4) values = reprec.getcalls("pytest_runtest_call")[0].item.module.values assert values == [1, 1, 2, 2] def test_module_parametrized_ordering(self, testdir): testdir.makeini( """ [pytest] console_output_style=classic """ ) testdir.makeconftest( """ import pytest @pytest.fixture(scope="session", params="s1 s2".split()) def sarg(): pass @pytest.fixture(scope="module", params="m1 m2".split()) def marg(): pass """ ) testdir.makepyfile( test_mod1=""" def test_func(sarg): pass def test_func1(marg): pass """, test_mod2=""" def test_func2(sarg): pass def test_func3(sarg, marg): pass def test_func3b(sarg, marg): pass def test_func4(marg): pass """, ) result = testdir.runpytest("-v") result.stdout.fnmatch_lines( """ test_mod1.py::test_func[s1] PASSED test_mod2.py::test_func2[s1] PASSED test_mod2.py::test_func3[s1-m1] PASSED test_mod2.py::test_func3b[s1-m1] PASSED test_mod2.py::test_func3[s1-m2] PASSED test_mod2.py::test_func3b[s1-m2] PASSED test_mod1.py::test_func[s2] PASSED test_mod2.py::test_func2[s2] PASSED test_mod2.py::test_func3[s2-m1] PASSED test_mod2.py::test_func3b[s2-m1] PASSED test_mod2.py::test_func4[m1] PASSED test_mod2.py::test_func3[s2-m2] PASSED test_mod2.py::test_func3b[s2-m2] PASSED test_mod2.py::test_func4[m2] PASSED test_mod1.py::test_func1[m1] PASSED test_mod1.py::test_func1[m2] PASSED """ ) def test_dynamic_parametrized_ordering(self, testdir): testdir.makeini( """ [pytest] console_output_style=classic """ ) testdir.makeconftest( """ import pytest def pytest_configure(config): class DynamicFixturePlugin(object): @pytest.fixture(scope='session', params=['flavor1', 'flavor2']) def flavor(self, request): return request.param config.pluginmanager.register(DynamicFixturePlugin(), 'flavor-fixture') @pytest.fixture(scope='session', params=['vxlan', 'vlan']) def encap(request): return 
request.param @pytest.fixture(scope='session', autouse='True') def reprovision(request, flavor, encap): pass """ ) testdir.makepyfile( """ def test(reprovision): pass def test2(reprovision): pass """ ) result = testdir.runpytest("-v") result.stdout.fnmatch_lines( """ test_dynamic_parametrized_ordering.py::test[flavor1-vxlan] PASSED test_dynamic_parametrized_ordering.py::test2[flavor1-vxlan] PASSED test_dynamic_parametrized_ordering.py::test[flavor2-vxlan] PASSED test_dynamic_parametrized_ordering.py::test2[flavor2-vxlan] PASSED test_dynamic_parametrized_ordering.py::test[flavor2-vlan] PASSED test_dynamic_parametrized_ordering.py::test2[flavor2-vlan] PASSED test_dynamic_parametrized_ordering.py::test[flavor1-vlan] PASSED test_dynamic_parametrized_ordering.py::test2[flavor1-vlan] PASSED """ ) def test_class_ordering(self, testdir): testdir.makeini( """ [pytest] console_output_style=classic """ ) testdir.makeconftest( """ import pytest values = [] @pytest.fixture(scope="function", params=[1,2]) def farg(request): return request.param @pytest.fixture(scope="class", params=list("ab")) def carg(request): return request.param @pytest.fixture(scope="function", autouse=True) def append(request, farg, carg): def fin(): values.append("fin_%s%s" % (carg, farg)) request.addfinalizer(fin) """ ) testdir.makepyfile( """ import pytest class TestClass2(object): def test_1(self): pass def test_2(self): pass class TestClass(object): def test_3(self): pass """ ) result = testdir.runpytest("-vs") result.stdout.re_match_lines( r""" test_class_ordering.py::TestClass2::test_1\[a-1\] PASSED test_class_ordering.py::TestClass2::test_1\[a-2\] PASSED test_class_ordering.py::TestClass2::test_2\[a-1\] PASSED test_class_ordering.py::TestClass2::test_2\[a-2\] PASSED test_class_ordering.py::TestClass2::test_1\[b-1\] PASSED test_class_ordering.py::TestClass2::test_1\[b-2\] PASSED test_class_ordering.py::TestClass2::test_2\[b-1\] PASSED test_class_ordering.py::TestClass2::test_2\[b-2\] PASSED test_class_ordering.py::TestClass::test_3\[a-1\] PASSED test_class_ordering.py::TestClass::test_3\[a-2\] PASSED test_class_ordering.py::TestClass::test_3\[b-1\] PASSED test_class_ordering.py::TestClass::test_3\[b-2\] PASSED """ ) def test_parametrize_separated_order_higher_scope_first(self, testdir): testdir.makepyfile( """ import pytest @pytest.fixture(scope="function", params=[1, 2]) def arg(request): param = request.param request.addfinalizer(lambda: values.append("fin:%s" % param)) values.append("create:%s" % param) return request.param @pytest.fixture(scope="module", params=["mod1", "mod2"]) def modarg(request): param = request.param request.addfinalizer(lambda: values.append("fin:%s" % param)) values.append("create:%s" % param) return request.param values = [] def test_1(arg): values.append("test1") def test_2(modarg): values.append("test2") def test_3(arg, modarg): values.append("test3") def test_4(modarg, arg): values.append("test4") """ ) reprec = testdir.inline_run("-v") reprec.assertoutcome(passed=12) values = reprec.getcalls("pytest_runtest_call")[0].item.module.values expected = [ "create:1", "test1", "fin:1", "create:2", "test1", "fin:2", "create:mod1", "test2", "create:1", "test3", "fin:1", "create:2", "test3", "fin:2", "create:1", "test4", "fin:1", "create:2", "test4", "fin:2", "fin:mod1", "create:mod2", "test2", "create:1", "test3", "fin:1", "create:2", "test3", "fin:2", "create:1", "test4", "fin:1", "create:2", "test4", "fin:2", "fin:mod2", ] import pprint pprint.pprint(list(zip(values, expected))) assert values == 
expected def test_parametrized_fixture_teardown_order(self, testdir): testdir.makepyfile( """ import pytest @pytest.fixture(params=[1,2], scope="class") def param1(request): return request.param values = [] class TestClass(object): @classmethod @pytest.fixture(scope="class", autouse=True) def setup1(self, request, param1): values.append(1) request.addfinalizer(self.teardown1) @classmethod def teardown1(self): assert values.pop() == 1 @pytest.fixture(scope="class", autouse=True) def setup2(self, request, param1): values.append(2) request.addfinalizer(self.teardown2) @classmethod def teardown2(self): assert values.pop() == 2 def test(self): pass def test_finish(): assert not values """ ) result = testdir.runpytest("-v") result.stdout.fnmatch_lines( """ *3 passed* """ ) assert "error" not in result.stdout.str() def test_fixture_finalizer(self, testdir): testdir.makeconftest( """ import pytest import sys @pytest.fixture def browser(request): def finalize(): sys.stdout.write('Finalized') request.addfinalizer(finalize) return {} """ ) b = testdir.mkdir("subdir") b.join("test_overridden_fixture_finalizer.py").write( textwrap.dedent( """\ import pytest @pytest.fixture def browser(browser): browser['visited'] = True return browser def test_browser(browser): assert browser['visited'] is True """ ) ) reprec = testdir.runpytest("-s") for test in ["test_browser"]: reprec.stdout.fnmatch_lines("*Finalized*") def test_class_scope_with_normal_tests(self, testdir): testpath = testdir.makepyfile( """ import pytest class Box(object): value = 0 @pytest.fixture(scope='class') def a(request): Box.value += 1 return Box.value def test_a(a): assert a == 1 class Test1(object): def test_b(self, a): assert a == 2 class Test2(object): def test_c(self, a): assert a == 3""" ) reprec = testdir.inline_run(testpath) for test in ["test_a", "test_b", "test_c"]: assert reprec.matchreport(test).passed def test_request_is_clean(self, testdir): testdir.makepyfile( """ import pytest values = [] @pytest.fixture(params=[1, 2]) def fix(request): request.addfinalizer(lambda: values.append(request.param)) def test_fix(fix): pass """ ) reprec = testdir.inline_run("-s") values = reprec.getcalls("pytest_runtest_call")[0].item.module.values assert values == [1, 2] def test_parametrize_separated_lifecycle(self, testdir): testdir.makepyfile( """ import pytest values = [] @pytest.fixture(scope="module", params=[1, 2]) def arg(request): x = request.param request.addfinalizer(lambda: values.append("fin%s" % x)) return request.param def test_1(arg): values.append(arg) def test_2(arg): values.append(arg) """ ) reprec = testdir.inline_run("-vs") reprec.assertoutcome(passed=4) values = reprec.getcalls("pytest_runtest_call")[0].item.module.values import pprint pprint.pprint(values) # assert len(values) == 6 assert values[0] == values[1] == 1 assert values[2] == "fin1" assert values[3] == values[4] == 2 assert values[5] == "fin2" def test_parametrize_function_scoped_finalizers_called(self, testdir): testdir.makepyfile( """ import pytest @pytest.fixture(scope="function", params=[1, 2]) def arg(request): x = request.param request.addfinalizer(lambda: values.append("fin%s" % x)) return request.param values = [] def test_1(arg): values.append(arg) def test_2(arg): values.append(arg) def test_3(): assert len(values) == 8 assert values == [1, "fin1", 2, "fin2", 1, "fin1", 2, "fin2"] """ ) reprec = testdir.inline_run("-v") reprec.assertoutcome(passed=5) @pytest.mark.issue246 @pytest.mark.parametrize("scope", ["session", "function", "module"]) def 
test_finalizer_order_on_parametrization(self, scope, testdir): testdir.makepyfile( """ import pytest values = [] @pytest.fixture(scope=%(scope)r, params=["1"]) def fix1(request): return request.param @pytest.fixture(scope=%(scope)r) def fix2(request, base): def cleanup_fix2(): assert not values, "base should not have been finalized" request.addfinalizer(cleanup_fix2) @pytest.fixture(scope=%(scope)r) def base(request, fix1): def cleanup_base(): values.append("fin_base") print ("finalizing base") request.addfinalizer(cleanup_base) def test_begin(): pass def test_baz(base, fix2): pass def test_other(): pass """ % {"scope": scope} ) reprec = testdir.inline_run("-lvs") reprec.assertoutcome(passed=3) @pytest.mark.issue396 def test_class_scope_parametrization_ordering(self, testdir): testdir.makepyfile( """ import pytest values = [] @pytest.fixture(params=["John", "Doe"], scope="class") def human(request): request.addfinalizer(lambda: values.append("fin %s" % request.param)) return request.param class TestGreetings(object): def test_hello(self, human): values.append("test_hello") class TestMetrics(object): def test_name(self, human): values.append("test_name") def test_population(self, human): values.append("test_population") """ ) reprec = testdir.inline_run() reprec.assertoutcome(passed=6) values = reprec.getcalls("pytest_runtest_call")[0].item.module.values assert values == [ "test_hello", "fin John", "test_hello", "fin Doe", "test_name", "test_population", "fin John", "test_name", "test_population", "fin Doe", ] def test_parametrize_setup_function(self, testdir): testdir.makepyfile( """ import pytest @pytest.fixture(scope="module", params=[1, 2]) def arg(request): return request.param @pytest.fixture(scope="module", autouse=True) def mysetup(request, arg): request.addfinalizer(lambda: values.append("fin%s" % arg)) values.append("setup%s" % arg) values = [] def test_1(arg): values.append(arg) def test_2(arg): values.append(arg) def test_3(): import pprint pprint.pprint(values) if arg == 1: assert values == ["setup1", 1, 1, ] elif arg == 2: assert values == ["setup1", 1, 1, "fin1", "setup2", 2, 2, ] """ ) reprec = testdir.inline_run("-v") reprec.assertoutcome(passed=6) def test_fixture_marked_function_not_collected_as_test(self, testdir): testdir.makepyfile( """ import pytest @pytest.fixture def test_app(): return 1 def test_something(test_app): assert test_app == 1 """ ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) def test_params_and_ids(self, testdir): testdir.makepyfile( """ import pytest @pytest.fixture(params=[object(), object()], ids=['alpha', 'beta']) def fix(request): return request.param def test_foo(fix): assert 1 """ ) res = testdir.runpytest("-v") res.stdout.fnmatch_lines(["*test_foo*alpha*", "*test_foo*beta*"]) def test_params_and_ids_yieldfixture(self, testdir): testdir.makepyfile( """ import pytest @pytest.yield_fixture(params=[object(), object()], ids=['alpha', 'beta']) def fix(request): yield request.param def test_foo(fix): assert 1 """ ) res = testdir.runpytest("-v") res.stdout.fnmatch_lines(["*test_foo*alpha*", "*test_foo*beta*"]) @pytest.mark.issue920 def test_deterministic_fixture_collection(self, testdir, monkeypatch): testdir.makepyfile( """ import pytest @pytest.fixture(scope="module", params=["A", "B", "C"]) def A(request): return request.param @pytest.fixture(scope="module", params=["DDDDDDDDD", "EEEEEEEEEEEE", "FFFFFFFFFFF", "banansda"]) def B(request, A): return request.param def test_foo(B): # Something funky is going on here. 
# Despite specified seeds, on what is collected, # sometimes we get unexpected passes. hashing B seems # to help? assert hash(B) or True """ ) monkeypatch.setenv("PYTHONHASHSEED", "1") out1 = testdir.runpytest_subprocess("-v") monkeypatch.setenv("PYTHONHASHSEED", "2") out2 = testdir.runpytest_subprocess("-v") out1 = [ line for line in out1.outlines if line.startswith("test_deterministic_fixture_collection.py::test_foo") ] out2 = [ line for line in out2.outlines if line.startswith("test_deterministic_fixture_collection.py::test_foo") ] assert len(out1) == 12 assert out1 == out2 class TestRequestScopeAccess(object): pytestmark = pytest.mark.parametrize( ("scope", "ok", "error"), [ ["session", "", "fspath class function module"], ["module", "module fspath", "cls function"], ["class", "module fspath cls", "function"], ["function", "module fspath cls function", ""], ], ) def test_setup(self, testdir, scope, ok, error): testdir.makepyfile( """ import pytest @pytest.fixture(scope=%r, autouse=True) def myscoped(request): for x in %r: assert hasattr(request, x) for x in %r: pytest.raises(AttributeError, lambda: getattr(request, x)) assert request.session assert request.config def test_func(): pass """ % (scope, ok.split(), error.split()) ) reprec = testdir.inline_run("-l") reprec.assertoutcome(passed=1) def test_funcarg(self, testdir, scope, ok, error): testdir.makepyfile( """ import pytest @pytest.fixture(scope=%r) def arg(request): for x in %r: assert hasattr(request, x) for x in %r: pytest.raises(AttributeError, lambda: getattr(request, x)) assert request.session assert request.config def test_func(arg): pass """ % (scope, ok.split(), error.split()) ) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) class TestErrors(object): def test_subfactory_missing_funcarg(self, testdir): testdir.makepyfile( """ import pytest @pytest.fixture() def gen(qwe123): return 1 def test_something(gen): pass """ ) result = testdir.runpytest() assert result.ret != 0 result.stdout.fnmatch_lines( ["*def gen(qwe123):*", "*fixture*qwe123*not found*", "*1 error*"] ) def test_issue498_fixture_finalizer_failing(self, testdir): testdir.makepyfile( """ import pytest @pytest.fixture def fix1(request): def f(): raise KeyError request.addfinalizer(f) return object() values = [] def test_1(fix1): values.append(fix1) def test_2(fix1): values.append(fix1) def test_3(): assert values[0] != values[1] """ ) result = testdir.runpytest() result.stdout.fnmatch_lines( """ *ERROR*teardown*test_1* *KeyError* *ERROR*teardown*test_2* *KeyError* *3 pass*2 error* """ ) def test_setupfunc_missing_funcarg(self, testdir): testdir.makepyfile( """ import pytest @pytest.fixture(autouse=True) def gen(qwe123): return 1 def test_something(): pass """ ) result = testdir.runpytest() assert result.ret != 0 result.stdout.fnmatch_lines( ["*def gen(qwe123):*", "*fixture*qwe123*not found*", "*1 error*"] ) class TestShowFixtures(object): def test_funcarg_compat(self, testdir): config = testdir.parseconfigure("--funcargs") assert config.option.showfixtures def test_show_fixtures(self, testdir): result = testdir.runpytest("--fixtures") result.stdout.fnmatch_lines(["*tmpdir*", "*temporary directory*"]) def test_show_fixtures_verbose(self, testdir): result = testdir.runpytest("--fixtures", "-v") result.stdout.fnmatch_lines(["*tmpdir*--*tmpdir.py*", "*temporary directory*"]) def test_show_fixtures_testmodule(self, testdir): p = testdir.makepyfile( ''' import pytest @pytest.fixture def _arg0(): """ hidden """ @pytest.fixture def arg1(): """ hello world """ 
''' ) result = testdir.runpytest("--fixtures", p) result.stdout.fnmatch_lines( """ *tmpdir *fixtures defined from* *arg1* *hello world* """ ) assert "arg0" not in result.stdout.str() @pytest.mark.parametrize("testmod", [True, False]) def test_show_fixtures_conftest(self, testdir, testmod): testdir.makeconftest( ''' import pytest @pytest.fixture def arg1(): """ hello world """ ''' ) if testmod: testdir.makepyfile( """ def test_hello(): pass """ ) result = testdir.runpytest("--fixtures") result.stdout.fnmatch_lines( """ *tmpdir* *fixtures defined from*conftest* *arg1* *hello world* """ ) def test_show_fixtures_trimmed_doc(self, testdir): p = testdir.makepyfile( textwrap.dedent( '''\ import pytest @pytest.fixture def arg1(): """ line1 line2 """ @pytest.fixture def arg2(): """ line1 line2 """ ''' ) ) result = testdir.runpytest("--fixtures", p) result.stdout.fnmatch_lines( textwrap.dedent( """\ * fixtures defined from test_show_fixtures_trimmed_doc * arg2 line1 line2 arg1 line1 line2 """ ) ) def test_show_fixtures_indented_doc(self, testdir): p = testdir.makepyfile( textwrap.dedent( '''\ import pytest @pytest.fixture def fixture1(): """ line1 indented line """ ''' ) ) result = testdir.runpytest("--fixtures", p) result.stdout.fnmatch_lines( textwrap.dedent( """\ * fixtures defined from test_show_fixtures_indented_doc * fixture1 line1 indented line """ ) ) def test_show_fixtures_indented_doc_first_line_unindented(self, testdir): p = testdir.makepyfile( textwrap.dedent( '''\ import pytest @pytest.fixture def fixture1(): """line1 line2 indented line """ ''' ) ) result = testdir.runpytest("--fixtures", p) result.stdout.fnmatch_lines( textwrap.dedent( """\ * fixtures defined from test_show_fixtures_indented_doc_first_line_unindented * fixture1 line1 line2 indented line """ ) ) def test_show_fixtures_indented_in_class(self, testdir): p = testdir.makepyfile( textwrap.dedent( '''\ import pytest class TestClass(object): @pytest.fixture def fixture1(self): """line1 line2 indented line """ ''' ) ) result = testdir.runpytest("--fixtures", p) result.stdout.fnmatch_lines( textwrap.dedent( """\ * fixtures defined from test_show_fixtures_indented_in_class * fixture1 line1 line2 indented line """ ) ) def test_show_fixtures_different_files(self, testdir): """ #833: --fixtures only shows fixtures from first file """ testdir.makepyfile( test_a=''' import pytest @pytest.fixture def fix_a(): """Fixture A""" pass def test_a(fix_a): pass ''' ) testdir.makepyfile( test_b=''' import pytest @pytest.fixture def fix_b(): """Fixture B""" pass def test_b(fix_b): pass ''' ) result = testdir.runpytest("--fixtures") result.stdout.fnmatch_lines( """ * fixtures defined from test_a * fix_a Fixture A * fixtures defined from test_b * fix_b Fixture B """ ) def test_show_fixtures_with_same_name(self, testdir): testdir.makeconftest( ''' import pytest @pytest.fixture def arg1(): """Hello World in conftest.py""" return "Hello World" ''' ) testdir.makepyfile( """ def test_foo(arg1): assert arg1 == "Hello World" """ ) testdir.makepyfile( ''' import pytest @pytest.fixture def arg1(): """Hi from test module""" return "Hi" def test_bar(arg1): assert arg1 == "Hi" ''' ) result = testdir.runpytest("--fixtures") result.stdout.fnmatch_lines( """ * fixtures defined from conftest * arg1 Hello World in conftest.py * fixtures defined from test_show_fixtures_with_same_name * arg1 Hi from test module """ ) def test_fixture_disallow_twice(self): """Test that applying @pytest.fixture twice generates an error (#2334).""" with pytest.raises(ValueError): 
@pytest.fixture @pytest.fixture def foo(): pass class TestContextManagerFixtureFuncs(object): @pytest.fixture(params=["fixture", "yield_fixture"]) def flavor(self, request, testdir, monkeypatch): monkeypatch.setenv("PYTEST_FIXTURE_FLAVOR", request.param) testdir.makepyfile( test_context=""" import os import pytest import warnings VAR = "PYTEST_FIXTURE_FLAVOR" if VAR not in os.environ: warnings.warn("PYTEST_FIXTURE_FLAVOR was not set, assuming fixture") fixture = pytest.fixture else: fixture = getattr(pytest, os.environ[VAR]) """ ) def test_simple(self, testdir, flavor): testdir.makepyfile( """ from __future__ import print_function from test_context import fixture @fixture def arg1(): print ("setup") yield 1 print ("teardown") def test_1(arg1): print ("test1", arg1) def test_2(arg1): print ("test2", arg1) assert 0 """ ) result = testdir.runpytest("-s") result.stdout.fnmatch_lines( """ *setup* *test1 1* *teardown* *setup* *test2 1* *teardown* """ ) def test_scoped(self, testdir, flavor): testdir.makepyfile( """ from __future__ import print_function from test_context import fixture @fixture(scope="module") def arg1(): print ("setup") yield 1 print ("teardown") def test_1(arg1): print ("test1", arg1) def test_2(arg1): print ("test2", arg1) """ ) result = testdir.runpytest("-s") result.stdout.fnmatch_lines( """ *setup* *test1 1* *test2 1* *teardown* """ ) def test_setup_exception(self, testdir, flavor): testdir.makepyfile( """ from test_context import fixture @fixture(scope="module") def arg1(): pytest.fail("setup") yield 1 def test_1(arg1): pass """ ) result = testdir.runpytest("-s") result.stdout.fnmatch_lines( """ *pytest.fail*setup* *1 error* """ ) def test_teardown_exception(self, testdir, flavor): testdir.makepyfile( """ from test_context import fixture @fixture(scope="module") def arg1(): yield 1 pytest.fail("teardown") def test_1(arg1): pass """ ) result = testdir.runpytest("-s") result.stdout.fnmatch_lines( """ *pytest.fail*teardown* *1 passed*1 error* """ ) def test_yields_more_than_one(self, testdir, flavor): testdir.makepyfile( """ from test_context import fixture @fixture(scope="module") def arg1(): yield 1 yield 2 def test_1(arg1): pass """ ) result = testdir.runpytest("-s") result.stdout.fnmatch_lines( """ *fixture function* *test_yields*:2* """ ) def test_custom_name(self, testdir, flavor): testdir.makepyfile( """ from test_context import fixture @fixture(name='meow') def arg1(): return 'mew' def test_1(meow): print(meow) """ ) result = testdir.runpytest("-s") result.stdout.fnmatch_lines("*mew*") class TestParameterizedSubRequest(object): def test_call_from_fixture(self, testdir): testdir.makepyfile( test_call_from_fixture=""" import pytest @pytest.fixture(params=[0, 1, 2]) def fix_with_param(request): return request.param @pytest.fixture def get_named_fixture(request): return request.getfixturevalue('fix_with_param') def test_foo(request, get_named_fixture): pass """ ) result = testdir.runpytest() result.stdout.fnmatch_lines( [ "The requested fixture has no parameter defined for test:", " test_call_from_fixture.py::test_foo", "Requested fixture 'fix_with_param' defined in:", "test_call_from_fixture.py:4", "Requested here:", "test_call_from_fixture.py:9", "*1 error in*", ] ) def test_call_from_test(self, testdir): testdir.makepyfile( test_call_from_test=""" import pytest @pytest.fixture(params=[0, 1, 2]) def fix_with_param(request): return request.param def test_foo(request): request.getfixturevalue('fix_with_param') """ ) result = testdir.runpytest() 
result.stdout.fnmatch_lines( [ "The requested fixture has no parameter defined for test:", " test_call_from_test.py::test_foo", "Requested fixture 'fix_with_param' defined in:", "test_call_from_test.py:4", "Requested here:", "test_call_from_test.py:8", "*1 failed*", ] ) def test_external_fixture(self, testdir): testdir.makeconftest( """ import pytest @pytest.fixture(params=[0, 1, 2]) def fix_with_param(request): return request.param """ ) testdir.makepyfile( test_external_fixture=""" def test_foo(request): request.getfixturevalue('fix_with_param') """ ) result = testdir.runpytest() result.stdout.fnmatch_lines( [ "The requested fixture has no parameter defined for test:", " test_external_fixture.py::test_foo", "", "Requested fixture 'fix_with_param' defined in:", "conftest.py:4", "Requested here:", "test_external_fixture.py:2", "*1 failed*", ] ) def test_non_relative_path(self, testdir): tests_dir = testdir.mkdir("tests") fixdir = testdir.mkdir("fixtures") fixfile = fixdir.join("fix.py") fixfile.write( textwrap.dedent( """\ import pytest @pytest.fixture(params=[0, 1, 2]) def fix_with_param(request): return request.param """ ) ) testfile = tests_dir.join("test_foos.py") testfile.write( textwrap.dedent( """\ from fix import fix_with_param def test_foo(request): request.getfixturevalue('fix_with_param') """ ) ) tests_dir.chdir() testdir.syspathinsert(fixdir) result = testdir.runpytest() result.stdout.fnmatch_lines( [ "The requested fixture has no parameter defined for test:", " test_foos.py::test_foo", "", "Requested fixture 'fix_with_param' defined in:", "*fix.py:4", "Requested here:", "test_foos.py:4", "*1 failed*", ] ) def test_pytest_fixture_setup_and_post_finalizer_hook(testdir): testdir.makeconftest( """ from __future__ import print_function def pytest_fixture_setup(fixturedef, request): print('ROOT setup hook called for {0} from {1}'.format(fixturedef.argname, request.node.name)) def pytest_fixture_post_finalizer(fixturedef, request): print('ROOT finalizer hook called for {0} from {1}'.format(fixturedef.argname, request.node.name)) """ ) testdir.makepyfile( **{ "tests/conftest.py": """ from __future__ import print_function def pytest_fixture_setup(fixturedef, request): print('TESTS setup hook called for {0} from {1}'.format(fixturedef.argname, request.node.name)) def pytest_fixture_post_finalizer(fixturedef, request): print('TESTS finalizer hook called for {0} from {1}'.format(fixturedef.argname, request.node.name)) """, "tests/test_hooks.py": """ from __future__ import print_function import pytest @pytest.fixture() def my_fixture(): return 'some' def test_func(my_fixture): print('TEST test_func') assert my_fixture == 'some' """, } ) result = testdir.runpytest("-s") assert result.ret == 0 result.stdout.fnmatch_lines( [ "*TESTS setup hook called for my_fixture from test_func*", "*ROOT setup hook called for my_fixture from test_func*", "*TEST test_func*", "*TESTS finalizer hook called for my_fixture from test_func*", "*ROOT finalizer hook called for my_fixture from test_func*", ] ) class TestScopeOrdering(object): """Class of tests that ensure fixtures are ordered based on their scopes (#2405)""" @pytest.mark.parametrize("variant", ["mark", "autouse"]) @pytest.mark.issue(github="#2405") def test_func_closure_module_auto(self, testdir, variant, monkeypatch): """Semantically identical to the example posted in #2405 when ``use_mark=True``""" monkeypatch.setenv("FIXTURE_ACTIVATION_VARIANT", variant) testdir.makepyfile( """ import warnings import os import pytest VAR = 
'FIXTURE_ACTIVATION_VARIANT' VALID_VARS = ('autouse', 'mark') VARIANT = os.environ.get(VAR) if VARIANT is None or VARIANT not in VALID_VARS: warnings.warn("{!r} is not in {}, assuming autouse".format(VARIANT, VALID_VARS) ) variant = 'mark' @pytest.fixture(scope='module', autouse=VARIANT == 'autouse') def m1(): pass if VARIANT=='mark': pytestmark = pytest.mark.usefixtures('m1') @pytest.fixture(scope='function', autouse=True) def f1(): pass def test_func(m1): pass """ ) items, _ = testdir.inline_genitems() request = FixtureRequest(items[0]) assert request.fixturenames == "m1 f1".split() def test_func_closure_with_native_fixtures(self, testdir, monkeypatch): """Sanity check that verifies the order returned by the closures and the actual fixture execution order: The execution order may differ because of fixture inter-dependencies. """ monkeypatch.setattr(pytest, "FIXTURE_ORDER", [], raising=False) testdir.makepyfile( """ import pytest FIXTURE_ORDER = pytest.FIXTURE_ORDER @pytest.fixture(scope="session") def s1(): FIXTURE_ORDER.append('s1') @pytest.fixture(scope="package") def p1(): FIXTURE_ORDER.append('p1') @pytest.fixture(scope="module") def m1(): FIXTURE_ORDER.append('m1') @pytest.fixture(scope='session') def my_tmpdir_factory(): FIXTURE_ORDER.append('my_tmpdir_factory') @pytest.fixture def my_tmpdir(my_tmpdir_factory): FIXTURE_ORDER.append('my_tmpdir') @pytest.fixture def f1(my_tmpdir): FIXTURE_ORDER.append('f1') @pytest.fixture def f2(): FIXTURE_ORDER.append('f2') def test_foo(f1, p1, m1, f2, s1): pass """ ) items, _ = testdir.inline_genitems() request = FixtureRequest(items[0]) # order of fixtures based on their scope and position in the parameter list assert ( request.fixturenames == "s1 my_tmpdir_factory p1 m1 f1 f2 my_tmpdir".split() ) testdir.runpytest() # actual fixture execution differs: dependent fixtures must be created first ("my_tmpdir") assert ( pytest.FIXTURE_ORDER == "s1 my_tmpdir_factory p1 m1 my_tmpdir f1 f2".split() ) def test_func_closure_module(self, testdir): testdir.makepyfile( """ import pytest @pytest.fixture(scope='module') def m1(): pass @pytest.fixture(scope='function') def f1(): pass def test_func(f1, m1): pass """ ) items, _ = testdir.inline_genitems() request = FixtureRequest(items[0]) assert request.fixturenames == "m1 f1".split() def test_func_closure_scopes_reordered(self, testdir): """Test ensures that fixtures are ordered by scope regardless of the order of the parameters, although fixtures of same scope keep the declared order """ testdir.makepyfile( """ import pytest @pytest.fixture(scope='session') def s1(): pass @pytest.fixture(scope='module') def m1(): pass @pytest.fixture(scope='function') def f1(): pass @pytest.fixture(scope='function') def f2(): pass class Test: @pytest.fixture(scope='class') def c1(cls): pass def test_func(self, f2, f1, c1, m1, s1): pass """ ) items, _ = testdir.inline_genitems() request = FixtureRequest(items[0]) assert request.fixturenames == "s1 m1 c1 f2 f1".split() def test_func_closure_same_scope_closer_root_first(self, testdir): """Auto-use fixtures of same scope are ordered by closer-to-root first""" testdir.makeconftest( """ import pytest @pytest.fixture(scope='module', autouse=True) def m_conf(): pass """ ) testdir.makepyfile( **{ "sub/conftest.py": """ import pytest @pytest.fixture(scope='package', autouse=True) def p_sub(): pass @pytest.fixture(scope='module', autouse=True) def m_sub(): pass """, "sub/__init__.py": "", "sub/test_func.py": """ import pytest @pytest.fixture(scope='module', autouse=True) def m_test(): 
pass @pytest.fixture(scope='function') def f1(): pass def test_func(m_test, f1): pass """, } ) items, _ = testdir.inline_genitems() request = FixtureRequest(items[0]) assert request.fixturenames == "p_sub m_conf m_sub m_test f1".split() def test_func_closure_all_scopes_complex(self, testdir): """Complex test involving all scopes and mixing autouse with normal fixtures""" testdir.makeconftest( """ import pytest @pytest.fixture(scope='session') def s1(): pass @pytest.fixture(scope='package', autouse=True) def p1(): pass """ ) testdir.makepyfile(**{"__init__.py": ""}) testdir.makepyfile( """ import pytest @pytest.fixture(scope='module', autouse=True) def m1(): pass @pytest.fixture(scope='module') def m2(s1): pass @pytest.fixture(scope='function') def f1(): pass @pytest.fixture(scope='function') def f2(): pass class Test: @pytest.fixture(scope='class', autouse=True) def c1(self): pass def test_func(self, f2, f1, m2): pass """ ) items, _ = testdir.inline_genitems() request = FixtureRequest(items[0]) assert request.fixturenames == "s1 p1 m1 m2 c1 f2 f1".split() def test_multiple_packages(self, testdir): """Complex test involving multiple package fixtures. Make sure teardowns are executed in order. . └── root ├── __init__.py ├── sub1 │ ├── __init__.py │ ├── conftest.py │ └── test_1.py └── sub2 ├── __init__.py ├── conftest.py └── test_2.py """ root = testdir.mkdir("root") root.join("__init__.py").write("values = []") sub1 = root.mkdir("sub1") sub1.ensure("__init__.py") sub1.join("conftest.py").write( textwrap.dedent( """\ import pytest from .. import values @pytest.fixture(scope="package") def fix(): values.append("pre-sub1") yield values assert values.pop() == "pre-sub1" """ ) ) sub1.join("test_1.py").write( textwrap.dedent( """\ from .. import values def test_1(fix): assert values == ["pre-sub1"] """ ) ) sub2 = root.mkdir("sub2") sub2.ensure("__init__.py") sub2.join("conftest.py").write( textwrap.dedent( """\ import pytest from .. import values @pytest.fixture(scope="package") def fix(): values.append("pre-sub2") yield values assert values.pop() == "pre-sub2" """ ) ) sub2.join("test_2.py").write( textwrap.dedent( """\ from .. import values def test_2(fix): assert values == ["pre-sub2"] """ ) ) reprec = testdir.inline_run() reprec.assertoutcome(passed=2)
pfctdayelise/pytest
testing/python/fixture.py
Python
mit
124,369
[ "VisIt" ]
40f860ce12a4c736e7650fb45f8dd39cdc82da8488ecd91e92afec33a420e420
#!/usr/bin/env python3
########################################################################
# Solves problem 109 from projectEuler.net.
# Counts how many distinct ways can a player checkout with a score
# less than 100 in a game of darts.
# Copyright (C) 2010 Santiago Alessandri
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# You can contact me at san.lt.ss@gmail.com
# Visit my wiki at http://san-ss.wikidot.com
########################################################################

limit = 100

doubles = set(2 * x for x in range(1, 21))
doubles.add(50)

casual = sorted([x for x in range(1,21)] + [2 * x for x in range(1, 21)] + [3 * x for x in range(1, 21)] + [25] + [50])

if __name__ == '__main__':
    result = 0
    for score in range(2, limit):
        if score in doubles:
            result += 1
        for first in casual:
            if first >= score:
                break
            result += (score - first) in doubles and 1 or 0
        first = 0
        while first < len(casual) and casual[first] * 2 < score:
            remain = score - casual[first]
            second = first
            while second < len(casual) and casual[second] < remain:
                result += (remain - casual[second]) in doubles and 1 or 0
                second += 1
            first += 1
    print("The result is:", result)
sanSS/programming-contests
project-euler/problem109.py
Python
gpl-3.0
1,969
[ "VisIt" ]
6c5f381ecf6006e0fbb41eea7bd492b4495bbf8f1409b34929bbc9b6ff3ac95f
"""Compiles nodes from the parser into Python code.""" import typing as t from contextlib import contextmanager from functools import update_wrapper from io import StringIO from itertools import chain from keyword import iskeyword as is_python_keyword from markupsafe import escape from markupsafe import Markup from . import nodes from .exceptions import TemplateAssertionError from .idtracking import Symbols from .idtracking import VAR_LOAD_ALIAS from .idtracking import VAR_LOAD_PARAMETER from .idtracking import VAR_LOAD_RESOLVE from .idtracking import VAR_LOAD_UNDEFINED from .nodes import EvalContext from .optimizer import Optimizer from .utils import _PassArg from .utils import concat from .visitor import NodeVisitor if t.TYPE_CHECKING: import typing_extensions as te from .environment import Environment F = t.TypeVar("F", bound=t.Callable[..., t.Any]) operators = { "eq": "==", "ne": "!=", "gt": ">", "gteq": ">=", "lt": "<", "lteq": "<=", "in": "in", "notin": "not in", } def optimizeconst(f: F) -> F: def new_func( self: "CodeGenerator", node: nodes.Expr, frame: "Frame", **kwargs: t.Any ) -> t.Any: # Only optimize if the frame is not volatile if self.optimizer is not None and not frame.eval_ctx.volatile: new_node = self.optimizer.visit(node, frame.eval_ctx) if new_node != node: return self.visit(new_node, frame) return f(self, node, frame, **kwargs) return update_wrapper(t.cast(F, new_func), f) def _make_binop(op: str) -> t.Callable[["CodeGenerator", nodes.BinExpr, "Frame"], None]: @optimizeconst def visitor(self: "CodeGenerator", node: nodes.BinExpr, frame: Frame) -> None: if ( self.environment.sandboxed and op in self.environment.intercepted_binops # type: ignore ): self.write(f"environment.call_binop(context, {op!r}, ") self.visit(node.left, frame) self.write(", ") self.visit(node.right, frame) else: self.write("(") self.visit(node.left, frame) self.write(f" {op} ") self.visit(node.right, frame) self.write(")") return visitor def _make_unop( op: str, ) -> t.Callable[["CodeGenerator", nodes.UnaryExpr, "Frame"], None]: @optimizeconst def visitor(self: "CodeGenerator", node: nodes.UnaryExpr, frame: Frame) -> None: if ( self.environment.sandboxed and op in self.environment.intercepted_unops # type: ignore ): self.write(f"environment.call_unop(context, {op!r}, ") self.visit(node.node, frame) else: self.write("(" + op) self.visit(node.node, frame) self.write(")") return visitor def generate( node: nodes.Template, environment: "Environment", name: t.Optional[str], filename: t.Optional[str], stream: t.Optional[t.TextIO] = None, defer_init: bool = False, optimized: bool = True, ) -> t.Optional[str]: """Generate the python source for a node tree.""" if not isinstance(node, nodes.Template): raise TypeError("Can't compile non template nodes") generator = environment.code_generator_class( environment, name, filename, stream, defer_init, optimized ) generator.visit(node) if stream is None: return generator.stream.getvalue() # type: ignore return None def has_safe_repr(value: t.Any) -> bool: """Does the node have a safe representation?""" if value is None or value is NotImplemented or value is Ellipsis: return True if type(value) in {bool, int, float, complex, range, str, Markup}: return True if type(value) in {tuple, list, set, frozenset}: return all(has_safe_repr(v) for v in value) if type(value) is dict: return all(has_safe_repr(k) and has_safe_repr(v) for k, v in value.items()) return False def find_undeclared( nodes: t.Iterable[nodes.Node], names: t.Iterable[str] ) -> t.Set[str]: """Check if the 
names passed are accessed undeclared. The return value is a set of all the undeclared names from the sequence of names found. """ visitor = UndeclaredNameVisitor(names) try: for node in nodes: visitor.visit(node) except VisitorExit: pass return visitor.undeclared class MacroRef: def __init__(self, node: t.Union[nodes.Macro, nodes.CallBlock]) -> None: self.node = node self.accesses_caller = False self.accesses_kwargs = False self.accesses_varargs = False class Frame: """Holds compile time information for us.""" def __init__( self, eval_ctx: EvalContext, parent: t.Optional["Frame"] = None, level: t.Optional[int] = None, ) -> None: self.eval_ctx = eval_ctx # the parent of this frame self.parent = parent if parent is None: self.symbols = Symbols(level=level) # in some dynamic inheritance situations the compiler needs to add # write tests around output statements. self.require_output_check = False # inside some tags we are using a buffer rather than yield statements. # this for example affects {% filter %} or {% macro %}. If a frame # is buffered this variable points to the name of the list used as # buffer. self.buffer: t.Optional[str] = None # the name of the block we're in, otherwise None. self.block: t.Optional[str] = None else: self.symbols = Symbols(parent.symbols, level=level) self.require_output_check = parent.require_output_check self.buffer = parent.buffer self.block = parent.block # a toplevel frame is the root + soft frames such as if conditions. self.toplevel = False # the root frame is basically just the outermost frame, so no if # conditions. This information is used to optimize inheritance # situations. self.rootlevel = False # variables set inside of loops and blocks should not affect outer frames, # but they still needs to be kept track of as part of the active context. self.loop_frame = False self.block_frame = False # track whether the frame is being used in an if-statement or conditional # expression as it determines which errors should be raised during runtime # or compile time. self.soft_frame = False def copy(self) -> "Frame": """Create a copy of the current one.""" rv = object.__new__(self.__class__) rv.__dict__.update(self.__dict__) rv.symbols = self.symbols.copy() return rv def inner(self, isolated: bool = False) -> "Frame": """Return an inner frame.""" if isolated: return Frame(self.eval_ctx, level=self.symbols.level + 1) return Frame(self.eval_ctx, self) def soft(self) -> "Frame": """Return a soft frame. A soft frame may not be modified as standalone thing as it shares the resources with the frame it was created of, but it's not a rootlevel frame any longer. This is only used to implement if-statements and conditional expressions. """ rv = self.copy() rv.rootlevel = False rv.soft_frame = True return rv __copy__ = copy class VisitorExit(RuntimeError): """Exception used by the `UndeclaredNameVisitor` to signal a stop.""" class DependencyFinderVisitor(NodeVisitor): """A visitor that collects filter and test calls.""" def __init__(self) -> None: self.filters: t.Set[str] = set() self.tests: t.Set[str] = set() def visit_Filter(self, node: nodes.Filter) -> None: self.generic_visit(node) self.filters.add(node.name) def visit_Test(self, node: nodes.Test) -> None: self.generic_visit(node) self.tests.add(node.name) def visit_Block(self, node: nodes.Block) -> None: """Stop visiting at blocks.""" class UndeclaredNameVisitor(NodeVisitor): """A visitor that checks if a name is accessed without being declared. 
This is different from the frame visitor as it will not stop at closure frames. """ def __init__(self, names: t.Iterable[str]) -> None: self.names = set(names) self.undeclared: t.Set[str] = set() def visit_Name(self, node: nodes.Name) -> None: if node.ctx == "load" and node.name in self.names: self.undeclared.add(node.name) if self.undeclared == self.names: raise VisitorExit() else: self.names.discard(node.name) def visit_Block(self, node: nodes.Block) -> None: """Stop visiting at blocks.""" class CompilerExit(Exception): """Raised if the compiler encountered a situation where it just doesn't make sense to further process the code. Any block that raises such an exception is not further processed. """ class CodeGenerator(NodeVisitor): def __init__( self, environment: "Environment", name: t.Optional[str], filename: t.Optional[str], stream: t.Optional[t.TextIO] = None, defer_init: bool = False, optimized: bool = True, ) -> None: if stream is None: stream = StringIO() self.environment = environment self.name = name self.filename = filename self.stream = stream self.created_block_context = False self.defer_init = defer_init self.optimizer: t.Optional[Optimizer] = None if optimized: self.optimizer = Optimizer(environment) # aliases for imports self.import_aliases: t.Dict[str, str] = {} # a registry for all blocks. Because blocks are moved out # into the global python scope they are registered here self.blocks: t.Dict[str, nodes.Block] = {} # the number of extends statements so far self.extends_so_far = 0 # some templates have a rootlevel extends. In this case we # can safely assume that we're a child template and do some # more optimizations. self.has_known_extends = False # the current line number self.code_lineno = 1 # registry of all filters and tests (global, not block local) self.tests: t.Dict[str, str] = {} self.filters: t.Dict[str, str] = {} # the debug information self.debug_info: t.List[t.Tuple[int, int]] = [] self._write_debug_info: t.Optional[int] = None # the number of new lines before the next write() self._new_lines = 0 # the line number of the last written statement self._last_line = 0 # true if nothing was written so far. self._first_write = True # used by the `temporary_identifier` method to get new # unique, temporary identifier self._last_identifier = 0 # the current indentation self._indentation = 0 # Tracks toplevel assignments self._assign_stack: t.List[t.Set[str]] = [] # Tracks parameter definition blocks self._param_def_block: t.List[t.Set[str]] = [] # Tracks the current context.
self._context_reference_stack = ["context"] @property def optimized(self) -> bool: return self.optimizer is not None # -- Various compilation helpers def fail(self, msg: str, lineno: int) -> "te.NoReturn": """Fail with a :exc:`TemplateAssertionError`.""" raise TemplateAssertionError(msg, lineno, self.name, self.filename) def temporary_identifier(self) -> str: """Get a new unique identifier.""" self._last_identifier += 1 return f"t_{self._last_identifier}" def buffer(self, frame: Frame) -> None: """Enable buffering for the frame from that point onwards.""" frame.buffer = self.temporary_identifier() self.writeline(f"{frame.buffer} = []") def return_buffer_contents( self, frame: Frame, force_unescaped: bool = False ) -> None: """Return the buffer contents of the frame.""" if not force_unescaped: if frame.eval_ctx.volatile: self.writeline("if context.eval_ctx.autoescape:") self.indent() self.writeline(f"return Markup(concat({frame.buffer}))") self.outdent() self.writeline("else:") self.indent() self.writeline(f"return concat({frame.buffer})") self.outdent() return elif frame.eval_ctx.autoescape: self.writeline(f"return Markup(concat({frame.buffer}))") return self.writeline(f"return concat({frame.buffer})") def indent(self) -> None: """Indent by one.""" self._indentation += 1 def outdent(self, step: int = 1) -> None: """Outdent by step.""" self._indentation -= step def start_write(self, frame: Frame, node: t.Optional[nodes.Node] = None) -> None: """Yield or write into the frame buffer.""" if frame.buffer is None: self.writeline("yield ", node) else: self.writeline(f"{frame.buffer}.append(", node) def end_write(self, frame: Frame) -> None: """End the writing process started by `start_write`.""" if frame.buffer is not None: self.write(")") def simple_write( self, s: str, frame: Frame, node: t.Optional[nodes.Node] = None ) -> None: """Simple shortcut for start_write + write + end_write.""" self.start_write(frame, node) self.write(s) self.end_write(frame) def blockvisit(self, nodes: t.Iterable[nodes.Node], frame: Frame) -> None: """Visit a list of nodes as block in a frame. If the current frame is no buffer a dummy ``if 0: yield None`` is written automatically. """ try: self.writeline("pass") for node in nodes: self.visit(node, frame) except CompilerExit: pass def write(self, x: str) -> None: """Write a string into the output stream.""" if self._new_lines: if not self._first_write: self.stream.write("\n" * self._new_lines) self.code_lineno += self._new_lines if self._write_debug_info is not None: self.debug_info.append((self._write_debug_info, self.code_lineno)) self._write_debug_info = None self._first_write = False self.stream.write(" " * self._indentation) self._new_lines = 0 self.stream.write(x) def writeline( self, x: str, node: t.Optional[nodes.Node] = None, extra: int = 0 ) -> None: """Combination of newline and write.""" self.newline(node, extra) self.write(x) def newline(self, node: t.Optional[nodes.Node] = None, extra: int = 0) -> None: """Add one or more newlines before the next write.""" self._new_lines = max(self._new_lines, 1 + extra) if node is not None and node.lineno != self._last_line: self._write_debug_info = node.lineno self._last_line = node.lineno def signature( self, node: t.Union[nodes.Call, nodes.Filter, nodes.Test], frame: Frame, extra_kwargs: t.Optional[t.Mapping[str, t.Any]] = None, ) -> None: """Writes a function call to the stream for the current node. A leading comma is added automatically. 
The extra keyword arguments may not include python keywords otherwise a syntax error could occur. The extra keyword arguments should be given as python dict. """ # if any of the given keyword arguments is a python keyword # we have to make sure that no invalid call is created. kwarg_workaround = any( is_python_keyword(t.cast(str, k)) for k in chain((x.key for x in node.kwargs), extra_kwargs or ()) ) for arg in node.args: self.write(", ") self.visit(arg, frame) if not kwarg_workaround: for kwarg in node.kwargs: self.write(", ") self.visit(kwarg, frame) if extra_kwargs is not None: for key, value in extra_kwargs.items(): self.write(f", {key}={value}") if node.dyn_args: self.write(", *") self.visit(node.dyn_args, frame) if kwarg_workaround: if node.dyn_kwargs is not None: self.write(", **dict({") else: self.write(", **{") for kwarg in node.kwargs: self.write(f"{kwarg.key!r}: ") self.visit(kwarg.value, frame) self.write(", ") if extra_kwargs is not None: for key, value in extra_kwargs.items(): self.write(f"{key!r}: {value}, ") if node.dyn_kwargs is not None: self.write("}, **") self.visit(node.dyn_kwargs, frame) self.write(")") else: self.write("}") elif node.dyn_kwargs is not None: self.write(", **") self.visit(node.dyn_kwargs, frame) def pull_dependencies(self, nodes: t.Iterable[nodes.Node]) -> None: """Find all filter and test names used in the template and assign them to variables in the compiled namespace. Checking that the names are registered with the environment is done when compiling the Filter and Test nodes. If the node is in an If or CondExpr node, the check is done at runtime instead. .. versionchanged:: 3.0 Filters and tests in If and CondExpr nodes are checked at runtime instead of compile time. """ visitor = DependencyFinderVisitor() for node in nodes: visitor.visit(node) for id_map, names, dependency in (self.filters, visitor.filters, "filters"), ( self.tests, visitor.tests, "tests", ): for name in sorted(names): if name not in id_map: id_map[name] = self.temporary_identifier() # add check during runtime that dependencies used inside of executed # blocks are defined, as this step may be skipped during compile time self.writeline("try:") self.indent() self.writeline(f"{id_map[name]} = environment.{dependency}[{name!r}]") self.outdent() self.writeline("except KeyError:") self.indent() self.writeline("@internalcode") self.writeline(f"def {id_map[name]}(*unused):") self.indent() self.writeline( f'raise TemplateRuntimeError("No {dependency[:-1]}' f' named {name!r} found.")' ) self.outdent() self.outdent() def enter_frame(self, frame: Frame) -> None: undefs = [] for target, (action, param) in frame.symbols.loads.items(): if action == VAR_LOAD_PARAMETER: pass elif action == VAR_LOAD_RESOLVE: self.writeline(f"{target} = {self.get_resolve_func()}({param!r})") elif action == VAR_LOAD_ALIAS: self.writeline(f"{target} = {param}") elif action == VAR_LOAD_UNDEFINED: undefs.append(target) else: raise NotImplementedError("unknown load instruction") if undefs: self.writeline(f"{' = '.join(undefs)} = missing") def leave_frame(self, frame: Frame, with_python_scope: bool = False) -> None: if not with_python_scope: undefs = [] for target in frame.symbols.loads: undefs.append(target) if undefs: self.writeline(f"{' = '.join(undefs)} = missing") def choose_async(self, async_value: str = "async ", sync_value: str = "") -> str: return async_value if self.environment.is_async else sync_value def func(self, name: str) -> str: return f"{self.choose_async()}def {name}" def macro_body( self, node: 
t.Union[nodes.Macro, nodes.CallBlock], frame: Frame ) -> t.Tuple[Frame, MacroRef]: """Dump the function def of a macro or call block.""" frame = frame.inner() frame.symbols.analyze_node(node) macro_ref = MacroRef(node) explicit_caller = None skip_special_params = set() args = [] for idx, arg in enumerate(node.args): if arg.name == "caller": explicit_caller = idx if arg.name in ("kwargs", "varargs"): skip_special_params.add(arg.name) args.append(frame.symbols.ref(arg.name)) undeclared = find_undeclared(node.body, ("caller", "kwargs", "varargs")) if "caller" in undeclared: # In older Jinja versions there was a bug that allowed caller # to retain the special behavior even if it was mentioned in # the argument list. However thankfully this was only really # working if it was the last argument. So we are explicitly # checking this now and error out if it is anywhere else in # the argument list. if explicit_caller is not None: try: node.defaults[explicit_caller - len(node.args)] except IndexError: self.fail( "When defining macros or call blocks the " 'special "caller" argument must be omitted ' "or be given a default.", node.lineno, ) else: args.append(frame.symbols.declare_parameter("caller")) macro_ref.accesses_caller = True if "kwargs" in undeclared and "kwargs" not in skip_special_params: args.append(frame.symbols.declare_parameter("kwargs")) macro_ref.accesses_kwargs = True if "varargs" in undeclared and "varargs" not in skip_special_params: args.append(frame.symbols.declare_parameter("varargs")) macro_ref.accesses_varargs = True # macros are delayed, they never require output checks frame.require_output_check = False frame.symbols.analyze_node(node) self.writeline(f"{self.func('macro')}({', '.join(args)}):", node) self.indent() self.buffer(frame) self.enter_frame(frame) self.push_parameter_definitions(frame) for idx, arg in enumerate(node.args): ref = frame.symbols.ref(arg.name) self.writeline(f"if {ref} is missing:") self.indent() try: default = node.defaults[idx - len(node.args)] except IndexError: self.writeline( f'{ref} = undefined("parameter {arg.name!r} was not provided",' f" name={arg.name!r})" ) else: self.writeline(f"{ref} = ") self.visit(default, frame) self.mark_parameter_stored(ref) self.outdent() self.pop_parameter_definitions() self.blockvisit(node.body, frame) self.return_buffer_contents(frame, force_unescaped=True) self.leave_frame(frame, with_python_scope=True) self.outdent() return frame, macro_ref def macro_def(self, macro_ref: MacroRef, frame: Frame) -> None: """Dump the macro definition for the def created by macro_body.""" arg_tuple = ", ".join(repr(x.name) for x in macro_ref.node.args) name = getattr(macro_ref.node, "name", None) if len(macro_ref.node.args) == 1: arg_tuple += "," self.write( f"Macro(environment, macro, {name!r}, ({arg_tuple})," f" {macro_ref.accesses_kwargs!r}, {macro_ref.accesses_varargs!r}," f" {macro_ref.accesses_caller!r}, context.eval_ctx.autoescape)" ) def position(self, node: nodes.Node) -> str: """Return a human readable position for the node.""" rv = f"line {node.lineno}" if self.name is not None: rv = f"{rv} in {self.name!r}" return rv def dump_local_context(self, frame: Frame) -> str: items_kv = ", ".join( f"{name!r}: {target}" for name, target in frame.symbols.dump_stores().items() ) return f"{{{items_kv}}}" def write_commons(self) -> None: """Writes a common preamble that is used by root and block functions. Primarily this sets up common local helpers and enforces a generator through a dead branch. 
""" self.writeline("resolve = context.resolve_or_missing") self.writeline("undefined = environment.undefined") self.writeline("concat = environment.concat") # always use the standard Undefined class for the implicit else of # conditional expressions self.writeline("cond_expr_undefined = Undefined") self.writeline("if 0: yield None") def push_parameter_definitions(self, frame: Frame) -> None: """Pushes all parameter targets from the given frame into a local stack that permits tracking of yet to be assigned parameters. In particular this enables the optimization from `visit_Name` to skip undefined expressions for parameters in macros as macros can reference otherwise unbound parameters. """ self._param_def_block.append(frame.symbols.dump_param_targets()) def pop_parameter_definitions(self) -> None: """Pops the current parameter definitions set.""" self._param_def_block.pop() def mark_parameter_stored(self, target: str) -> None: """Marks a parameter in the current parameter definitions as stored. This will skip the enforced undefined checks. """ if self._param_def_block: self._param_def_block[-1].discard(target) def push_context_reference(self, target: str) -> None: self._context_reference_stack.append(target) def pop_context_reference(self) -> None: self._context_reference_stack.pop() def get_context_ref(self) -> str: return self._context_reference_stack[-1] def get_resolve_func(self) -> str: target = self._context_reference_stack[-1] if target == "context": return "resolve" return f"{target}.resolve" def derive_context(self, frame: Frame) -> str: return f"{self.get_context_ref()}.derived({self.dump_local_context(frame)})" def parameter_is_undeclared(self, target: str) -> bool: """Checks if a given target is an undeclared parameter.""" if not self._param_def_block: return False return target in self._param_def_block[-1] def push_assign_tracking(self) -> None: """Pushes a new layer for assignment tracking.""" self._assign_stack.append(set()) def pop_assign_tracking(self, frame: Frame) -> None: """Pops the topmost level for assignment tracking and updates the context variables if necessary. 
""" vars = self._assign_stack.pop() if ( not frame.block_frame and not frame.loop_frame and not frame.toplevel or not vars ): return public_names = [x for x in vars if x[:1] != "_"] if len(vars) == 1: name = next(iter(vars)) ref = frame.symbols.ref(name) if frame.loop_frame: self.writeline(f"_loop_vars[{name!r}] = {ref}") return if frame.block_frame: self.writeline(f"_block_vars[{name!r}] = {ref}") return self.writeline(f"context.vars[{name!r}] = {ref}") else: if frame.loop_frame: self.writeline("_loop_vars.update({") elif frame.block_frame: self.writeline("_block_vars.update({") else: self.writeline("context.vars.update({") for idx, name in enumerate(vars): if idx: self.write(", ") ref = frame.symbols.ref(name) self.write(f"{name!r}: {ref}") self.write("})") if not frame.block_frame and not frame.loop_frame and public_names: if len(public_names) == 1: self.writeline(f"context.exported_vars.add({public_names[0]!r})") else: names_str = ", ".join(map(repr, public_names)) self.writeline(f"context.exported_vars.update(({names_str}))") # -- Statement Visitors def visit_Template( self, node: nodes.Template, frame: t.Optional[Frame] = None ) -> None: assert frame is None, "no root frame allowed" eval_ctx = EvalContext(self.environment, self.name) from .runtime import exported, async_exported if self.environment.is_async: exported_names = sorted(exported + async_exported) else: exported_names = sorted(exported) self.writeline("from jinja2.runtime import " + ", ".join(exported_names)) # if we want a deferred initialization we cannot move the # environment into a local name envenv = "" if self.defer_init else ", environment=environment" # do we have an extends tag at all? If not, we can save some # overhead by just not processing any inheritance code. have_extends = node.find(nodes.Extends) is not None # find all blocks for block in node.find_all(nodes.Block): if block.name in self.blocks: self.fail(f"block {block.name!r} defined twice", block.lineno) self.blocks[block.name] = block # find all imports and import them for import_ in node.find_all(nodes.ImportedName): if import_.importname not in self.import_aliases: imp = import_.importname self.import_aliases[imp] = alias = self.temporary_identifier() if "." in imp: module, obj = imp.rsplit(".", 1) self.writeline(f"from {module} import {obj} as {alias}") else: self.writeline(f"import {imp} as {alias}") # add the load name self.writeline(f"name = {self.name!r}") # generate the root render function. self.writeline( f"{self.func('root')}(context, missing=missing{envenv}):", extra=1 ) self.indent() self.write_commons() # process the root frame = Frame(eval_ctx) if "self" in find_undeclared(node.body, ("self",)): ref = frame.symbols.declare_parameter("self") self.writeline(f"{ref} = TemplateReference(context)") frame.symbols.analyze_node(node) frame.toplevel = frame.rootlevel = True frame.require_output_check = have_extends and not self.has_known_extends if have_extends: self.writeline("parent_template = None") self.enter_frame(frame) self.pull_dependencies(node.body) self.blockvisit(node.body, frame) self.leave_frame(frame, with_python_scope=True) self.outdent() # make sure that the parent root is called. 
if have_extends: if not self.has_known_extends: self.indent() self.writeline("if parent_template is not None:") self.indent() if not self.environment.is_async: self.writeline("yield from parent_template.root_render_func(context)") else: self.writeline( "async for event in parent_template.root_render_func(context):" ) self.indent() self.writeline("yield event") self.outdent() self.outdent(1 + (not self.has_known_extends)) # at this point we now have the blocks collected and can visit them too. for name, block in self.blocks.items(): self.writeline( f"{self.func('block_' + name)}(context, missing=missing{envenv}):", block, 1, ) self.indent() self.write_commons() # It's important that we do not make this frame a child of the # toplevel template. This would cause a variety of # interesting issues with identifier tracking. block_frame = Frame(eval_ctx) block_frame.block_frame = True undeclared = find_undeclared(block.body, ("self", "super")) if "self" in undeclared: ref = block_frame.symbols.declare_parameter("self") self.writeline(f"{ref} = TemplateReference(context)") if "super" in undeclared: ref = block_frame.symbols.declare_parameter("super") self.writeline(f"{ref} = context.super({name!r}, block_{name})") block_frame.symbols.analyze_node(block) block_frame.block = name self.writeline("_block_vars = {}") self.enter_frame(block_frame) self.pull_dependencies(block.body) self.blockvisit(block.body, block_frame) self.leave_frame(block_frame, with_python_scope=True) self.outdent() blocks_kv_str = ", ".join(f"{x!r}: block_{x}" for x in self.blocks) self.writeline(f"blocks = {{{blocks_kv_str}}}", extra=1) debug_kv_str = "&".join(f"{k}={v}" for k, v in self.debug_info) self.writeline(f"debug_info = {debug_kv_str!r}") def visit_Block(self, node: nodes.Block, frame: Frame) -> None: """Call a block and register it for the template.""" level = 0 if frame.toplevel: # if we know that we are a child template, there is no need to # check if we are one if self.has_known_extends: return if self.extends_so_far > 0: self.writeline("if parent_template is None:") self.indent() level += 1 if node.scoped: context = self.derive_context(frame) else: context = self.get_context_ref() if node.required: self.writeline(f"if len(context.blocks[{node.name!r}]) <= 1:", node) self.indent() self.writeline( f'raise TemplateRuntimeError("Required block {node.name!r} not found")', node, ) self.outdent() if not self.environment.is_async and frame.buffer is None: self.writeline( f"yield from context.blocks[{node.name!r}][0]({context})", node ) else: self.writeline( f"{self.choose_async()}for event in" f" context.blocks[{node.name!r}][0]({context}):", node, ) self.indent() self.simple_write("event", frame) self.outdent() self.outdent(level) def visit_Extends(self, node: nodes.Extends, frame: Frame) -> None: """Calls the extender.""" if not frame.toplevel: self.fail("cannot use extend from a non top-level scope", node.lineno) # if the number of extends statements in general is zero so # far, we don't have to add a check if something extended # the template before this one. if self.extends_so_far > 0: # if we have a known extends we just add a template runtime # error into the generated code. We could catch that at compile # time too, but i welcome it not to confuse users by throwing the # same error at different times just "because we can". 
if not self.has_known_extends: self.writeline("if parent_template is not None:") self.indent() self.writeline('raise TemplateRuntimeError("extended multiple times")') # if we have a known extends already we don't need that code here # as we know that the template execution will end here. if self.has_known_extends: raise CompilerExit() else: self.outdent() self.writeline("parent_template = environment.get_template(", node) self.visit(node.template, frame) self.write(f", {self.name!r})") self.writeline("for name, parent_block in parent_template.blocks.items():") self.indent() self.writeline("context.blocks.setdefault(name, []).append(parent_block)") self.outdent() # if this extends statement was in the root level we can take # advantage of that information and simplify the generated code # in the top level from this point onwards if frame.rootlevel: self.has_known_extends = True # and now we have one more self.extends_so_far += 1 def visit_Include(self, node: nodes.Include, frame: Frame) -> None: """Handles includes.""" if node.ignore_missing: self.writeline("try:") self.indent() func_name = "get_or_select_template" if isinstance(node.template, nodes.Const): if isinstance(node.template.value, str): func_name = "get_template" elif isinstance(node.template.value, (tuple, list)): func_name = "select_template" elif isinstance(node.template, (nodes.Tuple, nodes.List)): func_name = "select_template" self.writeline(f"template = environment.{func_name}(", node) self.visit(node.template, frame) self.write(f", {self.name!r})") if node.ignore_missing: self.outdent() self.writeline("except TemplateNotFound:") self.indent() self.writeline("pass") self.outdent() self.writeline("else:") self.indent() skip_event_yield = False if node.with_context: self.writeline( f"{self.choose_async()}for event in template.root_render_func(" "template.new_context(context.get_all(), True," f" {self.dump_local_context(frame)})):" ) elif self.environment.is_async: self.writeline( "for event in (await template._get_default_module_async())" "._body_stream:" ) else: self.writeline("yield from template._get_default_module()._body_stream") skip_event_yield = True if not skip_event_yield: self.indent() self.simple_write("event", frame) self.outdent() if node.ignore_missing: self.outdent() def _import_common( self, node: t.Union[nodes.Import, nodes.FromImport], frame: Frame ) -> None: self.write(f"{self.choose_async('await ')}environment.get_template(") self.visit(node.template, frame) self.write(f", {self.name!r}).") if node.with_context: f_name = f"make_module{self.choose_async('_async')}" self.write( f"{f_name}(context.get_all(), True, {self.dump_local_context(frame)})" ) else: self.write(f"_get_default_module{self.choose_async('_async')}(context)") def visit_Import(self, node: nodes.Import, frame: Frame) -> None: """Visit regular imports.""" self.writeline(f"{frame.symbols.ref(node.target)} = ", node) if frame.toplevel: self.write(f"context.vars[{node.target!r}] = ") self._import_common(node, frame) if frame.toplevel and not node.target.startswith("_"): self.writeline(f"context.exported_vars.discard({node.target!r})") def visit_FromImport(self, node: nodes.FromImport, frame: Frame) -> None: """Visit named imports.""" self.newline(node) self.write("included_template = ") self._import_common(node, frame) var_names = [] discarded_names = [] for name in node.names: if isinstance(name, tuple): name, alias = name else: alias = name self.writeline( f"{frame.symbols.ref(alias)} =" f" getattr(included_template, {name!r}, missing)" ) 
self.writeline(f"if {frame.symbols.ref(alias)} is missing:") self.indent() message = ( "the template {included_template.__name__!r}" f" (imported on {self.position(node)})" f" does not export the requested name {name!r}" ) self.writeline( f"{frame.symbols.ref(alias)} = undefined(f{message!r}, name={name!r})" ) self.outdent() if frame.toplevel: var_names.append(alias) if not alias.startswith("_"): discarded_names.append(alias) if var_names: if len(var_names) == 1: name = var_names[0] self.writeline(f"context.vars[{name!r}] = {frame.symbols.ref(name)}") else: names_kv = ", ".join( f"{name!r}: {frame.symbols.ref(name)}" for name in var_names ) self.writeline(f"context.vars.update({{{names_kv}}})") if discarded_names: if len(discarded_names) == 1: self.writeline(f"context.exported_vars.discard({discarded_names[0]!r})") else: names_str = ", ".join(map(repr, discarded_names)) self.writeline( f"context.exported_vars.difference_update(({names_str}))" ) def visit_For(self, node: nodes.For, frame: Frame) -> None: loop_frame = frame.inner() loop_frame.loop_frame = True test_frame = frame.inner() else_frame = frame.inner() # try to figure out if we have an extended loop. An extended loop # is necessary if the loop is in recursive mode if the special loop # variable is accessed in the body if the body is a scoped block. extended_loop = ( node.recursive or "loop" in find_undeclared(node.iter_child_nodes(only=("body",)), ("loop",)) or any(block.scoped for block in node.find_all(nodes.Block)) ) loop_ref = None if extended_loop: loop_ref = loop_frame.symbols.declare_parameter("loop") loop_frame.symbols.analyze_node(node, for_branch="body") if node.else_: else_frame.symbols.analyze_node(node, for_branch="else") if node.test: loop_filter_func = self.temporary_identifier() test_frame.symbols.analyze_node(node, for_branch="test") self.writeline(f"{self.func(loop_filter_func)}(fiter):", node.test) self.indent() self.enter_frame(test_frame) self.writeline(self.choose_async("async for ", "for ")) self.visit(node.target, loop_frame) self.write(" in ") self.write(self.choose_async("auto_aiter(fiter)", "fiter")) self.write(":") self.indent() self.writeline("if ", node.test) self.visit(node.test, test_frame) self.write(":") self.indent() self.writeline("yield ") self.visit(node.target, loop_frame) self.outdent(3) self.leave_frame(test_frame, with_python_scope=True) # if we don't have an recursive loop we have to find the shadowed # variables at that point. Because loops can be nested but the loop # variable is a special one we have to enforce aliasing for it. 
if node.recursive: self.writeline( f"{self.func('loop')}(reciter, loop_render_func, depth=0):", node ) self.indent() self.buffer(loop_frame) # Use the same buffer for the else frame else_frame.buffer = loop_frame.buffer # make sure the loop variable is a special one and raise a template # assertion error if a loop tries to write to loop if extended_loop: self.writeline(f"{loop_ref} = missing") for name in node.find_all(nodes.Name): if name.ctx == "store" and name.name == "loop": self.fail( "Can't assign to special loop variable in for-loop target", name.lineno, ) if node.else_: iteration_indicator = self.temporary_identifier() self.writeline(f"{iteration_indicator} = 1") self.writeline(self.choose_async("async for ", "for "), node) self.visit(node.target, loop_frame) if extended_loop: self.write(f", {loop_ref} in {self.choose_async('Async')}LoopContext(") else: self.write(" in ") if node.test: self.write(f"{loop_filter_func}(") if node.recursive: self.write("reciter") else: if self.environment.is_async and not extended_loop: self.write("auto_aiter(") self.visit(node.iter, frame) if self.environment.is_async and not extended_loop: self.write(")") if node.test: self.write(")") if node.recursive: self.write(", undefined, loop_render_func, depth):") else: self.write(", undefined):" if extended_loop else ":") self.indent() self.enter_frame(loop_frame) self.writeline("_loop_vars = {}") self.blockvisit(node.body, loop_frame) if node.else_: self.writeline(f"{iteration_indicator} = 0") self.outdent() self.leave_frame( loop_frame, with_python_scope=node.recursive and not node.else_ ) if node.else_: self.writeline(f"if {iteration_indicator}:") self.indent() self.enter_frame(else_frame) self.blockvisit(node.else_, else_frame) self.leave_frame(else_frame) self.outdent() # if the node was recursive we have to return the buffer contents # and start the iteration code if node.recursive: self.return_buffer_contents(loop_frame) self.outdent() self.start_write(frame, node) self.write(f"{self.choose_async('await ')}loop(") if self.environment.is_async: self.write("auto_aiter(") self.visit(node.iter, frame) if self.environment.is_async: self.write(")") self.write(", loop)") self.end_write(frame) # at the end of the iteration, clear any assignments made in the # loop from the top level if self._assign_stack: self._assign_stack[-1].difference_update(loop_frame.symbols.stores) def visit_If(self, node: nodes.If, frame: Frame) -> None: if_frame = frame.soft() self.writeline("if ", node) self.visit(node.test, if_frame) self.write(":") self.indent() self.blockvisit(node.body, if_frame) self.outdent() for elif_ in node.elif_: self.writeline("elif ", elif_) self.visit(elif_.test, if_frame) self.write(":") self.indent() self.blockvisit(elif_.body, if_frame) self.outdent() if node.else_: self.writeline("else:") self.indent() self.blockvisit(node.else_, if_frame) self.outdent() def visit_Macro(self, node: nodes.Macro, frame: Frame) -> None: macro_frame, macro_ref = self.macro_body(node, frame) self.newline() if frame.toplevel: if not node.name.startswith("_"): self.write(f"context.exported_vars.add({node.name!r})") self.writeline(f"context.vars[{node.name!r}] = ") self.write(f"{frame.symbols.ref(node.name)} = ") self.macro_def(macro_ref, macro_frame) def visit_CallBlock(self, node: nodes.CallBlock, frame: Frame) -> None: call_frame, macro_ref = self.macro_body(node, frame) self.writeline("caller = ") self.macro_def(macro_ref, call_frame) self.start_write(frame, node) self.visit_Call(node.call, frame, forward_caller=True) 
self.end_write(frame) def visit_FilterBlock(self, node: nodes.FilterBlock, frame: Frame) -> None: filter_frame = frame.inner() filter_frame.symbols.analyze_node(node) self.enter_frame(filter_frame) self.buffer(filter_frame) self.blockvisit(node.body, filter_frame) self.start_write(frame, node) self.visit_Filter(node.filter, filter_frame) self.end_write(frame) self.leave_frame(filter_frame) def visit_With(self, node: nodes.With, frame: Frame) -> None: with_frame = frame.inner() with_frame.symbols.analyze_node(node) self.enter_frame(with_frame) for target, expr in zip(node.targets, node.values): self.newline() self.visit(target, with_frame) self.write(" = ") self.visit(expr, frame) self.blockvisit(node.body, with_frame) self.leave_frame(with_frame) def visit_ExprStmt(self, node: nodes.ExprStmt, frame: Frame) -> None: self.newline(node) self.visit(node.node, frame) class _FinalizeInfo(t.NamedTuple): const: t.Optional[t.Callable[..., str]] src: t.Optional[str] @staticmethod def _default_finalize(value: t.Any) -> t.Any: """The default finalize function if the environment isn't configured with one. Or, if the environment has one, this is called on that function's output for constants. """ return str(value) _finalize: t.Optional[_FinalizeInfo] = None def _make_finalize(self) -> _FinalizeInfo: """Build the finalize function to be used on constants and at runtime. Cached so it's only created once for all output nodes. Returns a ``namedtuple`` with the following attributes: ``const`` A function to finalize constant data at compile time. ``src`` Source code to output around nodes to be evaluated at runtime. """ if self._finalize is not None: return self._finalize finalize: t.Optional[t.Callable[..., t.Any]] finalize = default = self._default_finalize src = None if self.environment.finalize: src = "environment.finalize(" env_finalize = self.environment.finalize pass_arg = { _PassArg.context: "context", _PassArg.eval_context: "context.eval_ctx", _PassArg.environment: "environment", }.get( _PassArg.from_obj(env_finalize) # type: ignore ) finalize = None if pass_arg is None: def finalize(value: t.Any) -> t.Any: return default(env_finalize(value)) else: src = f"{src}{pass_arg}, " if pass_arg == "environment": def finalize(value: t.Any) -> t.Any: return default(env_finalize(self.environment, value)) self._finalize = self._FinalizeInfo(finalize, src) return self._finalize def _output_const_repr(self, group: t.Iterable[t.Any]) -> str: """Given a group of constant values converted from ``Output`` child nodes, produce a string to write to the template module source. """ return repr(concat(group)) def _output_child_to_const( self, node: nodes.Expr, frame: Frame, finalize: _FinalizeInfo ) -> str: """Try to optimize a child of an ``Output`` node by trying to convert it to constant, finalized data at compile time. If :exc:`Impossible` is raised, the node is not constant and will be evaluated at runtime. Any other exception will also be evaluated at runtime for easier debugging. """ const = node.as_const(frame.eval_ctx) if frame.eval_ctx.autoescape: const = escape(const) # Template data doesn't go through finalize. if isinstance(node, nodes.TemplateData): return str(const) return finalize.const(const) # type: ignore def _output_child_pre( self, node: nodes.Expr, frame: Frame, finalize: _FinalizeInfo ) -> None: """Output extra source code before visiting a child of an ``Output`` node. 
""" if frame.eval_ctx.volatile: self.write("(escape if context.eval_ctx.autoescape else str)(") elif frame.eval_ctx.autoescape: self.write("escape(") else: self.write("str(") if finalize.src is not None: self.write(finalize.src) def _output_child_post( self, node: nodes.Expr, frame: Frame, finalize: _FinalizeInfo ) -> None: """Output extra source code after visiting a child of an ``Output`` node. """ self.write(")") if finalize.src is not None: self.write(")") def visit_Output(self, node: nodes.Output, frame: Frame) -> None: # If an extends is active, don't render outside a block. if frame.require_output_check: # A top-level extends is known to exist at compile time. if self.has_known_extends: return self.writeline("if parent_template is None:") self.indent() finalize = self._make_finalize() body: t.List[t.Union[t.List[t.Any], nodes.Expr]] = [] # Evaluate constants at compile time if possible. Each item in # body will be either a list of static data or a node to be # evaluated at runtime. for child in node.nodes: try: if not ( # If the finalize function requires runtime context, # constants can't be evaluated at compile time. finalize.const # Unless it's basic template data that won't be # finalized anyway. or isinstance(child, nodes.TemplateData) ): raise nodes.Impossible() const = self._output_child_to_const(child, frame, finalize) except (nodes.Impossible, Exception): # The node was not constant and needs to be evaluated at # runtime. Or another error was raised, which is easier # to debug at runtime. body.append(child) continue if body and isinstance(body[-1], list): body[-1].append(const) else: body.append([const]) if frame.buffer is not None: if len(body) == 1: self.writeline(f"{frame.buffer}.append(") else: self.writeline(f"{frame.buffer}.extend((") self.indent() for item in body: if isinstance(item, list): # A group of constant data to join and output. val = self._output_const_repr(item) if frame.buffer is None: self.writeline("yield " + val) else: self.writeline(val + ",") else: if frame.buffer is None: self.writeline("yield ", item) else: self.newline(item) # A node to be evaluated at runtime. self._output_child_pre(item, frame, finalize) self.visit(item, frame) self._output_child_post(item, frame, finalize) if frame.buffer is not None: self.write(",") if frame.buffer is not None: self.outdent() self.writeline(")" if len(body) == 1 else "))") if frame.require_output_check: self.outdent() def visit_Assign(self, node: nodes.Assign, frame: Frame) -> None: self.push_assign_tracking() self.newline(node) self.visit(node.target, frame) self.write(" = ") self.visit(node.node, frame) self.pop_assign_tracking(frame) def visit_AssignBlock(self, node: nodes.AssignBlock, frame: Frame) -> None: self.push_assign_tracking() block_frame = frame.inner() # This is a special case. Since a set block always captures we # will disable output checks. This way one can use set blocks # toplevel even in extended templates. 
block_frame.require_output_check = False block_frame.symbols.analyze_node(node) self.enter_frame(block_frame) self.buffer(block_frame) self.blockvisit(node.body, block_frame) self.newline(node) self.visit(node.target, frame) self.write(" = (Markup if context.eval_ctx.autoescape else identity)(") if node.filter is not None: self.visit_Filter(node.filter, block_frame) else: self.write(f"concat({block_frame.buffer})") self.write(")") self.pop_assign_tracking(frame) self.leave_frame(block_frame) # -- Expression Visitors def visit_Name(self, node: nodes.Name, frame: Frame) -> None: if node.ctx == "store" and ( frame.toplevel or frame.loop_frame or frame.block_frame ): if self._assign_stack: self._assign_stack[-1].add(node.name) ref = frame.symbols.ref(node.name) # If we are looking up a variable we might have to deal with the # case where it's undefined. We can skip that case if the load # instruction indicates a parameter which are always defined. if node.ctx == "load": load = frame.symbols.find_load(ref) if not ( load is not None and load[0] == VAR_LOAD_PARAMETER and not self.parameter_is_undeclared(ref) ): self.write( f"(undefined(name={node.name!r}) if {ref} is missing else {ref})" ) return self.write(ref) def visit_NSRef(self, node: nodes.NSRef, frame: Frame) -> None: # NSRefs can only be used to store values; since they use the normal # `foo.bar` notation they will be parsed as a normal attribute access # when used anywhere but in a `set` context ref = frame.symbols.ref(node.name) self.writeline(f"if not isinstance({ref}, Namespace):") self.indent() self.writeline( "raise TemplateRuntimeError" '("cannot assign attribute on non-namespace object")' ) self.outdent() self.writeline(f"{ref}[{node.attr!r}]") def visit_Const(self, node: nodes.Const, frame: Frame) -> None: val = node.as_const(frame.eval_ctx) if isinstance(val, float): self.write(str(val)) else: self.write(repr(val)) def visit_TemplateData(self, node: nodes.TemplateData, frame: Frame) -> None: try: self.write(repr(node.as_const(frame.eval_ctx))) except nodes.Impossible: self.write( f"(Markup if context.eval_ctx.autoescape else identity)({node.data!r})" ) def visit_Tuple(self, node: nodes.Tuple, frame: Frame) -> None: self.write("(") idx = -1 for idx, item in enumerate(node.items): if idx: self.write(", ") self.visit(item, frame) self.write(",)" if idx == 0 else ")") def visit_List(self, node: nodes.List, frame: Frame) -> None: self.write("[") for idx, item in enumerate(node.items): if idx: self.write(", ") self.visit(item, frame) self.write("]") def visit_Dict(self, node: nodes.Dict, frame: Frame) -> None: self.write("{") for idx, item in enumerate(node.items): if idx: self.write(", ") self.visit(item.key, frame) self.write(": ") self.visit(item.value, frame) self.write("}") visit_Add = _make_binop("+") visit_Sub = _make_binop("-") visit_Mul = _make_binop("*") visit_Div = _make_binop("/") visit_FloorDiv = _make_binop("//") visit_Pow = _make_binop("**") visit_Mod = _make_binop("%") visit_And = _make_binop("and") visit_Or = _make_binop("or") visit_Pos = _make_unop("+") visit_Neg = _make_unop("-") visit_Not = _make_unop("not ") @optimizeconst def visit_Concat(self, node: nodes.Concat, frame: Frame) -> None: if frame.eval_ctx.volatile: func_name = "(markup_join if context.eval_ctx.volatile else str_join)" elif frame.eval_ctx.autoescape: func_name = "markup_join" else: func_name = "str_join" self.write(f"{func_name}((") for arg in node.nodes: self.visit(arg, frame) self.write(", ") self.write("))") @optimizeconst def 
visit_Compare(self, node: nodes.Compare, frame: Frame) -> None: self.write("(") self.visit(node.expr, frame) for op in node.ops: self.visit(op, frame) self.write(")") def visit_Operand(self, node: nodes.Operand, frame: Frame) -> None: self.write(f" {operators[node.op]} ") self.visit(node.expr, frame) @optimizeconst def visit_Getattr(self, node: nodes.Getattr, frame: Frame) -> None: if self.environment.is_async: self.write("(await auto_await(") self.write("environment.getattr(") self.visit(node.node, frame) self.write(f", {node.attr!r})") if self.environment.is_async: self.write("))") @optimizeconst def visit_Getitem(self, node: nodes.Getitem, frame: Frame) -> None: # slices bypass the environment getitem method. if isinstance(node.arg, nodes.Slice): self.visit(node.node, frame) self.write("[") self.visit(node.arg, frame) self.write("]") else: if self.environment.is_async: self.write("(await auto_await(") self.write("environment.getitem(") self.visit(node.node, frame) self.write(", ") self.visit(node.arg, frame) self.write(")") if self.environment.is_async: self.write("))") def visit_Slice(self, node: nodes.Slice, frame: Frame) -> None: if node.start is not None: self.visit(node.start, frame) self.write(":") if node.stop is not None: self.visit(node.stop, frame) if node.step is not None: self.write(":") self.visit(node.step, frame) @contextmanager def _filter_test_common( self, node: t.Union[nodes.Filter, nodes.Test], frame: Frame, is_filter: bool ) -> t.Iterator[None]: if self.environment.is_async: self.write("(await auto_await(") if is_filter: self.write(f"{self.filters[node.name]}(") func = self.environment.filters.get(node.name) else: self.write(f"{self.tests[node.name]}(") func = self.environment.tests.get(node.name) # When inside an If or CondExpr frame, allow the filter to be # undefined at compile time and only raise an error if it's # actually called at runtime. See pull_dependencies. if func is None and not frame.soft_frame: type_name = "filter" if is_filter else "test" self.fail(f"No {type_name} named {node.name!r}.", node.lineno) pass_arg = { _PassArg.context: "context", _PassArg.eval_context: "context.eval_ctx", _PassArg.environment: "environment", }.get( _PassArg.from_obj(func) # type: ignore ) if pass_arg is not None: self.write(f"{pass_arg}, ") # Back to the visitor function to handle visiting the target of # the filter or test. 
yield self.signature(node, frame) self.write(")") if self.environment.is_async: self.write("))") @optimizeconst def visit_Filter(self, node: nodes.Filter, frame: Frame) -> None: with self._filter_test_common(node, frame, True): # if the filter node is None we are inside a filter block # and want to write to the current buffer if node.node is not None: self.visit(node.node, frame) elif frame.eval_ctx.volatile: self.write( f"(Markup(concat({frame.buffer}))" f" if context.eval_ctx.autoescape else concat({frame.buffer}))" ) elif frame.eval_ctx.autoescape: self.write(f"Markup(concat({frame.buffer}))") else: self.write(f"concat({frame.buffer})") @optimizeconst def visit_Test(self, node: nodes.Test, frame: Frame) -> None: with self._filter_test_common(node, frame, False): self.visit(node.node, frame) @optimizeconst def visit_CondExpr(self, node: nodes.CondExpr, frame: Frame) -> None: frame = frame.soft() def write_expr2() -> None: if node.expr2 is not None: self.visit(node.expr2, frame) return self.write( f'cond_expr_undefined("the inline if-expression on' f" {self.position(node)} evaluated to false and no else" f' section was defined.")' ) self.write("(") self.visit(node.expr1, frame) self.write(" if ") self.visit(node.test, frame) self.write(" else ") write_expr2() self.write(")") @optimizeconst def visit_Call( self, node: nodes.Call, frame: Frame, forward_caller: bool = False ) -> None: if self.environment.is_async: self.write("(await auto_await(") if self.environment.sandboxed: self.write("environment.call(context, ") else: self.write("context.call(") self.visit(node.node, frame) extra_kwargs = {"caller": "caller"} if forward_caller else None loop_kwargs = {"_loop_vars": "_loop_vars"} if frame.loop_frame else {} block_kwargs = {"_block_vars": "_block_vars"} if frame.block_frame else {} if extra_kwargs: extra_kwargs.update(loop_kwargs, **block_kwargs) elif loop_kwargs or block_kwargs: extra_kwargs = dict(loop_kwargs, **block_kwargs) self.signature(node, frame, extra_kwargs) self.write(")") if self.environment.is_async: self.write("))") def visit_Keyword(self, node: nodes.Keyword, frame: Frame) -> None: self.write(node.key + "=") self.visit(node.value, frame) # -- Unused nodes for extensions def visit_MarkSafe(self, node: nodes.MarkSafe, frame: Frame) -> None: self.write("Markup(") self.visit(node.expr, frame) self.write(")") def visit_MarkSafeIfAutoescape( self, node: nodes.MarkSafeIfAutoescape, frame: Frame ) -> None: self.write("(Markup if context.eval_ctx.autoescape else identity)(") self.visit(node.expr, frame) self.write(")") def visit_EnvironmentAttribute( self, node: nodes.EnvironmentAttribute, frame: Frame ) -> None: self.write("environment." 
+ node.name) def visit_ExtensionAttribute( self, node: nodes.ExtensionAttribute, frame: Frame ) -> None: self.write(f"environment.extensions[{node.identifier!r}].{node.name}") def visit_ImportedName(self, node: nodes.ImportedName, frame: Frame) -> None: self.write(self.import_aliases[node.importname]) def visit_InternalName(self, node: nodes.InternalName, frame: Frame) -> None: self.write(node.name) def visit_ContextReference( self, node: nodes.ContextReference, frame: Frame ) -> None: self.write("context") def visit_DerivedContextReference( self, node: nodes.DerivedContextReference, frame: Frame ) -> None: self.write(self.derive_context(frame)) def visit_Continue(self, node: nodes.Continue, frame: Frame) -> None: self.writeline("continue", node) def visit_Break(self, node: nodes.Break, frame: Frame) -> None: self.writeline("break", node) def visit_Scope(self, node: nodes.Scope, frame: Frame) -> None: scope_frame = frame.inner() scope_frame.symbols.analyze_node(node) self.enter_frame(scope_frame) self.blockvisit(node.body, scope_frame) self.leave_frame(scope_frame) def visit_OverlayScope(self, node: nodes.OverlayScope, frame: Frame) -> None: ctx = self.temporary_identifier() self.writeline(f"{ctx} = {self.derive_context(frame)}") self.writeline(f"{ctx}.vars = ") self.visit(node.context, frame) self.push_context_reference(ctx) scope_frame = frame.inner(isolated=True) scope_frame.symbols.analyze_node(node) self.enter_frame(scope_frame) self.blockvisit(node.body, scope_frame) self.leave_frame(scope_frame) self.pop_context_reference() def visit_EvalContextModifier( self, node: nodes.EvalContextModifier, frame: Frame ) -> None: for keyword in node.options: self.writeline(f"context.eval_ctx.{keyword.key} = ") self.visit(keyword.value, frame) try: val = keyword.value.as_const(frame.eval_ctx) except nodes.Impossible: frame.eval_ctx.volatile = True else: setattr(frame.eval_ctx, keyword.key, val) def visit_ScopedEvalContextModifier( self, node: nodes.ScopedEvalContextModifier, frame: Frame ) -> None: old_ctx_name = self.temporary_identifier() saved_ctx = frame.eval_ctx.save() self.writeline(f"{old_ctx_name} = context.eval_ctx.save()") self.visit_EvalContextModifier(node, frame) for child in node.body: self.visit(child, frame) frame.eval_ctx.revert(saved_ctx) self.writeline(f"context.eval_ctx.revert({old_ctx_name})")
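# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original compiler.py. It shows
# how the CodeGenerator above is normally exercised: Environment.compile()
# drives generate(), and passing raw=True returns the generated Python module
# source instead of a code object. The template string is invented purely for
# demonstration.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from jinja2 import Environment
    from jinja2.compiler import generate

    env = Environment()
    demo_source = "{% for item in items %}{{ item }}{% endfor %}"

    # High-level path: raw=True returns the generated module source as a string.
    print(env.compile(demo_source, raw=True))

    # Lower-level path: parse to a nodes.Template and call generate() directly.
    print(generate(env.parse(demo_source), env, name=None, filename=None))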
repo_name: pallets/jinja
path: src/jinja2/compiler.py
language: Python
license: bsd-3-clause
size: 72,172
keyword: [ "VisIt" ]
text_hash: 1acf8df13849ece58ae3eade2a83bc5a1d595f3f79315a6104a3557fbe6a06bf
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Gaussian mixture models Operations.""" # TODO(xavigonzalvo): Factor out covariance matrix operations to make # code reusable for different types (e.g. diag). from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from six.moves import xrange # pylint: disable=redefined-builtin import tensorflow as tf from tensorflow.python.ops.embedding_ops import embedding_lookup # Machine epsilon. MEPS = np.finfo(float).eps FULL_COVARIANCE = 'full' DIAG_COVARIANCE = 'diag' def _covariance(x, diag): """Defines the covariance operation of a matrix. Args: x: a matrix Tensor. Dimension 0 should contain the number of examples. diag: if True, it computes the diagonal covariance. Returns: A Tensor representing the covariance of x. In the case of diagonal matrix just the diagonal is returned. """ num_points = tf.to_float(tf.shape(x)[0]) x -= tf.reduce_mean(x, 0, keep_dims=True) if diag: cov = tf.reduce_sum( tf.square(x), 0, keep_dims=True) / (num_points - 1) else: cov = tf.matmul(x, x, transpose_a=True) / (num_points - 1) return cov def _init_clusters_random(data, num_clusters, random_seed): """Does random initialization of clusters. Args: data: a list of Tensors with a matrix of data, each row is an example. num_clusters: an integer with the number of clusters. random_seed: Seed for PRNG used to initialize seeds. Returns: A Tensor with num_clusters random rows of data. """ assert isinstance(data, list) num_data = tf.add_n([tf.shape(inp)[0] for inp in data]) with tf.control_dependencies([tf.assert_less_equal(num_clusters, num_data)]): indices = tf.random_uniform([num_clusters], minval=0, maxval=tf.cast(num_data, tf.int64), seed=random_seed, dtype=tf.int64) indices = tf.cast(indices, tf.int32) % num_data clusters_init = embedding_lookup(data, indices, partition_strategy='div') return clusters_init class GmmAlgorithm(object): """Tensorflow Gaussian mixture model clustering class.""" CLUSTERS_VARIABLE = 'clusters' CLUSTERS_COVS_VARIABLE = 'clusters_covs' def __init__(self, data, num_classes, initial_means=None, params='wmc', covariance_type=FULL_COVARIANCE, random_seed=0): """Constructor. Args: data: a list of Tensors with data, each row is a new example. num_classes: number of clusters. initial_means: a Tensor with a matrix of means. If None, means are computed by sampling randomly. params: Controls which parameters are updated in the training process. Can contain any combination of "w" for weights, "m" for means, and "c" for covariances. covariance_type: one of "full", "diag". random_seed: Seed for PRNG used to initialize seeds. Raises: Exception if covariance type is unknown. 
""" self._params = params self._random_seed = random_seed self._covariance_type = covariance_type if self._covariance_type not in [DIAG_COVARIANCE, FULL_COVARIANCE]: raise Exception( # pylint: disable=g-doc-exception 'programmer error: Invalid covariance type: %s' % self._covariance_type) # Create sharded variables for multiple shards. The following # lists are indexed by shard. # Probability per example in a class. num_shards = len(data) self._probs = [None] * num_shards # Prior probability. self._prior_probs = [None] * num_shards # Membership weights w_{ik} where "i" is the i-th example and "k" # is the k-th mixture. self._w = [None] * num_shards # Number of examples in a class. self._points_in_k = [None] * num_shards first_shard = data[0] self._dimensions = tf.shape(first_shard)[1] self._num_classes = num_classes # Small value to guarantee that covariances are invertible. self._min_var = tf.diag(tf.ones(tf.pack([self._dimensions]))) * 1e-3 self._create_variables(data, initial_means) # Operations of partial statistics for the computation of the means. self._w_mul_x = [] # Operations of partial statistics for the computation of the covariances. self._w_mul_x2 = [] self._define_graph(data) def _create_variables(self, data, initial_means=None): """Initializes GMM algorithm. Args: data: a list of Tensors with data, each row is a new example. initial_means: a Tensor with a matrix of means. """ first_shard = data[0] # Initialize means: num_classes X 1 X dimensions. if initial_means is not None: self._means = tf.Variable(tf.expand_dims(initial_means, 1), name=self.CLUSTERS_VARIABLE, validate_shape=False, dtype=tf.float32) else: # Sample data randomly self._means = tf.Variable(tf.expand_dims( _init_clusters_random(data, self._num_classes, self._random_seed), 1), name=self.CLUSTERS_VARIABLE, validate_shape=False) # Initialize covariances. if self._covariance_type == FULL_COVARIANCE: cov = _covariance(first_shard, False) + self._min_var # A matrix per class, num_classes X dimensions X dimensions covs = tf.tile( tf.expand_dims(cov, 0), [self._num_classes, 1, 1]) elif self._covariance_type == DIAG_COVARIANCE: cov = _covariance(first_shard, True) + self._min_var # A diagonal per row, num_classes X dimensions. covs = tf.tile(tf.expand_dims(tf.diag_part(cov), 0), [self._num_classes, 1]) self._covs = tf.Variable(covs, name='clusters_covs', validate_shape=False) # Mixture weights, representing the probability that a randomly # selected unobservable data (in EM terms) was generated by component k. self._alpha = tf.Variable(tf.tile([1.0 / self._num_classes], [self._num_classes])) def training_ops(self): """Returns the training operation.""" return self._train_ops def alphas(self): return self._alpha def clusters(self): """Returns the clusters with dimensions num_classes X 1 X num_dimensions.""" return self._means def covariances(self): """Returns the covariances matrices.""" return self._covs def assignments(self): """Returns a list of Tensors with the matrix of assignments per shard.""" ret = [] for w in self._w: ret.append(tf.argmax(w, 1)) return ret def scores(self): """Returns the distances to each class. Returns: A tuple with two Tensors. The first contains the distance to each class. The second contains the distance to the assigned class. """ return (self._all_scores, self._scores) def _define_graph(self, data): """Define graph for a single iteration. Args: data: a list of Tensors defining the training data. 
""" for shard_id, shard in enumerate(data): self._num_examples = tf.shape(shard)[0] shard = tf.expand_dims(shard, 0) self._define_log_prob_operation(shard_id, shard) self._define_prior_log_prob_operation(shard_id) self._define_expectation_operation(shard_id) self._define_partial_maximization_operation(shard_id, shard) self._define_maximization_operation(len(data)) self._define_distance_to_clusters(data) def _define_full_covariance_probs(self, shard_id, shard): """Defines the full covariance probabilties per example in a class. Updates a matrix with dimension num_examples X num_classes. Args: shard_id: id of the current shard. shard: current data shard, 1 X num_examples X dimensions. """ diff = shard - self._means cholesky = tf.cholesky(self._covs + self._min_var) log_det_covs = 2.0 * tf.reduce_sum(tf.log(tf.matrix_diag_part(cholesky)), 1) x_mu_cov = tf.square( tf.matrix_triangular_solve( cholesky, tf.transpose( diff, perm=[0, 2, 1]), lower=True)) diag_m = tf.transpose(tf.reduce_sum(x_mu_cov, 1)) self._probs[shard_id] = -0.5 * ( diag_m + tf.to_float(self._dimensions) * tf.log(2 * np.pi) + log_det_covs) def _define_diag_covariance_probs(self, shard_id, shard): """Defines the diagonal covariance probabilities per example in a class. Args: shard_id: id of the current shard. shard: current data shard, 1 X num_examples X dimensions. Returns a matrix num_examples * num_classes. """ # num_classes X 1 # TODO(xavigonzalvo): look into alternatives to log for # reparametrization of variance parameters. det_expanded = tf.reduce_sum(tf.log(self._covs + 1e-3), 1, keep_dims=True) diff = shard - self._means x2 = tf.square(diff) cov_expanded = tf.expand_dims(1.0 / (self._covs + 1e-3), 2) # num_classes X num_examples x2_cov = tf.batch_matmul(x2, cov_expanded) x2_cov = tf.transpose(tf.squeeze(x2_cov, [2])) self._probs[shard_id] = -0.5 * ( tf.to_float(self._dimensions) * tf.log(2.0 * np.pi) + tf.transpose(det_expanded) + x2_cov) def _define_log_prob_operation(self, shard_id, shard): """Probability per example in a class. Updates a matrix with dimension num_examples X num_classes. Args: shard_id: id of the current shard. shard: current data shard, 1 X num_examples X dimensions. """ # TODO(xavigonzalvo): Use the pdf defined in # third_party/tensorflow/contrib/distributions/python/ops/gaussian.py if self._covariance_type == FULL_COVARIANCE: self._define_full_covariance_probs(shard_id, shard) elif self._covariance_type == DIAG_COVARIANCE: self._define_diag_covariance_probs(shard_id, shard) self._probs[shard_id] += tf.log(self._alpha) def _define_prior_log_prob_operation(self, shard_id): """Computes the prior probability of all samples. Updates a vector where each item is the prior probabibility of an input example. Args: shard_id: id of current shard_id. """ self._prior_probs[shard_id] = tf.log( tf.reduce_sum(tf.exp(self._probs[shard_id]), 1, keep_dims=True)) def _define_expectation_operation(self, shard_id): # Shape broadcasting. probs = tf.expand_dims(self._probs[shard_id], 0) # Membership weights are computed as: # w_{ik} = \frac{\alpha_k f(\mathbf{y_i}|\mathbf{\theta}_k)} # {\sum_{m=1}^{K}\alpha_mf(\mathbf{y_i}|\mathbf{\theta}_m)} # where "i" is the i-th example, "k" is the k-th mixture, theta are # the model parameters and y_i the observations. # These are defined for each shard. 
self._w[shard_id] = tf.reshape( tf.exp(probs - self._prior_probs[shard_id]), tf.pack([self._num_examples, self._num_classes])) def _define_partial_maximization_operation(self, shard_id, shard): """Computes the partial statistics of the means and covariances. Args: shard_id: current shard id. shard: current data shard, 1 X num_examples X dimensions. """ # Soft assignment of each data point to each of the two clusters. self._points_in_k[shard_id] = tf.reduce_sum(self._w[shard_id], 0, keep_dims=True) # Partial means. w_mul_x = tf.expand_dims( tf.matmul(self._w[shard_id], tf.squeeze(shard, [0]), transpose_a=True), 1) self._w_mul_x.append(w_mul_x) # Partial covariances. x = tf.concat(0, [shard for _ in range(self._num_classes)]) x_trans = tf.transpose(x, perm=[0, 2, 1]) x_mul_w = tf.concat(0, [ tf.expand_dims(x_trans[k, :, :] * self._w[shard_id][:, k], 0) for k in range(self._num_classes)]) self._w_mul_x2.append(tf.batch_matmul(x_mul_w, x)) def _define_maximization_operation(self, num_batches): """Maximization operations.""" # TODO(xavigonzalvo): some of these operations could be moved to C++. # Compute the effective number of data points assigned to component k. with tf.control_dependencies(self._w): points_in_k = tf.squeeze(tf.add_n(self._points_in_k), squeeze_dims=[0]) # Update alpha. if 'w' in self._params: final_points_in_k = points_in_k / num_batches num_examples = tf.to_float(tf.reduce_sum(final_points_in_k)) self._alpha_op = self._alpha.assign( final_points_in_k / (num_examples + MEPS)) else: self._alpha_op = tf.no_op() self._train_ops = [self._alpha_op] # Update means. points_in_k_expanded = tf.reshape(points_in_k, [self._num_classes, 1, 1]) if 'm' in self._params: self._means_op = self._means.assign( tf.div(tf.add_n(self._w_mul_x), points_in_k_expanded + MEPS)) else: self._means_op = tf.no_op() # means are (num_classes x 1 x dims) # Update covariances. with tf.control_dependencies([self._means_op]): b = tf.add_n(self._w_mul_x2) / (points_in_k_expanded + MEPS) new_covs = [] for k in range(self._num_classes): mean = self._means.ref()[k, :, :] square_mean = tf.matmul(mean, mean, transpose_a=True) new_cov = b[k, :, :] - square_mean + self._min_var if self._covariance_type == FULL_COVARIANCE: new_covs.append(tf.expand_dims(new_cov, 0)) elif self._covariance_type == DIAG_COVARIANCE: new_covs.append(tf.expand_dims(tf.diag_part(new_cov), 0)) new_covs = tf.concat(0, new_covs) if 'c' in self._params: # Train operations don't need to take care of the means # because covariances already depend on it. with tf.control_dependencies([self._means_op, new_covs]): self._train_ops.append( tf.assign(self._covs, new_covs, validate_shape=False)) def _define_distance_to_clusters(self, data): """Defines the Mahalanobis distance to the assigned Gaussian.""" # TODO(xavigonzalvo): reuse (input - mean) * cov^-1 * (input - # mean) from log probability function. 
self._all_scores = [] for shard in data: all_scores = [] shard = tf.expand_dims(shard, 0) for c in xrange(self._num_classes): if self._covariance_type == FULL_COVARIANCE: cov = self._covs[c, :, :] elif self._covariance_type == DIAG_COVARIANCE: cov = tf.diag(self._covs[c, :]) inverse = tf.matrix_inverse(cov + self._min_var) inv_cov = tf.tile( tf.expand_dims(inverse, 0), tf.pack([self._num_examples, 1, 1])) diff = tf.transpose(shard - self._means[c, :, :], perm=[1, 0, 2]) m_left = tf.batch_matmul(diff, inv_cov) all_scores.append(tf.sqrt(tf.batch_matmul( m_left, tf.transpose(diff, perm=[0, 2, 1]) ))) self._all_scores.append(tf.reshape( tf.concat(1, all_scores), tf.pack([self._num_examples, self._num_classes]))) # Distance to the associated class. self._all_scores = tf.concat(0, self._all_scores) assignments = tf.concat(0, self.assignments()) rows = tf.to_int64(tf.range(0, self._num_examples)) indices = tf.concat(1, [tf.expand_dims(rows, 1), tf.expand_dims(assignments, 1)]) self._scores = tf.gather_nd(self._all_scores, indices) def _define_loglikelihood_operation(self): """Defines the total log-likelihood of current iteration.""" self._ll_op = [] for prior_probs in self._prior_probs: self._ll_op.append(tf.reduce_sum(tf.log(prior_probs))) tf.summary.scalar('ll', tf.reduce_sum(self._ll_op)) def gmm(inp, initial_clusters, num_clusters, random_seed, covariance_type=FULL_COVARIANCE, params='wmc'): """Creates the graph for Gaussian mixture model (GMM) clustering. Args: inp: An input tensor or list of input tensors initial_clusters: Specifies the clusters used during initialization. Can be a tensor or numpy array, or a function that generates the clusters. Can also be "random" to specify that clusters should be chosen randomly from input data. Note: type is diverse to be consistent with skflow. num_clusters: number of clusters. random_seed: Python integer. Seed for PRNG used to initialize centers. covariance_type: one of "diag", "full". params: Controls which parameters are updated in the training process. Can contain any combination of "w" for weights, "m" for means, and "c" for covars. Returns: Note: tuple of lists returned to be consistent with skflow A tuple consisting of: all_scores: A matrix (or list of matrices) of dimensions (num_input, num_clusters) where the value is the distance of an input vector and a cluster center. assignments: A vector (or list of vectors). Each element in the vector corresponds to an input row in 'inp' and specifies the cluster id corresponding to the input. scores: Similar to assignments but specifies the distance to the assigned cluster instead. training_op: an op that runs an iteration of training. """ initial_means = None if initial_clusters != 'random' and not isinstance( initial_clusters, tf.Tensor): initial_means = tf.constant(initial_clusters, dtype=tf.float32) # Implementation of GMM. inp = inp if isinstance(inp, list) else [inp] gmm_tool = GmmAlgorithm(inp, num_clusters, initial_means, params, covariance_type, random_seed) training_ops = gmm_tool.training_ops() assignments = gmm_tool.assignments() all_scores, scores = gmm_tool.scores() return [all_scores], [assignments], [scores], tf.group(*training_ops)
tongwang01/tensorflow
tensorflow/contrib/factorization/python/ops/gmm_ops.py
Python
apache-2.0
18,611
[ "Gaussian" ]
f92e27897cb26207749da5a331fbfca247375b144c99ea87240fdeb93391ac4a
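The gmm_ops.py record above buries the core E-step inside graph-building code. As a worked illustration of the membership-weight formula quoted in _define_expectation_operation, here is a minimal NumPy sketch; the helper name and inputs are hypothetical and not part of the TensorFlow contrib API. log_probs[i, k] stands for log(alpha_k * f(y_i | theta_k)) for example i and mixture component k.

import numpy as np

# Hypothetical sketch of the E-step: w_{ik} = alpha_k f(y_i|theta_k) /
# sum_m alpha_m f(y_i|theta_m), computed in log space for numerical stability.
def membership_weights(log_probs):
    log_prior = np.logaddexp.reduce(log_probs, axis=1, keepdims=True)
    return np.exp(log_probs - log_prior)

# Each row of w sums to one, matching the role of self._w[shard_id] above.
w = membership_weights(np.log([[0.20, 0.05], [0.01, 0.30]]))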
""" Based upon examples by Scott Daniel (scottvalscott@gmail.com) found here: https://stash.lsstcorp.org/projects/SIM/repos/sims_catutils/browse/python/lsst/sims/ catUtils/exampleCatalogDefinitions/phoSimCatalogExamples.py """ import os from collections import OrderedDict import pickle from lsst.sims.catalogs.generation.db import CatalogDBObject from lsst.sims.catalogs.measures.instance import CompoundInstanceCatalog from lsst.sims.catUtils.baseCatalogModels import GalaxyTileCompoundObj from lsst.sims.catUtils.exampleCatalogDefinitions.phoSimCatalogExamples import \ PhoSimCatalogPoint, PhoSimCatalogSersic2D, PhoSimCatalogZPoint from lsst.sims.catUtils.utils import ObservationMetaDataGenerator from sprinkler import sprinklerCompound class InstcatFactory(object): def __init__(self, objid, phosimCatalogObject): while True: # This loop is a workaround for UW catsim db connection intermittency. try: self.db_obj = CatalogDBObject.from_objid(objid) break except RuntimeError: continue self.cat_obj = phosimCatalogObject def __call__(self, obs_metadata): return self.cat_obj(self.db_obj, obs_metadata=obs_metadata) class InstcatGenerator(object): def __init__(self, opsim_db, fieldRA, fieldDec, boundLength=0.3, pickle_file=None, sprinkle=True): self._set_instcatFactories() self._set_obs_md_results(opsim_db, fieldRA, fieldDec, boundLength, pickle_file) self.sprinkle = sprinkle def _set_instcatFactories(self): self._instcatFactories = {} starObjNames = ['msstars', 'bhbstars', 'wdstars', 'rrlystars', 'cepheidstars'] for objid in starObjNames: self.update_factories(objid, PhoSimCatalogPoint) self.update_factories('galaxyBulge', PhoSimCatalogSersic2D) self.update_factories('galaxyDisk', PhoSimCatalogSersic2D) self.update_factories('galaxyAgn', PhoSimCatalogZPoint) def update_factories(self, objid, catobj): self._instcatFactories[objid] = InstcatFactory(objid, catobj) def _set_obs_md_results(self, opsim_db, fieldRA, fieldDec, boundLength, pickle_file): if pickle_file is not None and os.path.isfile(pickle_file): self.obs_md_results = pickle.load(open(pickle_file)) else: # Generate the observation metadata from the db file. gen = ObservationMetaDataGenerator(database=opsim_db, driver='sqlite') self.obs_md_results = gen.getObservationMetaData(fieldRA=fieldRA, fieldDec=fieldDec, boundLength=boundLength) if pickle_file is not None: pickle.dump(self.obs_md_results, open(pickle_file, 'w')) def find_visits(self, bandpass, nmax=None): # Use an OrderedDict to gather the visits since a visit will # have multiple entries in the Summary table if it is part of # more than one proposal. 
visits = OrderedDict() for obs_metadata in self.obs_md_results: if nmax is not None and len(visits) == nmax: break if obs_metadata.bandpass == bandpass: obshistid = obs_metadata.phoSimMetaData['Opsim_obshistid'][0] visits[obshistid] = obs_metadata return visits def write_catalog(self, outfile, obs_metadata, clobber=True): if clobber and os.path.isfile(outfile): os.remove(outfile) cat_list = [] for objid in self._instcatFactories: cat_list.append(self._instcatFactories[objid](obs_metadata)) if self.sprinkle: compoundDBclass = sprinklerCompound else: compoundDBclass = GalaxyTileCompoundObj while True: try: my_cat = CompoundInstanceCatalog(cat_list, obs_metadata=obs_metadata, compoundDBclass=compoundDBclass) break except RuntimeError: continue my_cat.write_catalog(outfile) if __name__ == '__main__': import os import time fieldID = 1427 fieldRA = (53, 54) fieldDec = (-29, -27) opsim_db = '/nfs/slac/g/ki/ki18/jchiang/DESC/Twinkles/work/enigma_1189_sqlite.db' pickle_file = 'obs_metadata_enigma_1189_%(fieldID)i.pickle' % locals() t0 = time.time() generator = InstcatGenerator(opsim_db, fieldRA, fieldDec, pickle_file=pickle_file) print "Set up time:", time.time() - t0 nmax = 20 for bandpass in 'ugrizy': print "band pass:", bandpass visits = generator.find_visits(bandpass, nmax=nmax) i = 0 for obshistid, visit in visits.items(): i += 1 outfile = 'phosim_input_%s_%07i.txt' % (bandpass, obshistid) print i, outfile if os.path.isfile(outfile): continue while True: try: generator.write_catalog(outfile, visit) break except KeyError, eobj: print eobj, "trying again" os.remove(outfile)
rbiswas4/Twinkles
twinkles/InstcatGenerator.py
Python
mit
5,512
[ "VisIt" ]
82409c448762a26ce766e28849f380473c166bef804da8ae48046d2e6ce0b0bc
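The InstcatFactory constructor and write_catalog method in the record above retry indefinitely on RuntimeError as a workaround for intermittent UW catsim database connections. A bounded variant of that pattern, sketched here with hypothetical names that are not part of the Twinkles code, avoids spinning forever on a persistent failure:

import time

# Hypothetical bounded-retry helper; the unbounded "while True / except
# RuntimeError: continue" loops above correspond to max_attempts = infinity.
def call_with_retry(func, exceptions=(RuntimeError,), max_attempts=5, delay=1.0):
    for attempt in range(1, max_attempts + 1):
        try:
            return func()
        except exceptions:
            if attempt == max_attempts:
                raise
            time.sleep(delay)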
#!/usr/bin/env python # # Copyright 2006 The Closure Library Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Calculates JavaScript dependencies without requiring Google's build system. This tool is deprecated and is provided for legacy users. See build/closurebuilder.py and build/depswriter.py for the current tools. It iterates over a number of search paths and builds a dependency tree. With the inputs provided, it walks the dependency tree and outputs all the files required for compilation. """ try: import distutils.version except ImportError: # distutils is not available in all environments distutils = None import logging import optparse import os import re import subprocess import sys _BASE_REGEX_STRING = '^\s*goog\.%s\(\s*[\'"](.+)[\'"]\s*\)' req_regex = re.compile(_BASE_REGEX_STRING % 'require') prov_regex = re.compile(_BASE_REGEX_STRING % 'provide') ns_regex = re.compile('^ns:((\w+\.)*(\w+))$') version_regex = re.compile('[\.0-9]+') def IsValidFile(ref): """Returns true if the provided reference is a file and exists.""" return os.path.isfile(ref) def IsJsFile(ref): """Returns true if the provided reference is a Javascript file.""" return ref.endswith('.js') def IsNamespace(ref): """Returns true if the provided reference is a namespace.""" return re.match(ns_regex, ref) is not None def IsDirectory(ref): """Returns true if the provided reference is a directory.""" return os.path.isdir(ref) def ExpandDirectories(refs): """Expands any directory references into inputs. Description: Looks for any directories in the provided references. Found directories are recursively searched for .js files, which are then added to the result list. Args: refs: a list of references such as files, directories, and namespaces Returns: A list of references with directories removed and replaced by any .js files that are found in them. Also, the paths will be normalized. """ result = [] for ref in refs: if IsDirectory(ref): # Disable 'Unused variable' for subdirs # pylint: disable-msg=W0612 for (directory, subdirs, filenames) in os.walk(ref): for filename in filenames: if IsJsFile(filename): result.append(os.path.join(directory, filename)) else: result.append(ref) return map(os.path.normpath, result) class DependencyInfo(object): """Represents a dependency that is used to build and walk a tree.""" def __init__(self, filename): self.filename = filename self.provides = [] self.requires = [] def __str__(self): return '%s Provides: %s Requires: %s' % (self.filename, repr(self.provides), repr(self.requires)) def BuildDependenciesFromFiles(files): """Build a list of dependencies from a list of files. Description: Takes a list of files, extracts their provides and requires, and builds out a list of dependency objects. Args: files: a list of files to be parsed for goog.provides and goog.requires. Returns: A list of dependency objects, one for each file in the files argument. 
""" result = [] filenames = set() for filename in files: if filename in filenames: continue # Python 3 requires the file encoding to be specified if (sys.version_info[0] < 3): file_handle = open(filename, 'r') else: file_handle = open(filename, 'r', encoding='utf8') try: dep = CreateDependencyInfo(filename, file_handle) result.append(dep) finally: file_handle.close() filenames.add(filename) return result def CreateDependencyInfo(filename, source): """Create dependency info. Args: filename: Filename for source. source: File-like object containing source. Returns: A DependencyInfo object with provides and requires filled. """ dep = DependencyInfo(filename) for line in source: if re.match(req_regex, line): dep.requires.append(re.search(req_regex, line).group(1)) if re.match(prov_regex, line): dep.provides.append(re.search(prov_regex, line).group(1)) return dep def BuildDependencyHashFromDependencies(deps): """Builds a hash for searching dependencies by the namespaces they provide. Description: Dependency objects can provide multiple namespaces. This method enumerates the provides of each dependency and adds them to a hash that can be used to easily resolve a given dependency by a namespace it provides. Args: deps: a list of dependency objects used to build the hash. Raises: Exception: If a multiple files try to provide the same namepace. Returns: A hash table { namespace: dependency } that can be used to resolve a dependency by a namespace it provides. """ dep_hash = {} for dep in deps: for provide in dep.provides: if provide in dep_hash: raise Exception('Duplicate provide (%s) in (%s, %s)' % ( provide, dep_hash[provide].filename, dep.filename)) dep_hash[provide] = dep return dep_hash def CalculateDependencies(paths, inputs): """Calculates the dependencies for given inputs. Description: This method takes a list of paths (files, directories) and builds a searchable data structure based on the namespaces that each .js file provides. It then parses through each input, resolving dependencies against this data structure. The final output is a list of files, including the inputs, that represent all of the code that is needed to compile the given inputs. Args: paths: the references (files, directories) that are used to build the dependency hash. inputs: the inputs (files, directories, namespaces) that have dependencies that need to be calculated. Raises: Exception: if a provided input is invalid. Returns: A list of all files, including inputs, that are needed to compile the given inputs. """ deps = BuildDependenciesFromFiles(paths + inputs) search_hash = BuildDependencyHashFromDependencies(deps) result_list = [] seen_list = [] for input_file in inputs: if IsNamespace(input_file): namespace = re.search(ns_regex, input_file).group(1) if namespace not in search_hash: raise Exception('Invalid namespace (%s)' % namespace) input_file = search_hash[namespace].filename if not IsValidFile(input_file) or not IsJsFile(input_file): raise Exception('Invalid file (%s)' % input_file) seen_list.append(input_file) file_handle = open(input_file, 'r') try: for line in file_handle: if re.match(req_regex, line): require = re.search(req_regex, line).group(1) ResolveDependencies(require, search_hash, result_list, seen_list) finally: file_handle.close() result_list.append(input_file) # All files depend on base.js, so put it first. 
base_js_path = FindClosureBasePath(paths) if base_js_path: result_list.insert(0, base_js_path) else: logging.warning('Closure Library base.js not found.') return result_list def FindClosureBasePath(paths): """Given a list of file paths, return Closure base.js path, if any. Args: paths: A list of paths. Returns: The path to Closure's base.js file including filename, if found. """ for path in paths: pathname, filename = os.path.split(path) if filename == 'base.js': f = open(path) is_base = False # Sanity check that this is the Closure base file. Check that this # is where goog is defined. for line in f: if line.startswith('var goog = goog || {};'): is_base = True break f.close() if is_base: return path def ResolveDependencies(require, search_hash, result_list, seen_list): """Takes a given requirement and resolves all of the dependencies for it. Description: A given requirement may require other dependencies. This method recursively resolves all dependencies for the given requirement. Raises: Exception: when require does not exist in the search_hash. Args: require: the namespace to resolve dependencies for. search_hash: the data structure used for resolving dependencies. result_list: a list of filenames that have been calculated as dependencies. This variable is the output for this function. seen_list: a list of filenames that have been 'seen'. This is required for the dependency->dependant ordering. """ if require not in search_hash: raise Exception('Missing provider for (%s)' % require) dep = search_hash[require] if not dep.filename in seen_list: seen_list.append(dep.filename) for sub_require in dep.requires: ResolveDependencies(sub_require, search_hash, result_list, seen_list) result_list.append(dep.filename) def GetDepsLine(dep, base_path): """Returns a JS string for a dependency statement in the deps.js file. Args: dep: The dependency that we're printing. base_path: The path to Closure's base.js including filename. """ return 'goog.addDependency("%s", %s, %s);' % ( GetRelpath(dep.filename, base_path), dep.provides, dep.requires) def GetRelpath(path, start): """Return a relative path to |path| from |start|.""" # NOTE: Python 2.6 provides os.path.relpath, which has almost the same # functionality as this function. Since we want to support 2.4, we have # to implement it manually. :( path_list = os.path.abspath(os.path.normpath(path)).split(os.sep) start_list = os.path.abspath( os.path.normpath(os.path.dirname(start))).split(os.sep) common_prefix_count = 0 for i in range(0, min(len(path_list), len(start_list))): if path_list[i] != start_list[i]: break common_prefix_count += 1 # Always use forward slashes, because this will get expanded to a url, # not a file path. return '/'.join(['..'] * (len(start_list) - common_prefix_count) + path_list[common_prefix_count:]) def PrintLine(msg, out): out.write(msg) out.write('\n') def PrintDeps(source_paths, deps, out): """Print out a deps.js file from a list of source paths. Args: source_paths: Paths that we should generate dependency info for. deps: Paths that provide dependency info. Their dependency info should not appear in the deps file. out: The output file. Returns: True on success, false if it was unable to find the base path to generate deps relative to. 
""" base_path = FindClosureBasePath(source_paths + deps) if not base_path: return False PrintLine('// This file was autogenerated by calcdeps.py', out) excludesSet = set(deps) for dep in BuildDependenciesFromFiles(source_paths + deps): if not dep.filename in excludesSet: PrintLine(GetDepsLine(dep, base_path), out) return True def PrintScript(source_paths, out): for index, dep in enumerate(source_paths): PrintLine('// Input %d' % index, out) f = open(dep, 'r') PrintLine(f.read(), out) f.close() def GetJavaVersion(): """Returns the string for the current version of Java installed.""" proc = subprocess.Popen(['java', '-version'], stderr=subprocess.PIPE) proc.wait() version_line = proc.stderr.read().splitlines()[0] return version_regex.search(version_line).group() def FilterByExcludes(options, files): """Filters the given files by the exlusions specified at the command line. Args: options: The flags to calcdeps. files: The files to filter. Returns: A list of files. """ excludes = [] if options.excludes: excludes = ExpandDirectories(options.excludes) excludesSet = set(excludes) return [i for i in files if not i in excludesSet] def GetPathsFromOptions(options): """Generates the path files from flag options. Args: options: The flags to calcdeps. Returns: A list of files in the specified paths. (strings). """ search_paths = options.paths if not search_paths: search_paths = ['.'] # Add default folder if no path is specified. search_paths = ExpandDirectories(search_paths) return FilterByExcludes(options, search_paths) def GetInputsFromOptions(options): """Generates the inputs from flag options. Args: options: The flags to calcdeps. Returns: A list of inputs (strings). """ inputs = options.inputs if not inputs: # Parse stdin logging.info('No inputs specified. Reading from stdin...') inputs = filter(None, [line.strip('\n') for line in sys.stdin.readlines()]) logging.info('Scanning files...') inputs = ExpandDirectories(inputs) return FilterByExcludes(options, inputs) def Compile(compiler_jar_path, source_paths, out, flags=None): """Prepares command-line call to Closure compiler. Args: compiler_jar_path: Path to the Closure compiler .jar file. source_paths: Source paths to build, in order. flags: A list of additional flags to pass on to Closure compiler. """ args = ['java', '-jar', compiler_jar_path] for path in source_paths: args += ['--js', path] if flags: args += flags logging.info('Compiling with the following command: %s', ' '.join(args)) proc = subprocess.Popen(args, stdout=subprocess.PIPE) (stdoutdata, stderrdata) = proc.communicate() if proc.returncode != 0: logging.error('JavaScript compilation failed.') sys.exit(1) else: out.write(stdoutdata) def main(): """The entrypoint for this script.""" logging.basicConfig(format='calcdeps.py: %(message)s', level=logging.INFO) usage = 'usage: %prog [options] arg' parser = optparse.OptionParser(usage) parser.add_option('-i', '--input', dest='inputs', action='append', help='The inputs to calculate dependencies for. Valid ' 'values can be files, directories, or namespaces ' '(ns:goog.net.XhrLite). Only relevant to "list" and ' '"script" output.') parser.add_option('-p', '--path', dest='paths', action='append', help='The paths that should be traversed to build the ' 'dependencies.') parser.add_option('-d', '--dep', dest='deps', action='append', help='Directories or files that should be traversed to ' 'find required dependencies for the deps file. ' 'Does not generate dependency information for names ' 'provided by these files. 
Only useful in "deps" mode.') parser.add_option('-e', '--exclude', dest='excludes', action='append', help='Files or directories to exclude from the --path ' 'and --input flags') parser.add_option('-o', '--output_mode', dest='output_mode', action='store', default='list', help='The type of output to generate from this script. ' 'Options are "list" for a list of filenames, "script" ' 'for a single script containing the contents of all the ' 'file, "deps" to generate a deps.js file for all ' 'paths, or "compiled" to produce compiled output with ' 'the Closure compiler.') parser.add_option('-c', '--compiler_jar', dest='compiler_jar', action='store', help='The location of the Closure compiler .jar file.') parser.add_option('-f', '--compiler_flag', '--compiler_flags', # for backwards compatability dest='compiler_flags', action='append', help='Additional flag to pass to the Closure compiler. ' 'May be specified multiple times to pass multiple flags.') parser.add_option('--output_file', dest='output_file', action='store', help=('If specified, write output to this path instead of ' 'writing to standard output.')) (options, args) = parser.parse_args() search_paths = GetPathsFromOptions(options) if options.output_file: out = open(options.output_file, 'w') else: out = sys.stdout if options.output_mode == 'deps': result = PrintDeps(search_paths, ExpandDirectories(options.deps or []), out) if not result: logging.error('Could not find Closure Library in the specified paths') sys.exit(1) return inputs = GetInputsFromOptions(options) logging.info('Finding Closure dependencies...') deps = CalculateDependencies(search_paths, inputs) output_mode = options.output_mode if output_mode == 'script': PrintScript(deps, out) elif output_mode == 'list': # Just print out a dep per line for dep in deps: PrintLine(dep, out) elif output_mode == 'compiled': # Make sure a .jar is specified. if not options.compiler_jar: logging.error('--compiler_jar flag must be specified if --output is ' '"compiled"') sys.exit(1) # User friendly version check. if distutils and not (distutils.version.LooseVersion(GetJavaVersion()) > distutils.version.LooseVersion('1.6')): logging.error('Closure Compiler requires Java 1.6 or higher.') logging.error('Please visit http://www.java.com/getjava') sys.exit(1) Compile(options.compiler_jar, deps, out, options.compiler_flags) else: logging.error('Invalid value for --output flag.') sys.exit(1) if __name__ == '__main__': main()
Digaku/closure-library
closure/bin/calcdeps.py
Python
apache-2.0
18,506
[ "VisIt" ]
f02b21e7b3c3ce9efbf9cafa1a2b82e614a6bc3e0dbb984273e0cd3e9570f07b
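calcdeps.py resolves each goog.require recursively, emitting every dependency before its dependant (ResolveDependencies in the record above). The following toy sketch mirrors that post-order walk over a hypothetical provide/require table; the namespaces and file names are invented for illustration and are not Closure's real dependency data.

# Toy post-order resolution in the spirit of ResolveDependencies.
def resolve(ns, providers, result, seen):
    dep = providers[ns]                 # a missing provider would raise KeyError
    if dep['file'] not in seen:
        seen.add(dep['file'])
        for sub in dep['requires']:
            resolve(sub, providers, result, seen)
        result.append(dep['file'])

providers = {
    'goog.net.XhrLite': {'file': 'xhrlite.js', 'requires': ['goog.events']},
    'goog.events': {'file': 'events.js', 'requires': []},
}
result, seen = [], set()
resolve('goog.net.XhrLite', providers, result, seen)
# result == ['events.js', 'xhrlite.js']: the dependency precedes its dependant.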
""" subsurface_fracture_model.py ---------------------------- Simulate the evolution of a petroleum fluid transiting a single pathway through a subsurface fracture. WARNING!!! THIS MODULE IS UNDER CONSTRUCTION AND DOES NOT YET REPRESENT AN ACCURATE SIMULATION OF THE OCEAN SUBSURFACE. PLEASE DO NOT USE THIS FOR ANY PURPOSES AT THIS TIME. Scott Socolofsky, 02/03/2022. """ # S. Socolofsky, January 2021, <socolofs@tamu.edu> from __future__ import (absolute_import, division, print_function) from tamoc import seawater, dbm from tamoc import lfm import numpy as np import matplotlib.pyplot as plt class Model(object): """ Master class object for controlling and post-processing the model Master class object for the subsurface fracture model. This model class generates a fracture network and can simulate the transport, transformation, and chemical reactions of petroleum fluids flowing through those fractures. Parameters ---------- profile : `ambient.Profile` object, default = None An object containing the ambient temperature, salinity, pressure, and ambient concentration data and associated methods to interpolate that data to different subsurface depths. If stored as a netCDF dataset, the `ambient.Profile` object may be open or closed at instantiation. If open, the initializer will close the file since this model does not support changing the ambient data once initialized. """ def __init__(self, H, Hs, dx, du, mu_d, sigma_d, x0=np.zeros(2), delta_s=1.): super(Model, self).__init__() # Store the model parameters self.H = H self.Hs = Hs self.dx = dx self.du = du self.mu_d = mu_d self.sigma_d = sigma_d self.x0 = x0 self.delta_s = delta_s # Compute the derived quantities self.mu_A = np.pi * (self.mu_d / 2.)**2 self.sigma_A = np.pi * (self.sigma_d / 2.)**2 # Adjust to a z = depth coordinate system self.du[2] *= -1. # Set the fixed model parameters for the simulation that the user # cannot adjust self.p = ModelParams() # Generate the fracture network self._gen_fracture_path() # Set the simulation flag to false self.ran_sim = False def _gen_fracture_path(self): """ Generate a fracture path for this model Parameters ---------- H : float Water depth at the outlet of the fracture network (m) Hs : float Thickness of the subsurface layer from the petroleum reservoir to the sea floor (m) dx : ndarray Array of coordinate in the x-, y-, and z-directions giving the average fluctuating displacements along each straight tube segment of the network (m). du : ndarray Average pseudo-advection step in the x-, y-, and z-directions in length per line segment length (m) mu_A : float Arithmetic average of the cross-sectional areas of each segment of the fracture network (m^2) sigma_A : float Sample standard deviation of the cross-sectional areas of each segment of the fracture network (m^2) x0 : ndarray, default=np.zeros(3) Planar coordinates of the origin of this fracture network at the petroleum source in the x- and y-directions (easterly and northerly; meters) delta_s : float Spatial step to take when building the fracture network. Attributes ---------- xp : ndarray Two-dimensional array of vertex positions for each segment in the fracture network. Each row contains a different point, with the three columns reporting the x-, y-, and z-coordinates of the vertex (easterly, northerly, and depth; meters). 
""" # Create random-walk network self.xp = fracture_network(self.H, self.Hs, self.dx, self.du, self.delta_s) # Shift network to origin self.xp[:,:2] = self.x0 + self.xp[:,:2] # Select diameters for each segment of the network using a log-normal # distribution self.As = fracture_areas(self.xp.shape[0], self.mu_A, self.sigma_A) # Generate a path-length coordinate system sp = np.zeros(self.xp.shape[0]) for i in range(len(sp)-1): # Set the base of this segment at the end of the previous segment sp[i+1] = sp[i] for j in range(3): # Add the length of this segment sp[i+1] += np.sqrt((self.xp[i+1,j] - self.xp[i,j])**2) self.sp = sp # Create some interpolators from scipy.interpolate import interp1d fill_value = (self.xp[0,:], self.xp[-1,:]) self.x = interp1d(self.sp, self.xp, axis=0, fill_value=fill_value, bounds_error=False) def simulate_pipe_flow(self, u0, mass_frac, fluid, profile, dt_max=60.): """ Simulate the gas migration assuming tubes are full of reservoir fluid """ # Store the input variables in the model attributes self.u0 = u0 self.mass_frac = mass_frac self.fluid = fluid self.profile = profile self.dt_max = dt_max # Choose a heat capacity value for the petroleum fluid self.cp = seawater.cp() * 0.5 # Create an initial state space vector from the given input variables t0, y0, self.m_dot = lfm.main_ic(self.x(0)[2], self.u0, self.get_A_seg(0), self.mass_frac, self.fluid, self.cp, self.profile) # Create a Lagrangian Parcel object to handle the properties of the # Lagrangian element self.y_local = PipeParcel(t0, y0, self.p, self.m_dot, self.fluid, self.cp, self.x, self.get_A_seg, self.profile) # Compute the evolution along this flow path print('\n-- TEXAS A&M OIL-SPILL CALCULATOR (TAMOC) --') print('-- Subsurface Fracture Model --\n') self.t, self.y = lfm.calculate_pipe(np.max(self.sp), self.y_local, t0, y0, self.dt_max) def get_A_seg(self, s): """ Return the cross-sectional area for the segment at this path position """ # Find the index to this path point if s > self.sp[-1]: # ODE is trying to solve a point just outside the network... ip = -1 elif s < self.sp[0]: # ODE is trying to solve a point just before the network... ip = 0 else: # This is within the network ip = np.max(np.where(self.sp <= s)) + 1 # Return the area return self.As[ip] def new_fracture_path(self, H, Hs, dx, ds, du, mu_d, sigma_d, x0=np.zeros(2), delta_s=1.): """ Generate a new fracture path for this model """ # Store the new model parameters self.H = H self.Hs = Hs self.dx = dx self.ds = ds self.du = du self.mu_d = mu_d self.sigma_d = sigma_d self.x0 = x0 self.delta_s = delta_s # Compute the derived quantities self.mu_A = np.pi * (self.mu_d / 2.)**2 self.sigma_A = np.pi * (self.sigma_d / 2.)**2 # Create the new path self._gen_fracture_path() # Reset the simulation flag to False self.ran_sim = False def show_network(self, fig=1): """ Plot the fracture network """ show_network(self.xp, fig) def plot_state_space(self, fig=2): """ Create a default plot of the state space solution """ plot_state_space(self.t, self.y, self.y_local, fig) def plot_component_map(self, comps=None, fig=3): """ docstring for plot_component_map """ # If no composition specified, plot all components if isinstance(comps, type(None)): comps = self.y_local.composition # Create the plot plot_component_map(self.t, self.y, self.y_local, comps, fig) class ModelParams(object): """ Fixed parameters used in the subsurface fracture model Fixed model parameters that the user should not adjust and that are used by the subsurface fracture model. 
These include parameters such as entrainment coefficients and other model constants that have been fit to data and are not considered calibration coefficients. Parameters ---------- """ def __init__(self): super(ModelParams, self).__init__() # Set the model parameters pass class PipeParcel(object): """ Lagrangian element for a slice of fluid in a pipe-flow """ def __init__(self, t0, y0, p, m_dot, fluid, cp, x, A, profile): super(PipeParcel, self).__init__() # Store the initial values of the input variables self.t0 = t0 self.y0 = y0 self.p = p self.m_dot = m_dot self.fluid = fluid self.cp = cp self.x = x self.A = A self.profile = profile # Extract some additional parameters self.composition = self.fluid.composition # Update the parcel with the present state space self.update(t0, y0) def update(self, t, y): """ Extract the derived quantities from the state space vector """ # Save the current state-space vector self.t = t self.y = y # Extract the state-space variables from the state-space self.s = y[0] self.m = y[1:-1] self.h = y[-1] # Get the current position in space and the pipe properties self.xp, self.yp, self.zp = self.x(self.s) self.Ap = self.A(self.s) # Get the local ambient conditions self.Pa, self.Ta, self.Sa = self.profile.get_values(self.zp, ['pressure', 'temperature', 'salinity']) self.Ca = self.profile.get_values(self.zp, self.composition) self.rho_a = seawater.density(self.Ta, self.Sa, self.Pa) # Compute the derived quantities self.T = self.h / (np.sum(self.m) * self.cp) self.rho = self.fluid.density(self.m, self.T, self.Pa)[0] self.us = self.m_dot / self.Ap / self.rho self.V = np.sum(self.m) / self.rho self.hs = self.V / self.Ap self.ds = np.sqrt(self.Ap / np.pi) * 2. self.As = np.pi * self.ds def fracture_network(H, Hs, dx, du, delta_s): """ Generate the fracture network for the given parameters """ # Set the origin of the fracture network x = [np.array([0., 0., H + Hs])] # Compute the effective diffusivities for the random-walk model of the # fracture network ds = np.sqrt(np.sum(dx**2)) D = dx**2 / ds # Import a random number generator to create the steps from scipy.stats import norm mu = 0. sigma = 1. # Find points along the fracture network until we reach the seabed while x[-1][2] > H: # Generate the next point along the trajectory x_new = np.zeros(3) for i in range(len(x_new)): # First, the random step r = norm.rvs(mu, scale=sigma, size=1) x_new[i] = x[-1][i] + r * np.sqrt(D[i]* delta_s) # Then, the deterministic, pseudo-advection step x_new[i] += du[i] * delta_s x.append(x_new) # Convert x to a numpy array x = np.array(x) # Set the final point to be at the mud line dl = (H - x[-2,2]) / (x[-1,2] - x[-2,2]) x[-1,:] = dl * (x[-1,:] - x[-2,:]) + x[-2,:] # Return the positions return x def fracture_areas(n_A, mu_A, sigma_A, dist='lognorm'): """ Generate a set of cross-sectional areas for each segment of a network Generate a set of cross-sectional areas with mean mu_A and standard deviation sigma_A for each segment of a fracture network given by the coordinate points x, y, and z. Parameters ---------- n_A : int Number of segments in the fracture network mu_A : float Arithmetic average of the cross-sectional areas of each segment of the fracture network (m^2) sigma_A : float Sample standard deviation of the cross-sectional areas of each segment of the fracture network (m^2) """ # Generate the areas from a probability density function if dist == 'lognorm': from scipy.stats import lognorm mu = np.log(mu_A / np.sqrt(1. + (sigma_A / mu_A)**2)) sigma = np.sqrt(np.log(1. 
+ (sigma_A / mu_A)**2)) seg_A = lognorm.rvs(sigma, scale=np.exp(mu), size=n_A) # Return the areas return seg_A def show_network(xp, fig): """ docstring for show_network """ # Create the figure plt.figure(fig, figsize=(11,6)) plt.clf() # Some formatting commands marker_fmt = {'markerfacecolor':'w', 'label':'_no_legend_'} # Create two subplots ax = plt.subplot(121) ax.plot(xp[:,0], xp[:,2], 'b-') ax.plot(xp[:,1], xp[:,2], 'g-') ax.legend(('Easterly path', 'Northerly path')) ax.plot(xp[0,0], xp[0,2], 'ko', **marker_fmt) ax.plot(xp[-1,0], xp[-1,2], 'ko', **marker_fmt) ax.plot(xp[0,1], xp[0,2], 'ko', **marker_fmt) ax.plot(xp[-1,1], xp[-1,2], 'ko', **marker_fmt) ax.invert_yaxis() ax.set_xlabel('Distance, (m)') ax.set_ylabel('Depth, (m)') ax = plt.subplot(122) ax.plot(xp[:,0], xp[:,1], 'm-') ax.plot(xp[0,0], xp[0,1], 'ko', **marker_fmt) ax.plot(xp[-1,0], xp[-1,1], 'ko', **marker_fmt) ax.set_xlabel('Easterly distance, (m)') ax.set_ylabel('Northerly distance, (m)') plt.show() def plot_state_space(t, y, parcel, fig): """ docstring for plot_state_space """ # Extract the state-space variables s = y[:,0] m = y[:,1:-1] h = y[:,-1] # Convert heat to temperature T = np.zeros(h.shape) for i in range(len(T)): T[i] = h[i] / (parcel.cp * np.sum(m[i,:])) # Plot the variables plt.figure(fig, figsize=(11,9)) plt.clf() # Plot position ax = plt.subplot(131) ax.plot(t / 3600., s) ax.set_xlabel('Time, (hrs)') ax.set_ylabel('Distance, (m)') # Plot the masses ax = plt.subplot(132) ax.semilogx(m, s) ax.set_xlabel('Mass, (kg)') ax.legend(parcel.composition) # Plot the temperature ax = plt.subplot(133) ax.plot(T - 273.15, s) ax.set_xlabel('Temperature, (deg C)') plt.show() def plot_component_map(t, y, parcel, comps, fig): """ docstring for plot_component_map """ from matplotlib.collections import LineCollection # Extract the state-space variables s = y[:,0] h = y[:,-1] # Get the x,y,z coordinates x = np.zeros((len(t), 3)) for i in range(len(t)): x[i,:] = parcel.x(s[i]) # Get the indices to the components im = [parcel.composition.index(comp) for comp in comps if comp in parcel.composition] # Get the component masses m = np.zeros((len(t), len(comps))) for i in range(len(t)): m[i,:] = y[i,1:-1][im] # Figure out the figure size and number of subplots if len(comps) >= 5: cols = 5 else: cols = len(comps) if cols == 5: rows = int(len(comps) / cols) if len(comps) % cols > 0: rows += 1 else: rows = 1 figsize = (2.5 * cols, 4 * rows) # Plot each component one at a time figure = plt.figure(fig, figsize=figsize) plt.clf() add_bar = True for i in range(len(comps)): ax = plt.subplot(rows, cols, i+1) # ax.plot(m[:,i], s, label=comps[i]) # ax.legend() # ax.set_xlabel(comps[i] + ' mass, (kg)') # plt.setp(ax.get_xticklabels(), rotation=30, # horizontalalignment='right') # if i % cols == 0: # ax.set_ylabel('Distance, (m)') # plt.tight_layout() # plt.show() for j in range(2): points = np.array([x[:,j], x[:,2]]).T.reshape(-1, 1, 2) segments = np.concatenate([points[:-1], points[1:]], axis=1) norm= plt.Normalize(0, np.max(m)) if j == 0: lc = LineCollection(segments, cmap='viridis', norm=norm, label=comps[i]) else: lc = LineCollection(segments, cmap='viridis', norm=norm) lc.set_array(m[:,i]) line = ax.add_collection(lc) ax.set(xlim=(np.min(x[:,j]), np.max(x[:,j])), ylim=(np.min(x[:,2]), np.max(x[:,2]))) if add_bar: if cols / (i+1) == 1: figure.colorbar(line, ax=ax, label='Mass, (kg)') add_bar = False ax.set_xlabel('Distance, (m)') if i % cols == 0: ax.set_ylabel('Depth, (m)') ax.invert_yaxis() ax.legend() plt.tight_layout() plt.show()
socolofs/tamoc
tamoc/subsurface_fracture_model.py
Python
mit
17,747
[ "NetCDF" ]
91e4b9aae654e60616994ada905d0f12cb4cc8c7ae1e25a52d0abffda33776f5
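The fracture_network function in the record above builds each path point from a Gaussian random step with standard deviation sqrt(D_i * delta_s) plus a deterministic pseudo-advection step du_i * delta_s, stopping once the walk reaches the seabed depth H (the Model class flips the sign of du[2] so that a negative value moves the walk upward in depth). A compact NumPy sketch of that loop, with made-up parameter values for illustration only, is:

import numpy as np

# Minimal sketch of the random-walk-plus-advection step used in fracture_network.
def walk_to_seabed(H, Hs, dx, du, delta_s=1.0, seed=0):
    rng = np.random.default_rng(seed)
    D = dx**2 / np.sqrt(np.sum(dx**2))   # effective diffusivities per unit path length
    x = [np.array([0.0, 0.0, H + Hs])]   # start at the reservoir depth
    while x[-1][2] > H:                  # walk until the seabed depth H is reached
        step = rng.normal(size=3) * np.sqrt(D * delta_s) + du * delta_s
        x.append(x[-1] + step)
    return np.array(x)

path = walk_to_seabed(H=1000.0, Hs=200.0,
                      dx=np.array([1.0, 1.0, 0.5]),
                      du=np.array([0.05, 0.0, -0.4]))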
"""The PLSOM.""" import logging from typing import Callable, Dict, Optional, Tuple import numpy as np from tqdm import tqdm from somber.som import BaseSom from somber.components.initializers import range_initialization from somber.components.utilities import Scaler logger = logging.getLogger(__name__) class PLSom(BaseSom): # Static property names param_names = {"map_dimensions", "weights", "data_dimensionality", "params"} def __init__( self, map_dimensions: Tuple[int], data_dimensionality: Optional[int] = None, beta: Optional[float] = None, initializer: Callable = range_initialization, scaler: Optional[Scaler] = None, ) -> None: """ An implementation of the PLSom. The ParameterLess Som is a SOM which does not rely on time-induced plasticity adaptation. Instead, the plasticity of the SOM is adapted in an online fashion by continuously monitoring the error of each presented item. In general, the PLSom is less prone to catastrophic interference, or "forgetting" than the original SOM. Simultaneously, it is also more suited to re-adapting to changes in distribution. This is because the SOM loses its plasticity according to an exponentially decreasing learning rate and neighborhood size. :param map_dimensions: A tuple describing the map size. For example, (10, 10) will create a 10 * 10 map with 100 neurons, while (10, 10, 10) creates a 10 * 10 * 10 map with 1000 neurons. :param data_dimensionality: The dimensionality of the input data. :param initializer: A function which takes in the input data and weight matrix and returns an initialized weight matrix. The initializers are defined in somber.components.initializers. Can be set to None. :param scaler: An initialized instance of Scaler() which is used to scale the data to have mean 0 and stdev 1. """ super().__init__( map_dimensions, data_dimensionality=data_dimensionality, argfunc="argmin", valfunc="min", params={"r": {"value": 0, "factor": 1, "orig": 0}}, initializer=initializer, scaler=scaler, ) self.beta = beta if beta else 2 def _epoch( self, X: np.ndarray, batch_size: int, updates_epoch: int, constants: Dict[str, float], progressbar: tqdm, ) -> None: """ Run a single epoch. This function shuffles the data internally, as this improves performance. :param X: The training data. :param batch_size: The batch size :param updates_epoch: The number of updates to perform per epoch :param constants: A dictionary containing the constants with which to update the parameters in self.parameters. :param progressbar: The progressbar instance to show and update during training """ # Create batches X_ = self._create_batches(X, batch_size) X_len = np.prod(X.shape[:-1]) # Initialize the previous activation prev = self._init_prev(X_) prev = self.distance_function(X_[0], self.weights)[0] influences = self._update_params(prev) # Iterate over the training data for idx, x in enumerate(X_): # Our batches are padded, so we need to # make sure we know when we hit the padding # so we don't inadvertently learn zeroes. 
diff = X_len - (idx * batch_size) if diff and diff < batch_size: x = x[:diff] # Prev_activation may be None if prev is not None: prev = prev[:diff] # if idx > 0 and idx % update_step == 0: influences = self._update_params(prev) prev = self._propagate(x, influences, prev_activation=prev) if progressbar is not None: progressbar.update(batch_size) def _update_params(self, constants: np.ndarray) -> np.ndarray: """Update the params.""" constants = np.max(np.min(constants, 1)) self.params["r"]["value"] = max([self.params["r"]["value"], constants]) epsilon = constants / self.params["r"]["value"] influence = self._calculate_influence(epsilon) # Account for learning rate return influence * epsilon def _calculate_influence(self, epsilon: float) -> np.ndarray: """ Pre-calculate the influence for a given value of epsilon. The neighborhood has size num_neurons * num_neurons, so for a 30 * 30 map, the neighborhood will be size (900, 900). :param epsilon: The neighborhood value. :param neighborhood: The influence from each neuron to each other neuron. """ n = (self.beta - 1) * np.log(1 + epsilon * (np.e - 1)) + 1 grid = np.exp((-self.distance_grid) / n ** 2) return grid.reshape(self.num_neurons, self.num_neurons)
stephantul/somber
somber/plsom.py
Python
mit
5,148
[ "NEURON" ]
f748307d5df453c2b64c308803268827e007b60ab7ff29b22691b8b56628339d
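The PLSom record above adapts plasticity from the scale-free error epsilon = e_t / r_t, where r_t is the running maximum of the batch error, and maps it to a neighborhood width n = (beta - 1) ln(1 + epsilon (e - 1)) + 1 (see _update_params and _calculate_influence). A minimal NumPy sketch of that update, using a toy distance grid and hypothetical argument names, is:

import numpy as np

# Sketch of the parameterless plasticity scaling; batch_error is the largest
# per-example minimum distance in the current batch, as in _update_params.
def plsom_influence(batch_error, running_max, distance_grid, beta=2.0):
    running_max = max(running_max, batch_error)          # r_t = max(r_{t-1}, e_t)
    epsilon = batch_error / running_max                  # scale-free error in [0, 1]
    n = (beta - 1.0) * np.log(1.0 + epsilon * (np.e - 1.0)) + 1.0
    influence = np.exp(-distance_grid / n**2)            # neighborhood function
    return influence * epsilon, running_max              # scaled by the learning rate

grid = np.array([[0.0, 1.0], [1.0, 0.0]])                # toy 2-neuron distance grid
infl, r = plsom_influence(batch_error=0.3, running_max=0.5, distance_grid=grid)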
import espressomd import numpy as np from oif_utils import * from espressomd.interactions import OifLocalForces from espressomd.interactions import OifGlobalForces from espressomd.interactions import OifOutDirection class FixedPoint(object): """ Represents mesh points, not connected to any ESPResSo particle. """ def __init__(self, pos, id): if not isinstance(id, int): raise TypeError("Id must be integer.") if not ((len(pos) == 3) and isinstance(pos[0],float) and isinstance(pos[1],float) and isinstance(pos[2],float)): raise TypeError("Pos must be a list of three floats.") self.x = pos[0] self.y = pos[1] self.z = pos[2] self.id = id def get_pos(self): return [self.x, self.y, self.z] def get_id(self): return self.id class PartPoint(object): """ Represents mesh points, connected to ESPResSo particle. """ def __init__(self, part, id, part_id): # part is physical ESPResSo particle corresponding to that particular point if not (isinstance(part, espressomd.particle_data.ParticleHandle) and isinstance(id,int) and isinstance(part_id,int)): raise TypeError("Arguments to PartPoint are incorrect.") self.part = part self.part_id = part_id # because in adding bonds to the particles in OifCell # one needs to know the global id of the particle. self.id = id def get_pos(self): return self.part.pos def get_vel(self): return self.part.v def get_mass(self): return self.part.mass def get_type(self): return self.part.type def get_force(self): return self.part.f def set_pos(self,pos): self.part.pos = pos def set_vel(self, vel): self.part.v = vel def set_force(self, force): self.part.ext_force = force def kill_motion(self): self.part.fix = [1, 1, 1] def unkill_motion(self): self.part.unfix() class Edge(object): """ Represents edges in a mesh. """ def __init__(self, A, B): if not (isinstance(A,PartPoint) or (isinstance(A,FixedPoint))) and (isinstance(B,PartPoint) or (isinstance(B,FixedPoint))): TypeError("Arguments to Edge must be FixedPoint or PartPoint.") self.A = A self.B = B def length(self): return vec_distance(self.A.get_pos(), self.B.get_pos()) class Triangle(object): """ Represents triangles in a mesh. """ def __init__(self, A, B, C): if not (isinstance(A,PartPoint) or (isinstance(A,FixedPoint))) and (isinstance(B,PartPoint) or (isinstance(B,FixedPoint))) and (isinstance(C,PartPoint) or (isinstance(C,FixedPoint))): TypeError("Arguments to Triangle must be FixedPoint or PartPoint.") self.A = A self.B = B self.C = C def area(self): area = area_triangle(self.A.get_pos(), self.B.get_pos(), self.C.get_pos()) return area class Angle(object): """ Represents angles in a mesh. """ def __init__(self, A, B, C, D): if not (isinstance(A, PartPoint) or (isinstance(A, FixedPoint))) \ and (isinstance(B, PartPoint) or (isinstance(B, FixedPoint))) \ and (isinstance(C, PartPoint) or (isinstance(C, FixedPoint))) \ and (isinstance(D, PartPoint) or (isinstance(D, FixedPoint))): TypeError("Arguments to Angle must be FixedPoint or PartPoint.") self.A = A self.B = B self.C = C self.D = D def size(self): angle_size = angle_btw_triangles(self.A.get_pos(), self.B.get_pos(), self.C.get_pos(), self.D.get_pos()) return angle_size class ThreeNeighbors(object): """ Represents three best spatially distributed neighbors of a point in a mesh. 
""" def __init__(self, A, B, C): if not (isinstance(A, PartPoint) or (isinstance(A, FixedPoint))) \ and (isinstance(B, PartPoint) or (isinstance(B, FixedPoint))) \ and (isinstance(C, PartPoint) or (isinstance(C, FixedPoint))): TypeError("Arguments to ThreeNeighbors must be FixedPoint or PartPoint.") self.A = A self.B = B self.C = C def outer_normal(self): outer_normal = get_triangle_normal(self.A.get_pos(), self.B.get_pos(), self.C.get_pos()) return outer_normal class Mesh(object): """ Represents a triangular mesh. """ def __init__(self, nodes_file=None, triangles_file=None, system=None, resize=(1.0, 1.0, 1.0), particle_type=-1, particle_mass=1.0, normal=False, check_orientation=True): if (system is None) or (not isinstance(system,espressomd.System)): raise Exception("Mesh: No system provided or wrong type given. Quitting.") self.system = system self.normal = normal self.nodes_file = nodes_file self.triangles_file = triangles_file self.points = [] self.edges = [] self.triangles = [] self.angles = [] self.neighbors = [] self.ids_extremal_points = [0, 0, 0, 0, 0, 0, 0] if not ((nodes_file is None) or (triangles_file is None)): if not (isinstance(nodes_file,str) and isinstance(triangles_file,str)): raise TypeError("Mesh: Filenames must be strings.") if not ((len(resize) == 3) and isinstance(resize[0],float) and isinstance(resize[1],float) and isinstance(resize[2],float)): raise TypeError("Mesh: Pos must be a list of three floats.") if not isinstance(particle_type,int): raise TypeError("Mesh: particle_type must be integer.") if not isinstance(particle_mass,float): raise TypeError("Mesh: particle_mass must be float.") if not isinstance(normal,bool): raise TypeError("Mesh: normal must be bool.") if not isinstance(check_orientation,bool): raise TypeError("Mesh: check_orientation must be bool.") # reading the mesh point positions from file in_file = open(nodes_file, "r") nodes_coord = in_file.read().split("\n") in_file.close() # removes a blank line at the end of the file if there is any: nodes_coord = filter(None, nodes_coord) # here we have list of lines with triplets of strings for line in nodes_coord: # extracts coordinates from the string line line = np.array([float(x) for x in line.split()]) coords = np.array(resize) * line tmp_fixed_point = FixedPoint(coords, len(self.points)) self.points.append(tmp_fixed_point) # searching for extremal points IDs x_min = large_number x_max = -large_number y_min = large_number y_max = -large_number z_min = large_number z_max = -large_number for tmp_fixed_point in self.points: coords = tmp_fixed_point.get_pos() if coords[0] < x_min: x_min = coords[0] self.ids_extremal_points[0] = tmp_fixed_point.get_id() if coords[0] > x_max: x_max = coords[0] self.ids_extremal_points[1] = tmp_fixed_point.get_id() if coords[1] < y_min: y_min = coords[1] self.ids_extremal_points[2] = tmp_fixed_point.get_id() if coords[1] > y_max: y_max = coords[1] self.ids_extremal_points[3] = tmp_fixed_point.get_id() if coords[2] < z_min: z_min = coords[2] self.ids_extremal_points[4] = tmp_fixed_point.get_id() if coords[2] > z_max: z_max = coords[2] self.ids_extremal_points[5] = tmp_fixed_point.get_id() # reading the triangle incidences from file in_file = open(triangles_file, "r") triangles_incid = in_file.read().split("\n") in_file.close() # removes a blank line at the end of the file if there is any: triangles_incid = filter(None, triangles_incid) for line in triangles_incid: # extracts incidences from the string line incid = np.array([int(x) for x in line.split()]) tmp_triangle = 
Triangle(self.points[incid[0]], self.points[incid[1]], self.points[incid[2]]) self.triangles.append(tmp_triangle) if check_orientation is True: # check whether all triangles in file had the same orientation; if not, correct the orientation self.check_orientation() # creating list of edge incidences from triangle incidences # using temporary list of edge incidences tmp_edge_incidences = [] for triangle in self.triangles: pa = triangle.A.id pb = triangle.B.id pc = triangle.C.id if ([pa, pb] not in tmp_edge_incidences) and ([pb, pa] not in tmp_edge_incidences): tmp_edge_incidences.append([pa, pb]) if ([pb, pc] not in tmp_edge_incidences) and ([pc, pb] not in tmp_edge_incidences): tmp_edge_incidences.append([pb, pc]) if ([pa, pc] not in tmp_edge_incidences) and ([pc, pa] not in tmp_edge_incidences): tmp_edge_incidences.append([pa, pc]) for tmp_incid in tmp_edge_incidences: tmp_edge = Edge(self.points[tmp_incid[0]], self.points[tmp_incid[1]]) self.edges.append(tmp_edge) # creating list angles (former bending incidences) from triangle incidences for edge in self.edges: pa = edge.A.id pb = edge.B.id pc = -1 pd = -1 detected = 0 # detected = number of detected triangles with current edge common # Algorithm is as follows: we run over all triangles and check # whether two vertices are those from current edge. If we find such triangle, # we put the ID of the third vertex to pc and we check if the orientation pa, pb, pc is the same as # was in the triangle list (meaning, that we found one of the following three triples # in the triangle list: pa, pb, pc or pb, pc, pa or pc, pa, pb). # If we have the same orientation, we set orient = 1, otherwise orient = -1. # Then we go further looking for the second triangle. # The second triangle should have the opposite orientation. 
# The normal of the first triangle will be P1P2 x P1P3, of the second triangle will be P2P4 x P2P3 orient = 0 for triangle in self.triangles: # Run over all triangles and determine the two triangles with the common current edge if (pa == triangle.A.id) and (pb == triangle.B.id): if detected == 0: # if no triangle with such edge was detected before pc = triangle.C.id detected = 1 orient = 1 else: # if this is the second triangle with this edge, then also quit the for-loop over triangles pd = triangle.C.id break if (pa == triangle.B.id) and (pb == triangle.C.id): if detected == 0: pc = triangle.A.id detected = 1 orient = 1 else: pd = triangle.A.id break if (pa == triangle.C.id) and (pb == triangle.A.id): if detected == 0: pc = triangle.B.id detected = 1 orient = 1 else: pd = triangle.B.id break if (pa == triangle.B.id) and (pb == triangle.A.id): if detected == 0: pc = triangle.C.id detected = 1 orient = -1 else: pd = triangle.C.id break if (pa == triangle.C.id) and (pb == triangle.B.id): if detected == 0: pc = triangle.A.id detected = 1 orient = -1 else: pd = triangle.A.id break if (pa == triangle.A.id) and (pb == triangle.C.id): if detected == 0: pc = triangle.B.id detected = 1 orient = -1 else: pd = triangle.B.id break if orient == 1: tmp = pd pd = pc pc = tmp tmp_angle = Angle(self.points[pc], self.points[pa], self.points[pb], self.points[pd]) self.angles.append(tmp_angle) # creating list of three neighbors for membrane collision if normal is True: for point in self.points: tmp_neighbors = [] # cycle through edges and select those that contain point for edge in self.edges: # take an edge and copy the nodes of the edge to pa, pb if edge.A.id == point.id: tmp_neighbors.append(edge.B) if edge.B.id == point.id: tmp_neighbors.append(edge.A) # create vectors to all neighbors and normalize them tmp_vectors_to_neighbors = [] p_coords = np.array(point.get_pos()) for neighbor in tmp_neighbors: tmp_vector = neighbor.get_pos() - p_coords tmp_length = norm(tmp_vector) if tmp_length < small_epsilon: raise Exception("Mesh: Degenerate edge. 
Quitting.") tmp_vector /= tmp_length tmp_vectors_to_neighbors.append(tmp_vector) # check all triplets of neighbors and select the one that is best spatially distributed # by adding the corresponding three normalized vectors # and selecting the one with smallest resultant vector n_neighbors = len(tmp_neighbors) min_length = large_number best_neighbors = [tmp_neighbors[0], tmp_neighbors[1], tmp_neighbors[2]] for i in range(0,n_neighbors): for j in range(i+1,n_neighbors): for k in range(j+1,n_neighbors): tmp_result_vector = tmp_vectors_to_neighbors[i] + tmp_vectors_to_neighbors[j] + \ tmp_vectors_to_neighbors[k] tmp_result_vector_length = norm(tmp_result_vector) if tmp_result_vector_length < min_length: min_length = tmp_result_vector_length best_neighbors = [tmp_neighbors[i], tmp_neighbors[j], tmp_neighbors[k]] # find one triangle that contains this point and compute its normal vector for triangle in self.triangles: if triangle.A.id == point.id or triangle.B.id == point.id or triangle.C.id == point.id: tmp_normal_triangle = get_triangle_normal(triangle.A.get_pos(), triangle.B.get_pos(), triangle.C.get_pos()) break # properly orient selected neighbors and save them to the list of neighbors tmp_normal_neighbors = get_triangle_normal(best_neighbors[0].get_pos(), best_neighbors[1].get_pos(), best_neighbors[2].get_pos()) tmp_length_normal_triangle = norm(tmp_normal_triangle) tmp_length_normal_neighbors = norm(tmp_normal_neighbors) tmp_product = np.dot(tmp_normal_triangle, tmp_normal_neighbors) / \ (tmp_length_normal_triangle * tmp_length_normal_neighbors) tmp_angle = np.arccos(tmp_product) if tmp_angle > np.pi/2.0: selected_neighbors = ThreeNeighbors(best_neighbors[0], best_neighbors[1], best_neighbors[2]) else: selected_neighbors = ThreeNeighbors(best_neighbors[0], best_neighbors[2], best_neighbors[1]) self.neighbors.append(selected_neighbors) else: for point in self.points: selected_neighbors = ThreeNeighbors(point, point, point) self.neighbors.append(selected_neighbors) def copy(self, origin=None, particle_type=-1, particle_mass=1.0, rotate=None): mesh = Mesh(system=self.system) mesh.ids_extremal_points = self.ids_extremal_points rotation = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]) if rotate is not None: # variables for rotation ca = np.cos(rotate[0]) sa = np.sin(rotate[0]) cb = np.cos(rotate[1]) sb = np.sin(rotate[1]) cc = np.cos(rotate[2]) sc = np.sin(rotate[2]) rotation = np.array([[cb * cc, sa * sb * cc - ca * sc, sc * sa + cc * sb * ca], [cb * sc, ca * cc + sa * sb * sc, sc * sb * ca - cc * sa], [-sb, cb * sa, ca * cb]]) for point in self.points: # PartPoints are created tmp_pos = point.get_pos() tmp_rotate_pos = np.array(point.get_pos()) # rotation of nodes if rotate is not None: tmp_pos = rotation.dot(tmp_rotate_pos) tmp_pos = [discard_epsilon(tmp_pos[0]), discard_epsilon(tmp_pos[1]), discard_epsilon(tmp_pos[2])] if origin is not None: tmp_pos += np.array(origin) new_part_id = len(self.system.part) # to remember the global id of the ESPResSo particle self.system.part.add(pos=tmp_pos, type=particle_type, mass=particle_mass, mol_id=particle_type) new_part = self.system.part[new_part_id] new_part_point = PartPoint(new_part, len(mesh.points), new_part_id) mesh.points.append(new_part_point) for edge in self.edges: new_edge = Edge(mesh.points[edge.A.id], mesh.points[edge.B.id]) mesh.edges.append(new_edge) for triangle in self.triangles: new_triangle = Triangle(mesh.points[triangle.A.id], mesh.points[triangle.B.id], mesh.points[triangle.C.id]) 
mesh.triangles.append(new_triangle) for angle in self.angles: new_angle = Angle(mesh.points[angle.A.id], mesh.points[angle.B.id], mesh.points[angle.C.id], mesh.points[angle.D.id]) mesh.angles.append(new_angle) for neighbors in self.neighbors: new_neighbors = ThreeNeighbors(mesh.points[neighbors.A.id], mesh.points[neighbors.B.id], mesh.points[neighbors.C.id]) mesh.neighbors.append(new_neighbors) return mesh def check_orientation(self): tmp_triangle_list = [] tmp_triangle_list_ok = [] t_ok = None corrected_triangle = None for triangle in self.triangles: tmp_triangle_list.append(triangle) # move the first triangle to the checked and corrected list tmp_triangle_list_ok.append(tmp_triangle_list[0]) tmp_triangle_list.pop(0) while len(tmp_triangle_list) != 0: i = 0 while i < len(tmp_triangle_list): tmp_triangle = tmp_triangle_list[i] for correct_triangle in tmp_triangle_list_ok: # check if triangles have a common edge, if so, check orientation are_neighbors = True if tmp_triangle.A.id == correct_triangle.A.id: if tmp_triangle.B.id == correct_triangle.B.id: t_ok = False # this is situation 123 and 124 corrected_triangle = Triangle(tmp_triangle.A, tmp_triangle.C, tmp_triangle.B) else: if tmp_triangle.B.id == correct_triangle.C.id: t_ok = True # this is situation 123 and 142 else: if tmp_triangle.C.id == correct_triangle.B.id: t_ok = True # this is situation 123 and 134 else: if tmp_triangle.C.id == correct_triangle.C.id: t_ok = False # this is situation 123 and 143 corrected_triangle = Triangle(tmp_triangle.A, tmp_triangle.C, tmp_triangle.B) else: are_neighbors = False else: if tmp_triangle.A.id == correct_triangle.B.id: if tmp_triangle.B.id == correct_triangle.C.id: t_ok = False # this is situation 123 and 412 corrected_triangle = Triangle(tmp_triangle.A, tmp_triangle.C, tmp_triangle.B) else: if tmp_triangle.B.id == correct_triangle.A.id: t_ok = True # this is situation 123 and 214 else: if tmp_triangle.C.id == correct_triangle.C.id: t_ok = True # this is situation 123 and 413 else: if tmp_triangle.C.id == correct_triangle.A.id: t_ok = False # this is situation 123 and 314 corrected_triangle = Triangle(tmp_triangle.A, tmp_triangle.C, tmp_triangle.B) else: are_neighbors = False else: if tmp_triangle.A.id == correct_triangle.C.id: if tmp_triangle.B.id == correct_triangle.A.id: t_ok = False # this is situation 123 and 241 corrected_triangle = Triangle(tmp_triangle.A, tmp_triangle.C, tmp_triangle.B) else: if tmp_triangle.B.id == correct_triangle.B.id: t_ok = True # this is situation 123 and 421 else: if tmp_triangle.C.id == correct_triangle.A.id: t_ok = True # this is situation 123 and 341 else: if tmp_triangle.C.id == correct_triangle.B.id: t_ok = False # this is situation 123 and 431 corrected_triangle = Triangle(tmp_triangle.A, tmp_triangle.C, tmp_triangle.B) else: are_neighbors = False else: if tmp_triangle.B.id == correct_triangle.A.id: if tmp_triangle.C.id == correct_triangle.B.id: t_ok = False # this is situation 123 and 234 corrected_triangle = Triangle(tmp_triangle.A, tmp_triangle.C, tmp_triangle.B) else: if tmp_triangle.C.id == correct_triangle.C.id: t_ok = True # this is situation 123 and 243 else: are_neighbors = False else: if tmp_triangle.B.id == correct_triangle.B.id: if tmp_triangle.C.id == correct_triangle.C.id: t_ok = False # this is situation 123 and 423 corrected_triangle = Triangle(tmp_triangle.A, tmp_triangle.C, tmp_triangle.B) else: if tmp_triangle.C.id == correct_triangle.A.id: t_ok = True # this is situation 123 and 324 else: are_neighbors = False else: if tmp_triangle.B.id == 
correct_triangle.C.id: if tmp_triangle.C.id == correct_triangle.A.id: t_ok = False # this is situation 123 and 342 corrected_triangle = Triangle(tmp_triangle.A, tmp_triangle.C, tmp_triangle.B) else: if tmp_triangle.C.id == correct_triangle.B.id: t_ok = True # this is situation 123 and 432 else: are_neighbors = False else: are_neighbors = False if are_neighbors: # move the tmp_triangle to the checked and corrected list if t_ok: tmp_triangle_list_ok.append(tmp_triangle) else: tmp_triangle_list_ok.append(corrected_triangle) tmp_triangle_list.pop(i) break i += 1 # replace triangles with checked triangles i = 0 for tmp_triangle in tmp_triangle_list_ok: self.triangles[i] = Triangle(tmp_triangle.A, tmp_triangle.C, tmp_triangle.B) i += 1 # all triangles now have the same orientation, check if it is correct tmp_volume = self.volume() if tmp_volume < 0: # opposite orientation, flip all triangles i = 0 for tmp_triangle in self.triangles: self.triangles[i] = Triangle(tmp_triangle.A, tmp_triangle.C, tmp_triangle.B) i += 1 return 0 def surface(self): surface = 0.0 for triangle in self.triangles: surface += triangle.area() return surface def volume(self): volume = 0.0 for triangle in self.triangles: tmp_normal = get_triangle_normal(triangle.A.get_pos(), triangle.B.get_pos(), triangle.C.get_pos()) tmp_normal_length = norm(tmp_normal) tmp_sum_z_coords = 1.0 / 3.0 * (triangle.A.get_pos()[2] + triangle.B.get_pos()[2] + triangle.C.get_pos()[2]) volume -= triangle.area() * tmp_normal[2] / tmp_normal_length * tmp_sum_z_coords return volume def get_n_nodes(self): return len(self.points) def get_n_triangles(self): return len(self.triangles) def get_n_edges(self): return len(self.edges) def output_mesh_triangles(self, triangles_file=None): # this is useful after the mesh correction # output of mesh nodes can be done from OifCell (this is because their position may change) if triangles_file is None: raise Exception("OifMesh: No file_name provided for triangles. Quitting.") output_file = open(triangles_file, "w") for t in self.triangles: output_file.write(str(t.A.id) + " " + str(t.B.id) + " " + str(t.C.id) + "\n") output_file.close() return 0 def mirror(self, mirror_x=0, mirror_y=0, mirror_z=0, out_file_name=""): if out_file_name == "": raise Exception("Cell.Mirror: output meshnodes file for new mesh is missing. Quitting.") if (mirror_x!=0 and mirror_x != 1) or (mirror_y!=0 and mirror_y != 1) or (mirror_z!=0 and mirror_z != 1): raise Exception("Mesh.Mirror: for mirroring only values 0 or 1 are accepted. 1 indicates that the corresponding coordinate will be flipped. Exiting.") if mirror_x + mirror_y + mirror_z > 1: raise Exception("Mesh.Mirror: flipping allowed only for one axis. Exiting.") if mirror_x + mirror_y + mirror_z == 1: out_file = open(out_file_name, "w") for p in self.points: coor = p.get_pos() if mirror_x == 1: coor[0] *= -1.0 if mirror_y == 1: coor[1] *= -1.0 if mirror_z == 1: coor[2] *= -1.0 out_file.write(custom_str(coor[0]) + " " + custom_str(coor[1]) + " " + custom_str(coor[2]) + "\n") out_file.close() return 0 class OifCellType(object): # analogous to oif_template """ Represents a template for creating elastic objects. """ def __init__(self, nodes_file="", triangles_file="", system=None, resize=(1.0, 1.0, 1.0), ks=0.0, kslin=0.0, kb=0.0, kal=0.0, kag=0.0, kv=0.0, kvisc=0.0, normal=False, check_orientation=True): if (system is None) or (not isinstance(system,espressomd.System)): raise Exception("OifCellType: No system provided or wrong type. 
Quitting.") if (nodes_file == "") or (triangles_file == ""): raise Exception("OifCellType: One of nodesfile or trianglesfile is missing. Quitting.") if not (isinstance(nodes_file,str) and isinstance(triangles_file,str)): raise TypeError("OifCellType: Filenames must be strings.") if not ((len(resize) == 3) and isinstance(resize[0],float) and isinstance(resize[1],float) and isinstance(resize[2],float)): raise TypeError("OifCellType: Resize must be a list of three floats.") if not (isinstance(ks,float) and isinstance(ks,float) and isinstance(kb,float) and isinstance(kal,float) and isinstance(kag,float) and isinstance(kv,float) and isinstance(kvisc,float)): raise TypeError("OifCellType: Elastic parameters must be floats.") if not isinstance(normal,bool): raise TypeError("OifCellType: normal must be bool.") if not isinstance(check_orientation,bool): raise TypeError("OifCellType: check_orientation must be bool.") if (ks != 0.0) and (kslin != 0.0): raise Exception("OifCellType: Cannot use linear and nonlinear stretching at the same time. Quitting.") self.system = system self.mesh = Mesh(nodes_file=nodes_file, triangles_file=triangles_file, system=system, resize=resize, normal=normal, check_orientation=check_orientation) self.local_force_interactions = [] self.resize = resize self.ks = ks self.kslin = kslin self.kb = kb self.kal = kal self.kag = kag self.kv = kv self.kvisc = kvisc self.normal = normal if (ks != 0.0) or (kslin != 0.0) or (kb != 0.0) or (kal != 0.0): for angle in self.mesh.angles: r0 = vec_distance(angle.B.get_pos(), angle.C.get_pos()) phi = angle_btw_triangles(angle.A.get_pos(), angle.B.get_pos(), angle.C.get_pos(), angle.D.get_pos()) area1 = area_triangle(angle.A.get_pos(), angle.B.get_pos(), angle.C.get_pos()) area2 = area_triangle(angle.D.get_pos(), angle.B.get_pos(), angle.C.get_pos()) tmp_local_force_inter = OifLocalForces(r0=r0, ks=ks, kslin=kslin, phi0=phi, kb=kb, A01=area1, A02=area2, kal=kal, kvisc=kvisc) self.local_force_interactions.append([tmp_local_force_inter, [angle.A, angle.B, angle.C, angle.D]]) self.system.bonded_inter.add(tmp_local_force_inter) if (kag != 0.0) or (kv != 0.0): surface = self.mesh.surface() volume = self.mesh.volume() self.global_force_interaction = OifGlobalForces(A0_g=surface, ka_g=kag, V0=volume, kv=kv) self.system.bonded_inter.add(self.global_force_interaction) def print_info(self): print("\nThe following OifCellType was created: ") print("\t nodes_file: " + self.mesh.nodes_file) print("\t triangles_file: " + self.mesh.triangles_file) print("\t n_nodes: " + str(self.mesh.get_n_nodes())) print("\t n_triangles: " + str(self.mesh.get_n_triangles())) print("\t n_edges: " + str(self.mesh.get_n_edges())) print("\t ks: " + custom_str(self.ks)) print("\t kslin: " + custom_str(self.kslin)) print("\t kb: " + custom_str(self.kb)) print("\t kal: " + custom_str(self.kal)) print("\t kag: " + custom_str(self.kag)) print("\t kv: " + custom_str(self.kv)) print("\t kvisc: " + custom_str(self.kvisc)) print("\t normal: " + str(self.normal)) print("\t resize: " + str(self.resize)) print(" ") class OifCell(object): """ Represents a concrete elastic object. """ def __init__(self, cell_type=None, origin=None, particle_type=None, particle_mass=1.0, rotate=None): if (cell_type is None) or (not isinstance(cell_type,OifCellType)): raise Exception("OifCell: No cellType provided or wrong type. 
Quitting.") if (origin is None) or \ (not ((len(origin) == 3) and isinstance(origin[0],float) and isinstance(origin[1],float) and isinstance(origin[2],float))): raise TypeError("Origin must be tuple.") if (particle_type is None) or (not isinstance(particle_type,int)): raise Exception("OifCell: No particle_type specified or wrong type. Quitting.") if not isinstance(particle_mass,float): raise Exception("OifCell: particle mass must be float.") if (rotate is not None) and not ((len(rotate) == 3) and isinstance(rotate[0],float) and isinstance(rotate[1],float) and isinstance(rotate[2],float)): raise TypeError("Rotate must be list of three floats.") self.cell_type = cell_type self.cell_type.system.max_oif_objects =self.cell_type.system.max_oif_objects+1 self.mesh = cell_type.mesh.copy(origin=origin, particle_type=particle_type, particle_mass=particle_mass, rotate=rotate) self.particle_mass = particle_mass self.particle_type = particle_type self.origin = origin self.rotate = rotate for inter in self.cell_type.local_force_interactions: esp_inter = inter[0] points = inter[1] n_points = len(points) if n_points == 2: p0 = self.mesh.points[points[0].id] # Getting PartPoints from id's of FixedPoints p1 = self.mesh.points[points[1].id] p0.part.add_bond((esp_inter, p1.part_id)) if n_points == 3: p0 = self.mesh.points[points[0].id] p1 = self.mesh.points[points[1].id] p2 = self.mesh.points[points[2].id] p0.part.add_bond((esp_inter, p1.part_id, p2.part_id)) if n_points == 4: p0 = self.mesh.points[points[0].id] p1 = self.mesh.points[points[1].id] p2 = self.mesh.points[points[2].id] p3 = self.mesh.points[points[3].id] p1.part.add_bond((esp_inter, p0.part_id, p2.part_id, p3.part_id)) if (self.cell_type.kag!=0.0) or (self.cell_type.kv!=0.0): for triangle in self.mesh.triangles: triangle.A.part.add_bond((self.cell_type.global_force_interaction, triangle.B.part_id, triangle.C.part_id)) # setting the out_direction interaction for membrane collision if self.cell_type.mesh.normal is True: tmp_out_direction_interaction = OifOutDirection() # this interaction could be just one for all objects, but here it is created multiple times self.cell_type.system.bonded_inter.add(tmp_out_direction_interaction) for p in self.mesh.points: p.part.add_bond((tmp_out_direction_interaction, self.mesh.neighbors[p.id].A.part_id, self.mesh.neighbors[p.id].B.part_id, self.mesh.neighbors[p.id].C.part_id)) def get_origin(self): center = np.array([0.0, 0.0, 0.0]) for p in self.mesh.points: center += p.get_pos() return center/len(self.mesh.points) def set_origin(self, new_origin = (0.0, 0.0, 0.0)): old_origin = self.get_origin() for p in self.mesh.points: new_position = p.get_pos() - old_origin + new_origin p.set_pos(new_position) def get_approx_origin(self): approx_center = np.array([0.0, 0.0, 0.0]) for id in self.mesh.ids_extremal_points: approx_center += self.mesh.points[id].get_pos() return approx_center/len(self.mesh.ids_extremal_points) def get_origin_folded(self): origin = self.get_origin() return np.mod(origin, self.cell_type.system.box_l) def get_velocity(self): velocity = np.array([0.0, 0.0, 0.0]) for p in self.mesh.points: velocity += p.get_vel() return velocity/len(self.mesh.points) def set_velocity(self, new_velocity = (0.0, 0.0, 0.0)): for p in self.mesh.points: p.set_vel(new_velocity) def pos_bounds(self): x_min = large_number x_max = -large_number y_min = large_number y_max = -large_number z_min = large_number z_max = -large_number for p in self.mesh.points: coords = p.get_pos() if coords[0] < x_min: x_min = coords[0] if 
coords[0] > x_max: x_max = coords[0] if coords[1] < y_min: y_min = coords[1] if coords[1] > y_max: y_max = coords[1] if coords[2] < z_min: z_min = coords[2] if coords[2] > z_max: z_max = coords[2] return [x_min, x_max, y_min, y_max, z_min, z_max] def surface(self): return self.mesh.surface() def volume(self): return self.mesh.volume() def diameter(self): max_distance = 0.0 n_points = len(self.mesh.points) for i in range(0, n_points): for j in range(i+1, n_points): p1 = self.mesh.points[i].get_pos() p2 = self.mesh.points[j].get_pos() tmp_dist = vec_distance(p1,p2) if tmp_dist > max_distance: max_distance = tmp_dist return max_distance def get_n_nodes(self): return self.mesh.get_n_nodes() def set_force(self, new_force = (0.0, 0.0, 0.0)): for p in self.mesh.points: p.set_force(new_force) # this is not implemented # def kill_motion(self): # for p in self.mesh.points: # p.kill_motion() # this is not implemented # def unkill_motion(self): # for p in self.mesh.points: # p.unkill_motion() def output_vtk_pos(self, file_name=None): if file_name is None: raise Exception("OifCell: No file_name provided for vtk output. Quitting") n_points = len(self.mesh.points) n_triangles = len(self.mesh.triangles) output_file = open(file_name, "w") output_file.write("# vtk DataFile Version 3.0\n") output_file.write("Data\n") output_file.write("ASCII\n") output_file.write("DATASET POLYDATA\n") output_file.write("POINTS " + str(n_points) + " float\n") for p in self.mesh.points: coords = p.get_pos() output_file.write(custom_str(coords[0]) + " " + custom_str(coords[1]) + " " + custom_str(coords[2]) + "\n") output_file.write("TRIANGLE_STRIPS " + str(n_triangles) + " " + str(4*n_triangles) + "\n") for t in self.mesh.triangles: output_file.write("3 " + str(t.A.id) + " " + str(t.B.id) + " " + str(t.C.id) + "\n") output_file.close() def output_vtk_pos_folded(self, file_name=None): if file_name is None: raise Exception("OifCell: No file_name provided for vtk output. Quitting.") n_points = len(self.mesh.points) n_triangles = len(self.mesh.triangles) # get coordinates of the origin center = np.array([0.0, 0.0, 0.0]) for p in self.mesh.points: center += p.get_pos() center /= len(self.mesh.points) center_folded = np.floor(center/self.cell_type.system.box_l) # this gives how many times the origin is folded in all three directions output_file = open(file_name, "w") output_file.write("# vtk DataFile Version 3.0\n") output_file.write("Data\n") output_file.write("ASCII\n") output_file.write("DATASET POLYDATA\n") output_file.write("POINTS " + str(n_points) + " float\n") for p in self.mesh.points: coords = p.get_pos() - center_folded * self.cell_type.system.box_l output_file.write(custom_str(coords[0]) + " " + custom_str(coords[1]) + " " + custom_str(coords[2]) + "\n") output_file.write("TRIANGLE_STRIPS " + str(n_triangles) + " " + str(4 * n_triangles) + "\n") for t in self.mesh.triangles: output_file.write("3 " + str(t.A.id) + " " + str(t.B.id) + " " + str(t.C.id) + "\n") output_file.close() def append_point_data_to_vtk(self, file_name=None, data_name=None, data=None, first_append=None): if file_name is None: raise Exception("OifCell: append_point_data_to_vtk: No file_name provided. Quitting.") if data is None: raise Exception("OifCell: append_point_data_to_vtk: No data provided. Quitting.") return if data_name is None: raise Exception("OifCell: append_point_data_to_vtk: No data_name provided. 
Quitting.") if first_append is None: raise Exception("OifCell: append_point_data_to_vtk: Need to know whether this is the first data list to be " "appended for this file. Quitting.") n_points = self.get_n_nodes() if (len(data) != n_points): raise Exception("OifCell: append_point_data_to_vtk: Number of data points does not match number of mesh points. Quitting.") output_file = open(file_name, "a") if first_append is True: output_file.write("POINT_DATA " + str(n_points) + "\n") output_file.write("SCALARS " + data_name + " float 1\n") output_file.write("LOOKUP_TABLE default\n") for p in self.mesh.points: output_file.write(str(data[p.id]) + "\n") output_file.close() def output_raw_data(self, file_name=None, data=None): if file_name is None: raise Exception("OifCell: output_raw_data: No file_name provided. Quitting.") if data is None: raise Exception("OifCell: output_raw_data: No data provided. Quitting.") n_points = self.get_n_nodes() if (len(data) != n_points): raise Exception("OifCell: output_raw_data: Number of data points does not match number of mesh points. Quitting.") output_file = open(file_name, "w") for p in self.mesh.points: output_file.write(" ".join(map(str,data[p.id])) + "\n") output_file.close() def output_mesh_points(self, file_name=None): if file_name is None: raise Exception("OifCell: No file_name provided for mesh nodes output. Quitting.") output_file = open(file_name, "w") center = self.get_origin() for p in self.mesh.points: coords = p.get_pos() - center output_file.write(custom_str(coords[0]) + " " + custom_str(coords[1]) + " " + custom_str(coords[2]) + "\n") output_file.close() def set_mesh_points(self, file_name=None): if file_name is None: raise Exception("OifCell: No file_name provided for set_mesh_points. Quitting.") center = self.get_origin() n_points = self.get_n_nodes() in_file = open(file_name, "r") nodes_coord = in_file.read().split("\n") in_file.close() # removes a blank line at the end of the file if there is any: nodes_coord = filter(None, nodes_coord) # here we have list of lines with triplets of strings if len(nodes_coord) != n_points: raise Exception("OifCell: Mesh nodes not set to new positions: " "number of lines in the file does not equal number of Cell nodes. 
Quitting.") else: i = 0 for line in nodes_coord: # extracts coordinates from the string line line = line.split() new_position = np.array(line).astype(np.float) + center self.mesh.points[i].set_pos(new_position) i += 1 def print_info(self): print("\nThe following OifCell was created: ") print("\t particle_mass: " + custom_str(self.particle_mass)) print("\t particle_type: " + str(self.particle_type)) print("\t rotate: " + str(self.rotate)) print("\t origin: " + str(self.origin[0]) + " " + str(self.origin[1]) + " " + str(self.origin[2])) def elastic_forces(self, el_forces=(0, 0, 0, 0, 0, 0), f_metric=(0, 0, 0, 0, 0, 0), vtk_file=None, raw_data_file=None): # the order of parameters in elastic_forces and in f_metric is as follows (ks, kb, kal, kag, kv, total) # vtk_file means that a vtk file for visualisation of elastic forces will be written # raw_data_file means that just the elastic forces will be written into the output file stretching_forces_list = [] bending_forces_list = [] local_area_forces_list = [] global_area_forces_list = [] volume_forces_list = [] elastic_forces_list = [] stretching_forces_norms_list = [] bending_forces_norms_list = [] local_area_forces_norms_list = [] global_area_forces_norms_list = [] volume_forces_norms_list = [] elastic_forces_norms_list = [] ks_f_metric = 0.0 kb_f_metric = 0.0 kal_f_metric = 0.0 kag_f_metric = 0.0 kv_f_metric = 0.0 total_f_metric = 0.0 for i in range(0,6): if (el_forces[i] != 0) and (el_forces[i] != 1): raise Exception("OifCell: elastic_forces: Incorrect argument. el_forces has to be a sixtuple of 0s and 1s, " "specifying which elastic forces will be calculated. The order in the sixtuple is (ks, kb, " "kal, kag, kv, total).") for i in range(0,6): if (f_metric[i] != 0) and (f_metric[i] != 1): raise Exception("OifCell: elastic_forces: Incorrect argument. f_metric has to be a sixtuple of 0s and 1s, " "specifying which f_metric will be calculated. 
The order in the sixtuple is (ks, kb, kal, " "kag, kv, total)") # calculation of stretching forces and f_metric if (el_forces[0] == 1) or (el_forces[5] == 1) or (f_metric[0] == 1) or (f_metric[5] == 1): # initialize list stretching_forces_list = [] for p in self.mesh.points: stretching_forces_list.append([0.0, 0.0, 0.0]) # calculation uses edges, but results are stored for nodes for e in self.mesh.edges: a_current_pos = e.A.get_pos() b_current_pos = e.B.get_pos() a_orig_pos = self.cell_type.mesh.points[e.A.id].get_pos() b_orig_pos = self.cell_type.mesh.points[e.B.id].get_pos() current_dist = e.length() orig_dist = vec_distance(a_orig_pos, b_orig_pos) tmp_stretching_force = oif_calc_stretching_force(self.cell_type.ks, a_current_pos, b_current_pos, orig_dist, current_dist) stretching_forces_list[e.A.id] += tmp_stretching_force stretching_forces_list[e.B.id] -= tmp_stretching_force # calculation of stretching f_metric, if needed if f_metric[0] == 1: ks_f_metric = 0.0 for p in self.mesh.points: ks_f_metric += norm(stretching_forces_list[p.id]) # calculation of bending forces and f_metric if (el_forces[1] == 1) or (el_forces[5] == 1) or (f_metric[1] == 1) or (f_metric[5] == 1): # initialize list bending_forces_list = [] for p in self.mesh.points: bending_forces_list.append([0.0, 0.0, 0.0]) # calculation uses bending incidences, but results are stored for nodes for angle in self.mesh.angles: a_current_pos = angle.A.get_pos() b_current_pos = angle.B.get_pos() c_current_pos = angle.C.get_pos() d_current_pos = angle.D.get_pos() a_orig_pos = self.cell_type.mesh.points[angle.A.id].get_pos() b_orig_pos = self.cell_type.mesh.points[angle.B.id].get_pos() c_orig_pos = self.cell_type.mesh.points[angle.C.id].get_pos() d_orig_pos = self.cell_type.mesh.points[angle.D.id].get_pos() current_angle = angle.size() orig_angle = angle_btw_triangles(a_orig_pos, b_orig_pos, c_orig_pos, d_orig_pos) tmp_bending_forces = oif_calc_bending_force(self.cell_type.kb, a_current_pos, b_current_pos, c_current_pos, d_current_pos, orig_angle, current_angle) tmp_bending_force1 = np.array([tmp_bending_forces[0], tmp_bending_forces[1], tmp_bending_forces[2]]) tmp_bending_force2 = np.array([tmp_bending_forces[3], tmp_bending_forces[4], tmp_bending_forces[5]]) bending_forces_list[angle.A.id] += tmp_bending_force1 bending_forces_list[angle.B.id] -= 0.5*tmp_bending_force1 + 0.5*tmp_bending_force2 bending_forces_list[angle.C.id] -= 0.5*tmp_bending_force1 + 0.5*tmp_bending_force2 bending_forces_list[angle.D.id] += tmp_bending_force2 # calculation of bending f_metric, if needed if f_metric[1] == 1: kb_f_metric = 0.0 for p in self.mesh.points: kb_f_metric += norm(bending_forces_list[p.id]) # calculation of local area forces and f_metric if (el_forces[2] == 1) or (el_forces[5] == 1) or (f_metric[2] == 1) or (f_metric[5] == 1): # initialize list local_area_forces_list = [] for p in self.mesh.points: local_area_forces_list.append([0.0, 0.0, 0.0]) # calculation uses triangles, but results are stored for nodes for t in self.mesh.triangles: a_current_pos = t.A.get_pos() b_current_pos = t.B.get_pos() c_current_pos = t.C.get_pos() a_orig_pos = self.cell_type.mesh.points[t.A.id].get_pos() b_orig_pos = self.cell_type.mesh.points[t.B.id].get_pos() c_orig_pos = self.cell_type.mesh.points[t.C.id].get_pos() current_area = t.area() orig_area = area_triangle(a_orig_pos, b_orig_pos, c_orig_pos) tmp_local_area_forces = oif_calc_local_area_force(self.cell_type.kal, a_current_pos, b_current_pos, c_current_pos, orig_area, current_area) 
local_area_forces_list[t.A.id] += np.array([tmp_local_area_forces[0], tmp_local_area_forces[1], tmp_local_area_forces[2]]) local_area_forces_list[t.B.id] += np.array([tmp_local_area_forces[3], tmp_local_area_forces[4], tmp_local_area_forces[5]]) local_area_forces_list[t.C.id] += np.array([tmp_local_area_forces[6], tmp_local_area_forces[7], tmp_local_area_forces[8]]) # calculation of local area f_metric, if needed if f_metric[2] == 1: kal_f_metric = 0.0 for p in self.mesh.points: kal_f_metric += norm(local_area_forces_list[p.id]) # calculation of global area forces and f_metric if (el_forces[3] == 1) or (el_forces[5] == 1) or (f_metric[3] == 1) or (f_metric[5] == 1): # initialize list global_area_forces_list = [] for p in self.mesh.points: global_area_forces_list.append([0.0, 0.0, 0.0]) # calculation uses triangles, but results are stored for nodes for t in self.mesh.triangles: a_current_pos = t.A.get_pos() b_current_pos = t.B.get_pos() c_current_pos = t.C.get_pos() current_surface = self.mesh.surface() orig_surface = self.cell_type.mesh.surface() tmp_global_area_forces = oif_calc_global_area_force(self.cell_type.kag, a_current_pos, b_current_pos, c_current_pos, orig_surface, current_surface) global_area_forces_list[t.A.id] += np.array([tmp_global_area_forces[0], tmp_global_area_forces[1], tmp_global_area_forces[2]]) global_area_forces_list[t.B.id] += np.array([tmp_global_area_forces[3], tmp_global_area_forces[4], tmp_global_area_forces[5]]) global_area_forces_list[t.C.id] += np.array([tmp_global_area_forces[6], tmp_global_area_forces[7], tmp_global_area_forces[8]]) # calculation of global area f_metric, if needed if f_metric[3] == 1: kag_f_metric = 0.0 for p in self.mesh.points: kag_f_metric += norm(global_area_forces_list[p.id]) # calculation of volume forces and f_metric if (el_forces[4] == 1) or (el_forces[5] == 1) or (f_metric[4] == 1) or (f_metric[5] == 1): # initialize list volume_forces_list = [] for p in self.mesh.points: volume_forces_list.append([0.0, 0.0, 0.0]) # calculation uses triangles, but results are stored for nodes for t in self.mesh.triangles: a_current_pos = t.A.get_pos() b_current_pos = t.B.get_pos() c_current_pos = t.C.get_pos() current_volume = self.mesh.volume() orig_volume = self.cell_type.mesh.volume() tmp_volume_force = oif_calc_volume_force(self.cell_type.kv, a_current_pos, b_current_pos, c_current_pos, orig_volume, current_volume) volume_forces_list[t.A.id] += tmp_volume_force volume_forces_list[t.B.id] += tmp_volume_force volume_forces_list[t.C.id] += tmp_volume_force # calculation of volume f_metric, if needed if f_metric[4] == 1: kv_f_metric = 0.0 for p in self.mesh.points: kv_f_metric += norm(volume_forces_list[p.id]) # calculation of total elastic forces and f_metric if (el_forces[5] == 1) or (f_metric[5] == 1): elastic_forces_list = [] for p in self.mesh.points: total_elastic_forces = stretching_forces_list[p.id] + bending_forces_list[p.id] + \ local_area_forces_list[p.id] + global_area_forces_list[p.id] + \ volume_forces_list[p.id] elastic_forces_list.append(total_elastic_forces) # calculation of total f_metric, if needed if f_metric[5] == 1: total_f_metric = 0.0 for p in self.mesh.points: total_f_metric += norm(elastic_forces_list[p.id]) # calculate norms of resulting forces if (el_forces[0] + el_forces[1] + el_forces[2] + el_forces[3] + el_forces[4] + el_forces[5]) != 0: if el_forces[0] == 1: stretching_forces_norms_list = [] for p in self.mesh.points: stretching_forces_norms_list.append(norm(stretching_forces_list[p.id])) if el_forces[1] == 1: 
bending_forces_norms_list = [] for p in self.mesh.points: bending_forces_norms_list.append(norm(bending_forces_list[p.id])) if el_forces[2] == 1: local_area_forces_norms_list = [] for p in self.mesh.points: local_area_forces_norms_list.append(norm(local_area_forces_list[p.id])) if el_forces[3] == 1: global_area_forces_norms_list = [] for p in self.mesh.points: global_area_forces_norms_list.append(norm(global_area_forces_list[p.id])) if el_forces[4] == 1: volume_forces_norms_list = [] for p in self.mesh.points: volume_forces_norms_list.append(norm(volume_forces_list[p.id])) if el_forces[5] == 1: elastic_forces_norms_list = [] for p in self.mesh.points: elastic_forces_norms_list.append(norm(elastic_forces_list[p.id])) # output vtk (folded) if vtk_file is not None: if el_forces == (0, 0, 0, 0, 0, 0): raise Exception("OifCell: elastic_forces: The option elastic_forces was not used. " "Nothing to output to vtk file.") self.output_vtk_pos_folded(vtk_file) first = True if el_forces[0] == 1: self.append_point_data_to_vtk(file_name=vtk_file, data_name="ks_f_metric", data=stretching_forces_norms_list, first_append=first) first = False if el_forces[1] == 1: self.append_point_data_to_vtk(file_name=vtk_file, data_name="kb_f_metric", data=bending_forces_norms_list, first_append=first) first = False if el_forces[2] == 1: self.append_point_data_to_vtk(file_name=vtk_file, data_name="kal_f_metric", data=local_area_forces_norms_list, first_append=first) first = False if el_forces[3] == 1: self.append_point_data_to_vtk(file_name=vtk_file, data_name="kag_f_metric", data=global_area_forces_norms_list, first_append=first) first = False if el_forces[4] == 1: self.append_point_data_to_vtk(file_name=vtk_file, data_name="kav_f_metric", data=volume_forces_norms_list, first_append=first) first = False if el_forces[5] == 1: self.append_point_data_to_vtk(file_name=vtk_file, data_name="total_f_metric", data=elastic_forces_norms_list, first_append=first) first = False # output raw data if raw_data_file is not None: if (el_forces[0] + el_forces[1] + el_forces[2] + el_forces[3] + el_forces[4] + el_forces[5]) != 1: raise Exception("OifCell: elastic_forces: Only one type of elastic forces can be written into one " "raw_data_file. If you need several, please call OifCell.elastic_forces multiple times - " "once per elastic force.") if el_forces[0] == 1: self.output_raw_data(file_name=raw_data_file, data=stretching_forces_list) if el_forces[1] == 1: self.output_raw_data(file_name=raw_data_file, data=bending_forces_list) if el_forces[2] == 1: self.output_raw_data(file_name=raw_data_file, data=local_area_forces_list) if el_forces[3] == 1: self.output_raw_data(file_name=raw_data_file, data=global_area_forces_list) if el_forces[4] == 1: self.output_raw_data(file_name=raw_data_file, data=volume_forces_list) if el_forces[5] == 1: self.output_raw_data(file_name=raw_data_file, data=elastic_forces_list) # return f_metric if f_metric[0] + f_metric[1] + f_metric[2] + f_metric[3] + f_metric[4] + f_metric[5] > 0: results = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0] if f_metric[0] == 1: results[0] = ks_f_metric if f_metric[1] == 1: results[1] = kb_f_metric if f_metric[2] == 1: results[2] = kal_f_metric if f_metric[3] == 1: results[3] = kag_f_metric if f_metric[4] == 1: results[4] = kv_f_metric if f_metric[5] == 1: results[5] = total_f_metric return results else: return 0
KonradBreitsprecher/espresso
src/python/object_in_fluid/oif_classes.py
Python
gpl-3.0
64,217
[ "ESPResSo", "VTK" ]
c09be2ccfa0d95dc73539688337d126f168762a5c24b6da4664d006fb0e6c1e3
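A minimal usage sketch for the OifCellType / OifCell classes defined in oif_classes.py above. The import path, mesh file names, box size and elastic constants are illustrative assumptions; only the constructor signatures, the max_oif_objects counter and the output_vtk_pos call follow the code in the record itself.

# usage sketch, assuming espressomd is installed and the two mesh files exist
import espressomd
from object_in_fluid.oif_classes import OifCellType, OifCell  # import path is an assumption

system = espressomd.System(box_l=[20.0, 20.0, 20.0])  # or set system.box_l, depending on the ESPResSo version
system.max_oif_objects = 0  # OifCell increments this counter on the System object

# a template: mesh geometry plus the elastic moduli shared by all cells of this type
rbc_type = OifCellType(nodes_file="rbc_nodes.dat", triangles_file="rbc_triangles.dat",
                       system=system, resize=(1.0, 1.0, 1.0),
                       ks=0.02, kb=0.016, kal=0.02, kag=0.9, kv=0.5)

# a concrete cell: copies the template mesh into the box at the given origin
rbc = OifCell(cell_type=rbc_type, origin=[10.0, 10.0, 10.0],
              particle_type=0, particle_mass=1.0)
rbc.print_info()
rbc.output_vtk_pos(file_name="rbc_0.vtk")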
# Copyright 2002 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.

import Martel as _M  # make sure not imported into client's namespaces
import string as _string

blank_expr = _M.AssertNot(_M.Re("."))

html_expr = _M.Rep(_M.Any(_string.whitespace)) + _M.NoCase(_M.Str("<html>"))

def has_expr(expr):
    return _M.Rep(_M.AssertNot(expr) + _M.Alt(_M.Re("."), _M.AnyEol())) + expr

def has_str(str):
    return has_expr(_M.Str(str))
dbmi-pitt/DIKB-Micropublication
scripts/mp-scripts/Bio/dbdefs/_support.py
Python
apache-2.0
591
[ "Biopython" ]
c7ae94369f22745544fada9dc12b7731d0cea519732363149575dea618b14420
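A small hypothetical example of how the two helpers above could be combined; the variable names and the literals "LOCUS" and "ID   " are placeholders for illustration, not definitions taken from Biopython's dbdefs modules.

# hypothetical usage of the helpers defined in Bio/dbdefs/_support.py above
import Martel
from Bio.dbdefs._support import has_str, has_expr, html_expr

looks_like_genbank = has_str("LOCUS")                  # literal may occur anywhere in the input
looks_like_swissprot = has_expr(Martel.Str("ID   "))   # same idea with an explicit Martel expression
looks_like_html = html_expr                            # optional whitespace followed by "<html>"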
from __future__ import absolute_import, division, print_function import ast from nose.tools import raises from jaspyx.context.module import ModuleContext from jaspyx.visitor import BaseVisitor class TestBaseVisitor: def setUp(self): self.r = {} self.v = BaseVisitor('test.jpx', self.r, 2) def test_path(self): assert self.v.path == 'test.jpx' def test_registry(self): assert self.v.registry is self.r def test_default_indent(self): assert self.v.default_indent == 2 def test_stack(self): assert self.v.stack == [] def test_module(self): assert self.v.module is None def test_push(self): a = {} self.v.push(a) assert self.v.stack == [a] class TestBaseVisitorWithStack: class S: def __init__(self): self.data = [] self.indent = 2 def add(self, value): self.data.append(value) def __init__(self): self.v = BaseVisitor('test.jpx', {}, 2) self.v.visit_Str = lambda n: self.v.output(n.s) self.s = self.S() self.v.push(self.s) def test_pop(self): self.v.push('foo') self.v.pop() assert self.s.data == ['foo'] def test_output(self): self.v.output('foo') assert self.s.data == ['foo'] def test_indent(self): self.v.indent() assert self.s.data == [' '] def test_finish(self): self.v.finish() assert self.s.data == [';\n'] def test_group_defaults_empty(self): self.v.group([]) assert ''.join(self.s.data) == '()' def test_group_defaults_one(self): self.v.group(['foo']) assert ''.join(self.s.data) == '(foo)' def test_group_default_two(self): self.v.group(['foo', 'bar']) assert ''.join(self.s.data) == '(foo bar)' def test_group_default_three(self): self.v.group(['foo', 'bar', 'baz']) assert ''.join(self.s.data) == '(foo bar baz)' def test_group_non_defaults_empty(self): self.v.group([], prefix='[', infix='+', infix_node=ast.Str('quux'), suffix=']') assert ''.join(self.s.data) == '[]' def test_group_non_defaults_one(self): self.v.group(['foo'], prefix='[', infix='+', infix_node=ast.Str('quux'), suffix=']') assert ''.join(self.s.data) == '[foo]' def test_group_non_default_two(self): self.v.group(['foo', 'bar'], prefix='[', infix='+', infix_node=ast.Str('quux'), suffix=']') assert ''.join(self.s.data) == '[foo+quux+bar]' def test_group_non_default_three(self): self.v.group(['foo', 'bar', 'baz'], prefix='[', infix='+', infix_node=ast.Str('quux'), suffix=']') assert ''.join(self.s.data) == '[foo+quux+bar+quux+baz]' def test_group_node_value(self): self.v.group([ast.Str('foo')]) assert ''.join(self.s.data) == '(foo)' def test_block(self): self.v.block([]) assert self.s.data == [] def test_block_with_node(self): self.v.block([ast.Str('foo')]) assert self.s.data == ['foo'] def test_block_with_context(self): s = self.S() self.v.block([], s) assert self.v.stack[-1].data == [s] class TestBaseVisitorVisitModule: def setUp(self): self.v = BaseVisitor('test.jpx', {}, 2) self.v.visit_Str = lambda n: self.v.output(n.s) self.v.visit_Module(ast.Module([ast.Str('foo')])) def test_module_attr(self): assert isinstance(self.v.module, ModuleContext) def test_indent(self): assert self.v.module.indent == self.v.default_indent def test_stack(self): assert self.v.stack == [self.v.module] def test_body(self): assert self.v.module.body == ['foo'] class TestBaseVisitorVisitExprAndPass: def setUp(self): self.v = BaseVisitor('test.jpx', {}, 2) self.v.visit_Str = lambda n: self.v.output(n.s) self.v.visit_Module(ast.Module([])) def test_expr(self): self.v.visit(ast.Expr(ast.Str('foo'))) assert self.v.stack[-1].body == [' ', 'foo', ';\n'] def test_pass(self): self.v.visit(ast.Pass()) assert self.v.stack[-1].body == [] @raises(NotImplementedError) def 
test_generic_visit(self): self.v.visit(ast.Print())
ztane/jaspyx
jaspyx/tests/visitor/test_base_visitor.py
Python
mit
4,341
[ "VisIt" ]
40f793854c4831bb5772084d1f2ab665a32d340cfef2769aa3f59789cf8d49e9
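The tests above monkey-patch visit_Str onto BaseVisitor; a concrete visitor subclass would supply the same hook. A rough sketch follows (the class name StrEmittingVisitor is invented; the behaviour mirrors the expectations encoded in the tests).

import ast
from jaspyx.visitor import BaseVisitor

class StrEmittingVisitor(BaseVisitor):
    def visit_Str(self, node):
        # write the literal string into the current output context,
        # exactly what the lambda patched in during setUp does
        self.output(node.s)

v = StrEmittingVisitor('test.jpx', {}, 2)
v.visit_Module(ast.Module([ast.Expr(ast.Str('foo'))]))
# per the tests, v.module.body now holds the indent, 'foo' and the ';\n' terminator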
# -*- coding: utf 8 -*- from __future__ import division try: import cPickle except ImportError: import pickle as cPickle import numpy as np from scipy import linalg import theano import theano.tensor as T from utils import PickleMixin, minibatch_indices, make_minibatch from optimizers import rmsprop, sgd_nesterov from utils import make_regression from extmath import logsumexp from layers import concatenate, build_recurrent_lstm_layer, build_linear_layer from layers import build_tanh_layer, init_recurrent_conditional_lstm_layer from layers import build_recurrent_conditional_lstm_layer_from_params from layers import init_linear_layer, build_linear_layer_from_params def rnn_check_array(X, y=None): if type(X) == np.ndarray and len(X.shape) == 2: X = [X.astype(theano.config.floatX)] elif type(X) == np.ndarray and len(X.shape) == 3: X = X.astype(theano.config.floatX) elif type(X) == list: if type(X[0]) == np.ndarray and len(X[0].shape) == 2: X = [x.astype(theano.config.floatX) for x in X] else: raise ValueError("X must be a 2D numpy array or an" "iterable of 2D numpy arrays") try: X[0].shape[1] except AttributeError: raise ValueError("X must be a 2D numpy array or an" "iterable of 2D numpy arrays") if y is not None: if type(y) == np.ndarray and len(y.shape) == 1: y = [y.astype('int32')] elif type(y) == np.ndarray and len(y.shape) == 2: y = y.astype('int32') elif type(y) == list: if type(y[0]) == np.ndarray and len(y[0].shape) == 1: y = [yi.astype('int32') for yi in y] elif type(y[0]) != np.ndarray: y = [np.asarray(y).astype('int32')] try: y[0].shape[0] except AttributeError: raise ValueError("y must be an iterable of 1D numpy arrays") return X, y else: # If y is not passed don't return it return X def stack_forward_layers(X_sym, X_mask, layer_sizes, recurrent_builder, random_state, one_step=False): theano_variable = X_sym input_sizes = layer_sizes[:-1] hidden_sizes = layer_sizes[1:] # set these to stop pep8 vim plugin from complaining for input_size, hidden_size in zip(input_sizes, hidden_sizes): forward_hidden, forward_params = recurrent_builder( input_size, hidden_size, theano_variable, X_mask, random_state, one_step=one_step) params = forward_params theano_variable = forward_hidden return theano_variable, params def stack_bidirectional_layers(X_sym, X_mask, layer_sizes, recurrent_builder, random_state, one_step=False): theano_variable = X_sym input_sizes = layer_sizes[:-1] hidden_sizes = layer_sizes[1:] for n, (input_size, hidden_size) in enumerate(zip(input_sizes, hidden_sizes)): if n != 0: input_size = 2 * input_size forward_hidden, forward_params = recurrent_builder( input_size, hidden_size, theano_variable, X_mask, random_state, one_step=one_step) backward_hidden, backward_params = recurrent_builder( input_size, hidden_size, theano_variable[::-1], X_mask[::-1], random_state, one_step=one_step) params = forward_params + backward_params theano_variable = concatenate( [forward_hidden, backward_hidden[::-1]], axis=forward_hidden.ndim - 1) return theano_variable, params class _BaseRNNClassifier(PickleMixin): def __init__(self, hidden_layer_sizes=[100], max_iter=1E2, learning_rate=0.01, momentum=0., learning_alg="sgd", recurrent_activation="lstm", minibatch_size=1, bidirectional=False, save_frequency=10, model_save_name="saved_model", random_seed=None, input_checking=True): if random_seed is None or type(random_seed) is int: self.random_state = np.random.RandomState(random_seed) self.learning_rate = learning_rate self.learning_alg = learning_alg self.momentum = momentum self.bidirectional = 
bidirectional self.hidden_layer_sizes = hidden_layer_sizes self.max_iter = int(max_iter) self.minibatch_size = minibatch_size self.save_frequency = save_frequency self.model_save_name = model_save_name self.input_checking = input_checking if self.learning_alg == "rmsprop": self.optimizer = rmsprop elif self.learning_alg == "sgd": self.optimizer = sgd_nesterov else: raise ValueError("Value of self.learning_alg" "not understood! Valid options" "%s, got %s" % (["sgd", "rmsprop"], self.learning_alg)) def fit(self, X, y, valid_X=None, valid_y=None): if self.input_checking: X, y = rnn_check_array(X, y) input_size = X[0].shape[1] # Assume that class values are sequential! and start from 0 highest_class = np.max([np.max(d) for d in y]) lowest_class = np.min([np.min(d) for d in y]) if lowest_class != 0: raise ValueError("Labels must start from 0!") # Create a list of all classes, then get uniques # sum(lists, []) is list concatenation all_classes = np.unique(sum([list(np.unique(d)) for d in y], [])) # +1 to include endpoint output_size = len(np.arange(lowest_class, highest_class + 1)) X_sym = T.tensor3('x') y_sym = T.tensor3('y') X_mask = T.matrix('x_mask') y_mask = T.matrix('y_mask') self.layers_ = [] self.layer_sizes_ = [input_size] self.layer_sizes_.extend(self.hidden_layer_sizes) self.layer_sizes_.append(output_size) if not hasattr(self, 'fit_function'): print("Building model!") self._setup_functions(X_sym, y_sym, X_mask, y_mask, self.layer_sizes_) self.training_loss_ = [] if valid_X is not None: self.validation_loss_ = [] if self.input_checking: valid_X, valid_y = rnn_check_array(valid_X, valid_y) for vy in valid_y: if not np.in1d(np.unique(vy), all_classes).all(): raise ValueError( "Validation set contains classes not in training" "set! Training set classes: %s\n, Validation set \ classes: %s" % (all_classes, np.unique(vy))) best_valid_loss = np.inf best_train_loss = np.inf try: for itr in range(self.max_iter): print("Starting pass %d through the dataset" % itr) total_train_loss = 0 for i, j in minibatch_indices(X, self.minibatch_size): X_n, y_n, X_mask, y_mask = make_minibatch(X[i:j], y[i:j], output_size) train_loss = self.fit_function(X_n, y_n, X_mask, y_mask) total_train_loss += train_loss current_train_loss = total_train_loss / len(X) print("Training loss %f" % current_train_loss) self.training_loss_.append(current_train_loss) if valid_X is not None: total_valid_loss = 0 for i, j in minibatch_indices(valid_X, self.minibatch_size): valid_X_n, valid_y_n, X_mask, y_mask = make_minibatch( valid_X[i:j], valid_y[i:j], output_size) valid_loss = self.loss_function(valid_X_n, valid_y_n, X_mask, y_mask) total_valid_loss += valid_loss current_valid_loss = total_valid_loss / len(valid_X) print("Validation loss %f" % current_valid_loss) self.validation_loss_.append(current_valid_loss) if (itr % self.save_frequency) == 0: f = open(self.model_save_name + "_snapshot.pkl", 'wb') cPickle.dump(self, f, protocol=2) f.close() if current_train_loss < best_train_loss: best_train_loss = current_train_loss f = open(self.model_save_name + "_train_best.pkl", 'wb') cPickle.dump(self, f, protocol=2) f.close() if itr == (self.max_iter - 1): f = open(self.model_save_name + "_last.pkl", 'wb') cPickle.dump(self, f, protocol=2) f.close() # Shortcircuit if statement if valid_X is not None and current_valid_loss < best_valid_loss: best_valid_loss = current_valid_loss f = open(self.model_save_name + "_valid_best.pkl", 'wb') cPickle.dump(self, f, protocol=2) f.close() except KeyboardInterrupt: print("User cancelled, saving 
last model!") f = open(self.model_save_name + "_interrupt.pkl", 'wb') cPickle.dump(self, f, protocol=2) f.close() def predict(self, X): raise ValueError("Not yet implemented!") X = rnn_check_array(X) predictions = [] for n in range(len(X)): X_mask = np.ones((len(X[n]), 1)).astype(theano.config.floatX) pred = np.argmax(self.predict_function(X[n], X_mask)[0], axis=1) predictions.append(pred) return predictions def predict_proba(self, X): raise ValueError("Not yet implemented!") X = rnn_check_array(X) predictions = [] for n in range(len(X)): X_n = X[n][None].transpose(1, 0, 2) X_mask = np.ones((len(X_n), 1)).astype(theano.config.floatX) pred = self.predict_function(X_n, X_mask)[0] predictions.append(pred) return predictions class _BaseRNNRegressor(PickleMixin): def __init__(self, hidden_layer_sizes=[100], n_mixture_components=20, max_iter=1E2, learning_rate=0.01, momentum=0., learning_alg="sgd", recurrent_activation="lstm", minibatch_size=1, bidirectional=False, save_frequency=10, model_save_name="saved_model", random_seed=None, input_checking=True): if random_seed is None or type(random_seed) is int: self.random_state = np.random.RandomState(random_seed) self.learning_rate = learning_rate self.learning_alg = learning_alg self.momentum = momentum self.bidirectional = bidirectional self.hidden_layer_sizes = hidden_layer_sizes self.n_mixture_components = n_mixture_components self.max_iter = int(max_iter) self.minibatch_size = minibatch_size self.save_frequency = save_frequency self.model_save_name = model_save_name self.input_checking = input_checking if self.learning_alg == "rmsprop": self.optimizer = rmsprop elif self.learning_alg == "sgd": self.optimizer = sgd_nesterov else: raise ValueError("Value of self.learning_alg" "not understood! Valid options" "%s, got %s" % (["sgd", "rmsprop"], self.learning_alg)) def fit(self, X, valid_X=None): # Input size: # n_samples # n_timesteps # n_features if self.input_checking: X = rnn_check_array(X) self.n_features = X[0].shape[-1] # Regression features self.input_size_ = self.n_features self.output_size_ = self.n_features X_sym = T.tensor3('x') y_sym = T.tensor3('y') X_mask_sym = T.matrix('x_mask') y_mask_sym = T.matrix('y_mask') self.layers_ = [] self.layer_sizes_ = [self.input_size_] self.layer_sizes_.extend(self.hidden_layer_sizes) self.layer_sizes_.append(self.output_size_) self.training_loss_ = [] if valid_X is not None: self.validation_loss_ = [] if self.input_checking: valid_X = rnn_check_array(valid_X) best_valid_loss = np.inf best_train_loss = np.inf try: for itr in range(self.max_iter): print("Starting pass %d through the dataset" % itr) total_train_loss = 0 for i, j in minibatch_indices(X, self.minibatch_size): X_n, y_n, X_mask, y_mask = make_regression(X[i:j]) if not hasattr(self, 'fit_function'): # This is here to make debugging easier X_sym.tag.test_value = X_n y_sym.tag.test_value = y_n X_mask_sym.tag.test_value = X_mask y_mask_sym.tag.test_value = y_mask print("Building model!") print("Minibatch X size %s" % str(X_n.shape)) print("Minibatch y size %s" % str(y_n.shape)) self._setup_functions(X_sym, y_sym, X_mask_sym, y_mask_sym, self.layer_sizes_) train_loss = self.fit_function(X_n, y_n, X_mask, y_mask) total_train_loss += train_loss current_train_loss = total_train_loss / len(X) print("Training loss %f" % current_train_loss) self.training_loss_.append(current_train_loss) if valid_X is not None: total_valid_loss = 0 for i, j in minibatch_indices(valid_X, self.minibatch_size): valid_X_n, valid_y_n, _, _ = make_regression( valid_X[i:j], 
self.window_size, self.prediction_size) valid_loss = self.loss_function(valid_X_n, valid_y_n) total_valid_loss += valid_loss current_valid_loss = total_valid_loss / len(valid_X) print("Validation loss %f" % current_valid_loss) self.validation_loss_.append(current_valid_loss) if (itr % self.save_frequency) == 0: f = open(self.model_save_name + "_snapshot.pkl", 'wb') cPickle.dump(self, f, protocol=2) f.close() if current_train_loss < best_train_loss: best_train_loss = current_train_loss f = open(self.model_save_name + "_train_best.pkl", 'wb') cPickle.dump(self, f, protocol=2) f.close() if itr == (self.max_iter - 1): f = open(self.model_save_name + "_last.pkl", 'wb') cPickle.dump(self, f, protocol=2) f.close() # Shortcircuit if statement if valid_X is not None and current_valid_loss < best_valid_loss: best_valid_loss = current_valid_loss f = open(self.model_save_name + "_valid_best.pkl", 'wb') cPickle.dump(self, f, protocol=2) f.close() except KeyboardInterrupt: print("User cancelled, saving last model!") f = open(self.model_save_name + "_interrupt.pkl", 'wb') cPickle.dump(self, f, protocol=2) f.close() class AGMMRNN(_BaseRNNRegressor): def _setup_functions(self, X_sym, y_sym, X_mask, y_mask, layer_sizes): recurrent_sizes = layer_sizes[:-1] input_variable, params = stack_forward_layers( X_sym, X_mask, recurrent_sizes, build_recurrent_lstm_layer, self.random_state) sz = recurrent_sizes[-1] # Hardcoded, works for 3 dims/ handwriting *only*! # Up/down channel binary, binary_params = build_linear_layer( sz, 1, input_variable, self.random_state) params = params + binary_params # Means mu, mu_params = build_linear_layer( sz, self.n_mixture_components * 2, input_variable, self.random_state) params = params + mu_params # Diagonal var, var_params = build_linear_layer( sz, self.n_mixture_components * 2, input_variable, self.random_state) params = params + var_params # Off-diagonal corr, corr_params = build_linear_layer( sz, self.n_mixture_components * 1, input_variable, self.random_state) params = params + corr_params coeff, coeff_params = build_linear_layer( sz, self.n_mixture_components, input_variable, self.random_state) params = params + coeff_params mu_shp = mu.shape var_shp = var.shape corr_shp = corr.shape coeff_shp = coeff.shape y_shp = y_sym.shape # TODO: Masking! # Reshape everything to 2D coeff = coeff.reshape([coeff_shp[0] * coeff_shp[1], coeff_shp[2]]) coeff = T.nnet.softmax(coeff) y_r = y_sym.reshape([y_shp[0] * y_shp[1], y_shp[2]]) y_b = y_r[:, 0] y_r = y_r[:, 1:] mu = mu.reshape([mu_shp[0] * mu_shp[1], mu_shp[2]]) var = var.reshape([var_shp[0] * var_shp[1], var_shp[2]]) corr = corr.reshape([corr_shp[0] * corr_shp[1], corr_shp[2]]) log_var = T.log(T.nnet.softplus(var) + 1E-9) # Negative due to sigmoid? AG paper has positive exponential binary = T.nnet.sigmoid(-binary) corr = T.tanh(corr) binary = binary.ravel() # Reshape using 2D shapes... 
y_r = y_r.dimshuffle(0, 1, 'x') mu = mu.reshape([mu.shape[0], T.cast(mu.shape[1] / coeff.shape[-1], 'int32'), coeff.shape[-1]]) log_var = log_var.reshape([log_var.shape[0], T.cast(log_var.shape[1] / coeff.shape[-1], 'int32'), coeff.shape[-1]]) corr = corr.reshape([corr.shape[0], T.cast(corr.shape[1] / coeff.shape[-1], 'int32'), coeff.shape[-1]]) # Exact AG cost - see the paper "Generating Sequences with Recurrent # Neural Networks", Alex Graves # http://arxiv.org/pdf/1308.0850v5.pdf x1 = X_sym[:, :, 1] x1 = T.addbroadcast(x1, 1) x2 = X_sym[:, :, 2] x2 = T.addbroadcast(x2, 1) mu1 = mu[:, 0, :] mu2 = mu[:, 1, :] log_var1 = log_var[:, 0, :] log_var2 = log_var[:, 1, :] # Binary cost c_b = -y_b * T.log(binary + 1E-9) - (1 - y_b) * T.log(1 - binary + 1E-9) # First part of log Gaussian c_g1 = -T.log(2 * np.pi) - log_var1 - log_var2 - .5 * T.log( 1 - T.sum(corr, axis=1) ** 2 + 1E-9) # Multiplier on z c_g2 = -.5 * 1. / (1 - T.sum(corr, axis=1) ** 2) z = (x1 - mu1) ** 2 / T.exp(log_var1) ** 2 z += (x2 - mu2) ** 2 / T.exp(log_var2) ** 2 z -= 2 * T.sum(corr, axis=1) * (x1 - mu1) * (x2 - mu2) / ( T.exp(log_var1) * T.exp(log_var2)) cost = c_g1 + c_g2 * z cost = T.sum(-logsumexp(T.log(coeff) + cost, axis=1) + c_b) grads = T.grad(cost, params) self.opt_ = self.optimizer(params) updates = self.opt_.updates( params, grads, self.learning_rate, self.momentum) self.fit_function = theano.function(inputs=[X_sym, y_sym, X_mask, y_mask], outputs=cost, updates=updates, on_unused_input="ignore") self.loss_function = theano.function(inputs=[X_sym, y_sym, X_mask, y_mask], outputs=cost, on_unused_input="ignore") self.generate_function = theano.function(inputs=[X_sym, X_mask], outputs=[binary, mu, log_var, corr, coeff], on_unused_input="ignore") def sample(self, n_steps=100, bias=1., alg="soft", seed_sequence=None, random_seed=None): if random_seed is None: random_state = self.random_state else: random_state = np.random.RandomState(random_seed) samples = np.zeros((n_steps, self.n_features)) if seed_sequence is not None: if seed_sequence.shape[1] != self.n_features: raise ValueError("Seed sequence needs to have the same number" "of features as the training data!") seed_len = len(seed_sequence) samples[:seed_len, :] = seed_sequence else: seed_len = 1 samples[0, :] = np.random.rand(self.n_features) for n in range(seed_len, n_steps): X_n = rnn_check_array(samples[None]) X_n = X_n.transpose(1, 0, 2) X_mask = np.ones((X_n.shape[0], X_n.shape[1]), dtype=theano.config.floatX) r = self.generate_function(X_n[:n], X_mask[:n]) # get samples # outputs are n_features, n_predictions, n_gaussians binary = r[0][-1] mu = r[1][-1] log_var = r[2][-1] corr = r[3][-1] coeff = r[4][-1] # Make sure it sums to 1 coeff = coeff / coeff.sum() full_cov = np.zeros((mu.shape[0], mu.shape[0], mu.shape[1])) var = np.exp(log_var) chol_factors = np.zeros_like(full_cov) for i in range(mu.shape[1]): full_cov[0, 0, i] = var[0, i] sx = np.sqrt(var[0, i]) full_cov[1, 1, i] = var[1, i] sy = np.sqrt(var[1, i]) cov = corr[0, i] * (sx * sy) full_cov[0, 1, i] = cov full_cov[1, 0, i] = cov chol_factors[:, :, i] = linalg.cholesky(full_cov[:, :, i]) s = np.zeros_like(mu) if alg == "hard": # Choice sample k = np.where(random_state.rand() < coeff.cumsum())[0][0] s = bias * np.dot(random_state.randn(mu.shape[0]), chol_factors[:, :, k]) + mu elif alg == "soft": # Averaged sample for i in range(mu.shape[1]): s[:, i] = bias * np.dot(random_state.randn(mu.shape[0]), chol_factors[:, :, i]) s += mu s = np.dot(s, coeff) else: raise ValueError("alg must be 'hard' or 'soft'") 
samples[n, 0] = binary samples[n, 1:] = s return np.array(samples) def force_sample(self, X, bias=1., alg="soft", random_seed=None): if len(X.shape) != 2: raise ValueError("X must be a 2D array of (steps, features)") if random_seed is None: random_state = self.random_state else: random_state = np.random.RandomState(random_seed) samples = np.zeros((X.shape[0], self.n_features)) X_n = rnn_check_array(X[None]) X_n = X_n.transpose(1, 0, 2) X_mask = np.ones((X_n.shape[0], X_n.shape[1]), dtype=theano.config.floatX) r = self.generate_function(X_n, X_mask) for n in range(X.shape[0]): # get samples # outputs are n_features, n_predictions, n_gaussians binary = r[0][n] mu = r[1][n] log_var = r[2][n] corr = r[3][n] coeff = r[4][n] # Make sure it sums to 1 coeff = coeff / coeff.sum() full_cov = np.zeros((mu.shape[0], mu.shape[0], mu.shape[1])) var = np.exp(log_var) chol_factors = np.zeros_like(full_cov) for i in range(mu.shape[1]): full_cov[0, 0, i] = var[0, i] sx = np.sqrt(var[0, i]) full_cov[1, 1, i] = var[1, i] sy = np.sqrt(var[1, i]) cov = corr[0, i] * (sx * sy) full_cov[0, 1, i] = cov full_cov[1, 0, i] = cov chol_factors[:, :, i] = linalg.cholesky(full_cov[:, :, i]) s = np.zeros_like(mu) if alg == "hard": # Choice sample k = np.where(random_state.rand() < coeff.cumsum())[0][0] s = bias * np.dot(random_state.randn(mu.shape[0]), chol_factors[:, :, k]) + mu elif alg == "soft": # Averaged sample for i in range(mu.shape[1]): s[:, i] = bias * np.dot(random_state.randn(mu.shape[0]), chol_factors[:, :, i]) s += mu s = np.dot(s, coeff) else: raise ValueError("alg must be 'hard' or 'soft'") samples[n, 0] = binary samples[n, 1:] = s return np.array(samples) class GMMRNN(_BaseRNNRegressor): def _setup_functions(self, X_sym, y_sym, X_mask, y_mask, layer_sizes): recurrent_sizes = layer_sizes[:-1] input_variable, params = stack_forward_layers( X_sym, X_mask, recurrent_sizes, build_recurrent_lstm_layer, self.random_state) sz = recurrent_sizes[-1] mu, mu_params = build_linear_layer( sz, self.n_mixture_components * self.n_features, input_variable, self.random_state) params = params + mu_params var, var_params = build_linear_layer( sz, self.n_mixture_components * self.n_features, input_variable, self.random_state) params = params + var_params coeff, coeff_params = build_linear_layer( sz, self.n_mixture_components, input_variable, self.random_state) params = params + coeff_params mu_shp = mu.shape var_shp = var.shape coeff_shp = coeff.shape y_shp = y_sym.shape # TODO: Masking! # Reshape everything to 2D coeff = coeff.reshape([coeff_shp[0] * coeff_shp[1], coeff_shp[2]]) coeff = T.nnet.softmax(coeff) y_r = y_sym.reshape([y_shp[0] * y_shp[1], y_shp[2]]) mu = mu.reshape([mu_shp[0] * mu_shp[1], mu_shp[2]]) var = var.reshape([var_shp[0] * var_shp[1], var_shp[2]]) # Reshape using 2D shapes... 
y_r = y_r.dimshuffle(0, 1, 'x') mu = mu.reshape([mu.shape[0], T.cast(mu.shape[1] / coeff.shape[-1], 'int32'), coeff.shape[-1]]) var = var.reshape([var.shape[0], T.cast(var.shape[1] / coeff.shape[-1], 'int32'), coeff.shape[-1]]) # Calculate GMM cost with minimum tolerance log_var = T.log(T.nnet.softplus(var) + 1E-15) cost = -0.5 * T.sum(T.sqr(y_r - mu) * T.exp(-log_var) + log_var + T.log(2 * np.pi), axis=1) cost = -logsumexp(T.log(coeff) + cost, axis=1).sum() grads = T.grad(cost, params) self.opt_ = self.optimizer(params) updates = self.opt_.updates( params, grads, self.learning_rate, self.momentum) self.fit_function = theano.function(inputs=[X_sym, y_sym, X_mask, y_mask], outputs=cost, updates=updates, on_unused_input="ignore") self.loss_function = theano.function(inputs=[X_sym, y_sym, X_mask, y_mask], outputs=cost, on_unused_input="ignore") self.generate_function = theano.function(inputs=[X_sym, X_mask], outputs=[mu, log_var, coeff], on_unused_input="ignore") def sample(self, n_steps=100, bias=1., alg="soft", seed_sequence=None, random_seed=None): if random_seed is None: random_state = self.random_state else: random_state = np.random.RandomState(random_seed) if seed_sequence is not None: raise ValueError("Seeded generation not yet supported") samples = np.zeros((n_steps, self.n_features)) s = random_state.rand(self.n_features) samples[0] = s for n in range(1, n_steps): X_n = rnn_check_array(samples[None]) X_n = X_n.transpose(1, 0, 2) X_mask = np.ones((X_n.shape[0], X_n.shape[1]), dtype=theano.config.floatX) r = self.generate_function(X_n[:n], X_mask[:n]) # get samples # outputs are n_features, n_predictions, n_gaussians mu = r[0][-1] log_var = r[1][-1] coeff = r[2][-1] # Make sure it sums to 1 coeff = coeff / coeff.sum() if alg == "hard": # Choice sample k = np.where(random_state.rand() < coeff.cumsum())[0][0] s = random_state.randn(mu.shape[0]) * np.sqrt( np.exp(log_var[:, k])) + mu[:, k] elif alg == "soft": # Averaged sample s = bias * random_state.randn(*mu.shape) * np.sqrt( np.exp(log_var)) + mu s = np.dot(s, coeff) else: raise ValueError("alg must be 'hard' or 'soft'") samples[n] = s return np.array(samples) def force_sample(self, X, bias=1., alg="soft", random_seed=None): if len(X.shape) != 2: raise ValueError("X must be a 2D array of (steps, features)") if random_seed is None: random_state = self.random_state else: random_state = np.random.RandomState(random_seed) samples = np.zeros((X.shape[0], self.n_features)) X_n = rnn_check_array(X[None]) X_n = X_n.transpose(1, 0, 2) X_mask = np.ones((X_n.shape[0], X_n.shape[1]), dtype=theano.config.floatX) r = self.generate_function(X_n, X_mask) for n in range(X.shape[0]): # get samples # outputs are n_features, n_predictions, n_gaussians mu = r[0][n] log_var = r[1][n] coeff = r[2][n] # Make sure it sums to 1 coeff = coeff / coeff.sum() if alg == "hard": # Choice sample k = np.where(random_state.rand() < coeff.cumsum())[0][0] s = random_state.randn(mu.shape[0]) * np.sqrt( np.exp(log_var[:, k])) + mu[:, k] elif alg == "soft": # Averaged sample s = bias * random_state.randn(*mu.shape) * np.sqrt( np.exp(log_var)) + mu s = np.dot(s, coeff) else: raise ValueError("alg must be 'hard' or 'soft'") samples[n] = s return np.array(samples) class RNN(_BaseRNNClassifier): def _setup_functions(self, X_sym, y_sym, X_mask, y_mask, layer_sizes): (input_variable, params, sz, input_size, hidden_sizes, output_size) = self._stack_layers(X_sym, X_mask, layer_sizes) output, output_params = build_linear_layer(sz, output_size, input_variable, self.random_state) 
params = params + output_params shp = output.shape output = output.reshape([shp[0] * shp[1], shp[2]]) y_hat_sym = T.nnet.softmax(output) y_sym_reshaped = y_sym.reshape([shp[0] * shp[1], shp[2]]) cost = -T.mean((y_sym_reshaped * T.log(y_hat_sym)).sum(axis=1)) grads = T.grad(cost, params) self.opt_ = self.optimizer(params) updates = self.opt_.updates( params, grads, self.learning_rate, self.momentum) self.fit_function = theano.function(inputs=[X_sym, y_sym, X_mask, y_mask], outputs=cost, updates=updates, on_unused_input="ignore") self.loss_function = theano.function(inputs=[X_sym, y_sym, X_mask, y_mask], outputs=cost, on_unused_input="ignore") self.predict_function = theano.function( inputs=[X_sym, X_mask], outputs=y_hat_sym, on_unused_input="ignore") class EncDecRNN(_BaseRNNClassifier): def predict(self, X): raise ValueError("Not yet implemented!") X = rnn_check_array(X) for n in range(len(X)): X_n = X[n][None].transpose(1, 0, 2) X_mask = np.ones((len(X_n), 1)).astype(theano.config.floatX) state, memory, ctx = self._encode(X_n, X_mask) for i in range(100): ctx = np.tile(ctx.squeeze(), [1, 1, 1]).transpose(1, 0, 2) """ self._sampler_step = theano.function( [y_sw_sampler, context, X_mask, init_state_sampler, init_memory_sampler], [y_hat_sampler, next_state, next_memory]) from IPython import embed; embed() """ def _setup_functions(self, X_sym, y_sym, X_mask, y_mask, layer_sizes): (input_variable, params, sz, input_size, hidden_sizes, output_size) = self._stack_layers(X_sym, X_mask, layer_sizes) # hardmode context = input_variable context_mean = context[0] init_state, state_params = build_tanh_layer(sz, hidden_sizes[-1], context_mean, self.random_state) init_memory, memory_params = build_tanh_layer(sz, hidden_sizes[-1], context_mean, self.random_state) # partial sampler setup self._encode = theano.function([X_sym, X_mask], [init_state, init_memory, context]) init_state_sampler = T.matrix() init_memory_sampler = T.matrix() y_sw_sampler = T.tensor3() y_sw_mask = T.alloc(1., y_sw_sampler.shape[0], 1) # need this style of init to reuse params for sampler and actual # training. This makes this part quite nasty - dictionary # for initialization and params is making more and more sense. # conditional params will be reused below conditional_params = init_recurrent_conditional_lstm_layer( output_size, hidden_sizes[-1], sz, self.random_state) rval, _p = build_recurrent_conditional_lstm_layer_from_params( conditional_params, y_sw_sampler, y_sw_mask, context, X_mask, init_state_sampler, init_memory_sampler, self.random_state, one_step=True) next_state, next_memory, sampler_contexts, _ = rval # end sampler parts... 
for now params = params + state_params + memory_params shifted_labels = T.zeros_like(y_sym) shifted_labels = T.set_subtensor(shifted_labels[1:], y_sym[:-1]) y_sym = shifted_labels rval, _p = build_recurrent_conditional_lstm_layer_from_params( conditional_params, shifted_labels, y_mask, context, X_mask, init_state, init_memory, self.random_state) projected_hidden, _, contexts, attention = rval params = params + conditional_params # once again, need to use same params for sample gen lh_params = init_linear_layer(hidden_sizes[-1], output_size, self.random_state) logit_hidden, _ = build_linear_layer_from_params(lh_params, projected_hidden) params = params + lh_params lo_params = init_linear_layer(output_size, output_size, self.random_state) logit_out, _ = build_linear_layer_from_params(lo_params, y_sym) params = params + lo_params lc_params = init_linear_layer(sz, output_size, self.random_state) logit_contexts, _ = build_linear_layer_from_params(lc_params, contexts) params = params + lc_params logit = T.tanh(logit_hidden + logit_out + logit_contexts) output_params = init_linear_layer(output_size, output_size, self.random_state) output, _ = build_linear_layer_from_params(output_params, logit) params = params + output_params shp = output.shape output = output.reshape([shp[0] * shp[1], shp[2]]) y_hat_sym = T.nnet.softmax(output) # Need to apply mask so that cost isn't punished y_sym_reshaped = (y_sym * y_mask.dimshuffle(0, 1, 'x')).reshape( [shp[0] * shp[1], shp[2]]) y_sym_reshaped = y_sym.reshape([shp[0] * shp[1], shp[2]]) cost = -T.mean((y_sym_reshaped * T.log(y_hat_sym)).sum(axis=1)) # Finish sampler logit_sampler_hidden, _ = build_linear_layer_from_params(lh_params, next_state) logit_sampler_out, _ = build_linear_layer_from_params(lo_params, y_sw_sampler) logit_sampler_contexts, _ = build_linear_layer_from_params( lc_params, sampler_contexts) logit_sampler = T.tanh(logit_sampler_hidden + logit_sampler_out + logit_sampler_contexts) output_sampler, _ = build_linear_layer_from_params(output_params, logit_sampler) shp = output_sampler.shape output_sampler = output_sampler.reshape([shp[0] * shp[1], shp[2]]) y_hat_sampler = T.nnet.softmax(output_sampler) self._sampler_step = theano.function( [y_sw_sampler, context, X_mask, init_state_sampler, init_memory_sampler], [y_hat_sampler, next_state, next_memory]) self.params_ = params updates = self._updates(X_sym, y_sym, params, cost) self.fit_function = theano.function(inputs=[X_sym, y_sym, X_mask, y_mask], outputs=cost, updates=updates, on_unused_input="warn") self.loss_function = theano.function(inputs=[X_sym, y_sym, X_mask, y_mask], outputs=cost, on_unused_input="warn") self.predict_function = theano.function( inputs=[X_sym, X_mask, y_sym, y_mask], outputs=y_hat_sym)
kastnerkyle/minet
minet/recurrent.py
Python
bsd-3-clause
40,812
[ "Gaussian" ]
e99d201dff8aac5e8def5b57bfa420a5d937e3e516d312deb1c78e02c8645f38
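The force_sample/sample methods in minet/recurrent.py above draw each output from a mixture of correlated 2-D Gaussians: per component they assemble a full covariance from the predicted variances and correlation, Cholesky-factor it, and then either pick one component ("hard") or average component samples weighted by the mixture coefficients ("soft"). Below is a minimal NumPy-only sketch of that single step, detached from the RNN; the function name and the (mu, log_var, corr, coeff) layout are assumptions that mirror the per-timestep network outputs above, not part of the original module.

import numpy as np

def sample_bivariate_mixture(mu, log_var, corr, coeff, bias=1.0, alg="hard",
                             random_state=None):
    # mu, log_var: (2, K); corr, coeff: (K,).  Sketch of the "hard"/"soft"
    # branches above for a single time step.
    rng = random_state if random_state is not None else np.random.RandomState(0)
    coeff = coeff / coeff.sum()                       # make sure weights sum to 1
    var = np.exp(log_var)
    chol = np.zeros((2, 2, coeff.size))
    for k in range(coeff.size):
        sx, sy = np.sqrt(var[0, k]), np.sqrt(var[1, k])
        cov_xy = corr[k] * sx * sy
        full_cov = np.array([[var[0, k], cov_xy],
                             [cov_xy, var[1, k]]])
        chol[:, :, k] = np.linalg.cholesky(full_cov)  # lower-triangular factor
    if alg == "hard":                                 # sample a single component
        k = np.where(rng.rand() < coeff.cumsum())[0][0]
        return bias * chol[:, :, k].dot(rng.randn(2)) + mu[:, k]
    elif alg == "soft":                               # coefficient-weighted average
        s = np.stack([bias * chol[:, :, k].dot(rng.randn(2)) + mu[:, k]
                      for k in range(coeff.size)], axis=1)
        return s.dot(coeff)
    raise ValueError("alg must be 'hard' or 'soft'")

As in the sampler above, bias scales only the noise term, so values below 1 concentrate the draws around the predicted means.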
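GMMRNN's training cost above is the negative log-likelihood of a diagonal-covariance Gaussian mixture, evaluated per time step and stabilised with log-sum-exp. A small NumPy/SciPy sketch of the same quantity for one target vector follows; the Theano version operates on reshaped 2-D tensors, so the shapes here are simplified and the function name is not from the original module.

import numpy as np
from scipy.special import logsumexp

def gmm_negative_log_likelihood(y, mu, log_var, coeff):
    # y: (D,) target; mu, log_var: (D, K); coeff: (K,) mixture weights.
    # Per-component diagonal Gaussian log-density, summed over features.
    log_component = -0.5 * np.sum((y[:, None] - mu) ** 2 * np.exp(-log_var)
                                  + log_var + np.log(2 * np.pi), axis=0)
    # -log sum_k coeff_k * N_k(y), computed stably with log-sum-exp.
    return -logsumexp(np.log(coeff) + log_component)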
# # tsne.py # # Implementation of t-SNE in Python. The implementation was tested on Python 2.5.1, and it requires a working # installation of NumPy. The implementation comes with an example on the MNIST dataset. In order to plot the # results of this example, a working installation of matplotlib is required. # The example can be run by executing: ipython tsne.py -pylab # # # Created by Laurens van der Maaten on 20-12-08. # Copyright (c) 2008 Tilburg University. All rights reserved. import numpy as Math import pylab as Plot import matplotlib.colors as colors from featurevectors import * def Hbeta(D = Math.array([]), beta = 1.0): """Compute the perplexity and the P-row for a specific value of the precision of a Gaussian distribution.""" # Compute P-row and corresponding perplexity P = Math.exp(-D.copy() * beta); sumP = sum(P); H = Math.log(sumP) + beta * Math.sum(D * P) / sumP; P = P / sumP; return H, P; def x2p(X = Math.array([]), tol = 1e-5, perplexity = 30.0): """Performs a binary search to get P-values in such a way that each conditional Gaussian has the same perplexity.""" # Initialize some variables print "Computing pairwise distances..." (n, d) = X.shape; sum_X = Math.sum(Math.square(X), 1); D = Math.add(Math.add(-2 * Math.dot(X, X.T), sum_X).T, sum_X); P = Math.zeros((n, n)); beta = Math.ones((n, 1)); logU = Math.log(perplexity); # Loop over all datapoints for i in range(n): # Print progress if i % 500 == 0: print "Computing P-values for point ", i, " of ", n, "..." # Compute the Gaussian kernel and entropy for the current precision betamin = -Math.inf; betamax = Math.inf; Di = D[i, Math.concatenate((Math.r_[0:i], Math.r_[i+1:n]))]; (H, thisP) = Hbeta(Di, beta[i]); # Evaluate whether the perplexity is within tolerance Hdiff = H - logU; tries = 0; while Math.abs(Hdiff) > tol and tries < 50: # If not, increase or decrease precision if Hdiff > 0: betamin = beta[i].copy(); if betamax == Math.inf or betamax == -Math.inf: beta[i] = beta[i] * 2; else: beta[i] = (beta[i] + betamax) / 2; else: betamax = beta[i].copy(); if betamin == Math.inf or betamin == -Math.inf: beta[i] = beta[i] / 2; else: beta[i] = (beta[i] + betamin) / 2; # Recompute the values (H, thisP) = Hbeta(Di, beta[i]); Hdiff = H - logU; tries = tries + 1; # Set the final row of P P[i, Math.concatenate((Math.r_[0:i], Math.r_[i+1:n]))] = thisP; # Return final P-matrix print "Mean value of sigma: ", Math.mean(Math.sqrt(1 / beta)) return P; def pca(X = Math.array([]), no_dims = 50): """Runs PCA on the NxD array X in order to reduce its dimensionality to no_dims dimensions.""" print "Preprocessing the data using PCA..." (n, d) = X.shape; X = X - Math.tile(Math.mean(X, 0), (n, 1)); (l, M) = Math.linalg.eig(Math.dot(X.T, X)); Y = Math.dot(X, M[:,0:no_dims]); return Y; def tsne(X = Math.array([]), no_dims = 2, initial_dims = 50, perplexity = 30.0): """Runs t-SNE on the dataset in the NxD array X to reduce its dimensionality to no_dims dimensions. The syntaxis of the function is Y = tsne.tsne(X, no_dims, perplexity), where X is an NxD NumPy array.""" # Check inputs if X.dtype != "float64": print "Error: array X should have type float64."; return -1; #if no_dims.__class__ != "<type 'int'>": # doesn't work yet! 
# print "Error: number of dimensions should be an integer."; # return -1; # Initialize variables X = pca(X, initial_dims).real; (n, d) = X.shape; max_iter = 1000; initial_momentum = 0.5; final_momentum = 0.8; eta = 500; min_gain = 0.01; Y = Math.random.randn(n, no_dims); dY = Math.zeros((n, no_dims)); iY = Math.zeros((n, no_dims)); gains = Math.ones((n, no_dims)); # Compute P-values P = x2p(X, 1e-5, perplexity); P = P + Math.transpose(P); P = P / Math.sum(P); P = P * 4; # early exaggeration P = Math.maximum(P, 1e-12); # Run iterations for iter in range(max_iter): # Compute pairwise affinities sum_Y = Math.sum(Math.square(Y), 1); num = 1 / (1 + Math.add(Math.add(-2 * Math.dot(Y, Y.T), sum_Y).T, sum_Y)); num[range(n), range(n)] = 0; Q = num / Math.sum(num); Q = Math.maximum(Q, 1e-12); # Compute gradient PQ = P - Q; for i in range(n): dY[i,:] = Math.sum(Math.tile(PQ[:,i] * num[:,i], (no_dims, 1)).T * (Y[i,:] - Y), 0); # Perform the update if iter < 20: momentum = initial_momentum else: momentum = final_momentum gains = (gains + 0.2) * ((dY > 0) != (iY > 0)) + (gains * 0.8) * ((dY > 0) == (iY > 0)); gains[gains < min_gain] = min_gain; iY = momentum * iY - eta * (gains * dY); Y = Y + iY; Y = Y - Math.tile(Math.mean(Y, 0), (n, 1)); # Compute current value of cost function if (iter + 1) % 10 == 0: C = Math.sum(P * Math.log(P / Q)); print "Iteration ", (iter + 1), ": error is ", C # Stop lying about P-values if iter == 100: P = P / 4; # Return solution return Y; #if __name__ == "__main__": def xec(): print("Running t-SNE on your dataset...") vector_list, vect_to_mat_dict = vectorize_and_catalog(materials_list) normalized_vectors = normalize_vectors(vector_list) labels = Math.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]]) Y = tsne(Math.array(normalized_vectors), 2, 50, 20.0); tsne_to_vect_dict = dict(zip([tuple(x) for x in Y.tolist()], [tuple(x) for x in vector_list])) print(Y) Plot.scatter(Y[:,0], Y[:,1], 20) Plot.show() return Y, tsne_to_vect_dict, vect_to_mat_dict
mp-interns/eratosthenes
neigh/tsne.py
Python
bsd-2-clause
5,731
[ "Gaussian" ]
998644c49ca920b0c3229293be939b56bba7297f80be000cb597dff49efc1a3a
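In tsne.py above, x2p() binary-searches each point's precision beta until the entropy of its Gaussian neighbourhood matches log(perplexity). The file is Python 2 vintage (print statements, pylab); as a reading aid, here is the Hbeta() step it searches over restated for Python 3 and current NumPy, kept separate so the original stays verbatim. The lowercase name simply mirrors the original function and is not part of that file.

import numpy as np

def hbeta(D, beta=1.0):
    # D: squared distances from one point to all others; beta: Gaussian precision.
    # Returns the Shannon entropy H and the normalised P-row, as Hbeta() above.
    P = np.exp(-D * beta)
    sumP = P.sum()
    H = np.log(sumP) + beta * np.sum(D * P) / sumP
    return H, P / sumP

# x2p() then widens or narrows beta until abs(H - np.log(perplexity)) < tol.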
# -*- coding: utf-8 -*- # Goofy name to avoid collision w/ ../py/test_security.py from __future__ import absolute_import, division, print_function, unicode_literals from gratipay.testing import BrowserHarness from selenium.common.exceptions import NoAlertPresentException class RejectNullBytesInURI(BrowserHarness): def test_really_protects_against_reflected_xss(self): self.make_package() self.visit('/on/npm/foo') assert self.css('#banner h1').text == 'foo' # known bad in Chrome 60 and Firefox 55: self.visit('/on/npm/foo%2500bar%3Cx%3E%2500%2500%2500%2500%2500%2500%2500' '%3Cscript%3Ealert(document.domain)%3C%2Fscript%3E') try: alert = self.get_alert() except NoAlertPresentException: assert 1 else: alert.dismiss() # avoid leaking into other tests assert 0, "Game over."
gratipay/gratipay.com
tests/ttw/test_security_ttw.py
Python
mit
918
[ "VisIt" ]
87347336dbb4676ccfa38d62d58e10fd41009b7a6952ef1db1060801f6e2ffed
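The "known bad" request in the test above relies on double percent-encoding: %2500 survives one decode as the literal %00 and only a second decode turns it into a raw null byte, after which the <script> payload would be reflected if the server mishandled the truncation. A standard-library sketch (not part of the test suite) of what the path decodes to:

from urllib.parse import unquote

path = ('/on/npm/foo%2500bar%3Cx%3E%2500%2500%2500%2500%2500%2500%2500'
        '%3Cscript%3Ealert(document.domain)%3C%2Fscript%3E')
once = unquote(path)    # '%2500' -> '%00', '%3C'/'%3E' -> '<'/'>'
twice = unquote(once)   # '%00' -> raw '\x00' null bytes
print(repr(twice))      # '/on/npm/foo\x00bar<x>\x00...\x00<script>alert(document.domain)</script>'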
from __future__ import division, print_function, absolute_import import os import sys import contextlib from os.path import join as pjoin from hashlib import md5 from shutil import copyfileobj import numpy as np import nibabel as nib import tarfile import zipfile from dipy.core.gradients import gradient_table from dipy.io.gradients import read_bvals_bvecs if sys.version_info[0] < 3: from urllib2 import urlopen else: from urllib.request import urlopen # Set a user-writeable file-system location to put files: if 'DIPY_HOME' in os.environ: dipy_home = os.environ['DIPY_HOME'] else: dipy_home = pjoin(os.path.expanduser('~'), '.dipy') # The URL to the University of Washington Researchworks repository: UW_RW_URL = \ "https://digital.lib.washington.edu/researchworks/bitstream/handle/" class FetcherError(Exception): pass def _log(msg): """Helper function to keep track of things. For now, just prints the message """ print(msg) def update_progressbar(progress, total_length): """Show progressbar Takes a number between 0 and 1 to indicate progress from 0 to 100%. """ # Try to set the bar_length according to the console size try: columns = os.popen('tput cols', 'r').read() bar_length = int(columns) - 46 if(not (bar_length > 1)): bar_length = 20 except: # Default value if determination of console size fails bar_length = 20 block = int(round(bar_length * progress)) size_string = "{0:.2f} MB".format(float(total_length) / (1024 * 1024)) text = "\rDownload Progress: [{0}] {1:.2f}% of {2}".format( "#" * block + "-" * (bar_length - block), progress * 100, size_string) sys.stdout.write(text) sys.stdout.flush() def copyfileobj_withprogress(fsrc, fdst, total_length, length=16 * 1024): copied = 0 while True: buf = fsrc.read(length) if not buf: break fdst.write(buf) copied += len(buf) progress = float(copied) / float(total_length) update_progressbar(progress, total_length) def _already_there_msg(folder): """ Prints a message indicating that a certain data-set is already in place """ msg = 'Dataset is already in place. If you want to fetch it again ' msg += 'please first remove the folder %s ' % folder _log(msg) def _get_file_md5(filename): """Compute the md5 checksum of a file""" md5_data = md5() with open(filename, 'rb') as f: for chunk in iter(lambda: f.read(128 * md5_data.block_size), b''): md5_data.update(chunk) return md5_data.hexdigest() def check_md5(filename, stored_md5=None): """ Computes the md5 of filename and check if it matches with the supplied string md5 Input ----- filename : string Path to a file. md5 : string Known md5 of filename to check against. If None (default), checking is skipped """ if stored_md5 is not None: computed_md5 = _get_file_md5(filename) if stored_md5 != computed_md5: msg = """The downloaded file, %s, does not have the expected md5 checksum of "%s". Instead, the md5 checksum was: "%s". This could mean that something is wrong with the file or that the upstream file has been updated. 
You can try downloading the file again or updating to the newest version of dipy.""" % (filename, stored_md5, computed_md5) raise FetcherError(msg) def _get_file_data(fname, url): with contextlib.closing(urlopen(url)) as opener: if sys.version_info[0] < 3: try: response_size = opener.headers['content-length'] except KeyError: response_size = None else: # python3.x # returns none if header not found response_size = opener.getheader("Content-Length") with open(fname, 'wb') as data: if(response_size is None): copyfileobj(opener, data) else: copyfileobj_withprogress(opener, data, response_size) def fetch_data(files, folder, data_size=None): """Downloads files to folder and checks their md5 checksums Parameters ---------- files : dictionary For each file in `files` the value should be (url, md5). The file will be downloaded from url if the file does not already exist or if the file exists but the md5 checksum does not match. folder : str The directory where to save the file, the directory will be created if it does not already exist. data_size : str, optional A string describing the size of the data (e.g. "91 MB") to be logged to the screen. Default does not produce any information about data size. Raises ------ FetcherError Raises if the md5 checksum of the file does not match the expected value. The downloaded file is not deleted when this error is raised. """ if not os.path.exists(folder): _log("Creating new folder %s" % (folder)) os.makedirs(folder) if data_size is not None: _log('Data size is approximately %s' % data_size) all_skip = True for f in files: url, md5 = files[f] fullpath = pjoin(folder, f) if os.path.exists(fullpath) and (_get_file_md5(fullpath) == md5): continue all_skip = False _log('Downloading "%s" to %s' % (f, folder)) _get_file_data(fullpath, url) check_md5(fullpath, md5) if all_skip: _already_there_msg(folder) else: _log("Files successfully downloaded to %s" % (folder)) def _make_fetcher(name, folder, baseurl, remote_fnames, local_fnames, md5_list=None, doc="", data_size=None, msg=None, unzip=False): """ Create a new fetcher Parameters ---------- name : str The name of the fetcher function. folder : str The full path to the folder in which the files would be placed locally. Typically, this is something like 'pjoin(dipy_home, 'foo')' baseurl : str The URL from which this fetcher reads files remote_fnames : list of strings The names of the files in the baseurl location local_fnames : list of strings The names of the files to be saved on the local filesystem md5_list : list of strings, optional The md5 checksums of the files. Used to verify the content of the files. Default: None, skipping checking md5. doc : str, optional. Documentation of the fetcher. data_size : str, optional. If provided, is sent as a message to the user before downloading starts. msg : str, optional. A message to print to screen when fetching takes place. Default (None) is to print nothing unzip : bool, optional Whether to unzip the file(s) after downloading them. Supports zip, gz, and tar.gz files. 
returns ------- fetcher : function A function that, when called, fetches data according to the designated inputs """ def fetcher(): files = {} for i, (f, n), in enumerate(zip(remote_fnames, local_fnames)): files[n] = (baseurl + f, md5_list[i] if md5_list is not None else None) fetch_data(files, folder, data_size) if msg is not None: print(msg) if unzip: for f in local_fnames: split_ext = os.path.splitext(f) if split_ext[-1] == '.gz' or split_ext[-1] == '.bz2': if os.path.splitext(split_ext[0])[-1] == '.tar': ar = tarfile.open(pjoin(folder, f)) ar.extractall(path=folder) ar.close() else: raise ValueError('File extension is not recognized') elif split_ext[-1] == '.zip': z = zipfile.ZipFile(pjoin(folder, f), 'r') z.extractall(folder) z.close() else: raise ValueError('File extension is not recognized') return files, folder fetcher.__name__ = name fetcher.__doc__ = doc return fetcher fetch_isbi2013_2shell = _make_fetcher( "fetch_isbi2013_2shell", pjoin(dipy_home, 'isbi2013'), UW_RW_URL + '1773/38465/', ['phantom64.nii.gz', 'phantom64.bval', 'phantom64.bvec'], ['phantom64.nii.gz', 'phantom64.bval', 'phantom64.bvec'], ['42911a70f232321cf246315192d69c42', '90e8cf66e0f4d9737a3b3c0da24df5ea', '4b7aa2757a1ccab140667b76e8075cb1'], doc="Download a 2-shell software phantom dataset", data_size="") fetch_stanford_labels = _make_fetcher( "fetch_stanford_labels", pjoin(dipy_home, 'stanford_hardi'), 'https://stacks.stanford.edu/file/druid:yx282xq2090/', ["aparc-reduced.nii.gz", "label_info.txt"], ["aparc-reduced.nii.gz", "label_info.txt"], ['742de90090d06e687ce486f680f6d71a', '39db9f0f5e173d7a2c2e51b07d5d711b'], doc="Download reduced freesurfer aparc image from stanford web site") fetch_sherbrooke_3shell = _make_fetcher( "fetch_sherbrooke_3shell", pjoin(dipy_home, 'sherbrooke_3shell'), UW_RW_URL + "1773/38475/", ['HARDI193.nii.gz', 'HARDI193.bval', 'HARDI193.bvec'], ['HARDI193.nii.gz', 'HARDI193.bval', 'HARDI193.bvec'], ['0b735e8f16695a37bfbd66aab136eb66', 'e9b9bb56252503ea49d31fb30a0ac637', '0c83f7e8b917cd677ad58a078658ebb7'], doc="Download a 3shell HARDI dataset with 192 gradient direction") fetch_stanford_hardi = _make_fetcher( "fetch_stanford_hardi", pjoin(dipy_home, 'stanford_hardi'), 'https://stacks.stanford.edu/file/druid:yx282xq2090/', ['dwi.nii.gz', 'dwi.bvals', 'dwi.bvecs'], ['HARDI150.nii.gz', 'HARDI150.bval', 'HARDI150.bvec'], ['0b18513b46132b4d1051ed3364f2acbc', '4e08ee9e2b1d2ec3fddb68c70ae23c36', '4c63a586f29afc6a48a5809524a76cb4'], doc="Download a HARDI dataset with 160 gradient directions") fetch_stanford_t1 = _make_fetcher( "fetch_stanford_t1", pjoin(dipy_home, 'stanford_hardi'), 'https://stacks.stanford.edu/file/druid:yx282xq2090/', ['t1.nii.gz'], ['t1.nii.gz'], ['a6a140da6a947d4131b2368752951b0a']) fetch_stanford_pve_maps = _make_fetcher( "fetch_stanford_pve_maps", pjoin(dipy_home, 'stanford_hardi'), 'https://stacks.stanford.edu/file/druid:yx282xq2090/', ['pve_csf.nii.gz', 'pve_gm.nii.gz', 'pve_wm.nii.gz'], ['pve_csf.nii.gz', 'pve_gm.nii.gz', 'pve_wm.nii.gz'], ['2c498e4fed32bca7f726e28aa86e9c18', '1654b20aeb35fc2734a0d7928b713874', '2e244983cf92aaf9f9d37bc7716b37d5']) fetch_taiwan_ntu_dsi = _make_fetcher( "fetch_taiwan_ntu_dsi", pjoin(dipy_home, 'taiwan_ntu_dsi'), UW_RW_URL + "1773/38480/", ['DSI203.nii.gz', 'DSI203.bval', 'DSI203.bvec', 'DSI203_license.txt'], ['DSI203.nii.gz', 'DSI203.bval', 'DSI203.bvec', 'DSI203_license.txt'], ['950408c0980a7154cb188666a885a91f', '602e5cb5fad2e7163e8025011d8a6755', 'a95eb1be44748c20214dc7aa654f9e6b', '7fa1d5e272533e832cc7453eeba23f44'], doc="Download a 
DSI dataset with 203 gradient directions", msg="See DSI203_license.txt for LICENSE. For the complete datasets please visit : \ http://dsi-studio.labsolver.org", data_size="91MB") fetch_syn_data = _make_fetcher( "fetch_syn_data", pjoin(dipy_home, 'syn_test'), UW_RW_URL + "1773/38476/", ['t1.nii.gz', 'b0.nii.gz'], ['t1.nii.gz', 'b0.nii.gz'], ['701bda02bb769655c7d4a9b1df2b73a6', 'e4b741f0c77b6039e67abb2885c97a78'], data_size="12MB", doc="Download t1 and b0 volumes from the same session") fetch_mni_template = _make_fetcher( "fetch_mni_template", pjoin(dipy_home, 'mni_template'), 'https://ndownloader.figshare.com/files/', ['5572676?private_link=4b8666116a0128560fb5', '5572673?private_link=93216e750d5a7e568bda', '5572670?private_link=33c92d54d1afb9aa7ed2', '5572661?private_link=584319b23e7343fed707'], ['mni_icbm152_t2_tal_nlin_asym_09a.nii', 'mni_icbm152_t1_tal_nlin_asym_09a.nii', 'mni_icbm152_t1_tal_nlin_asym_09c_mask.nii', 'mni_icbm152_t1_tal_nlin_asym_09c.nii'], ['f41f2e1516d880547fbf7d6a83884f0d', '1ea8f4f1e41bc17a94602e48141fdbc8', 'a243e249cd01a23dc30f033b9656a786', '3d5dd9b0cd727a17ceec610b782f66c1'], doc="fetch the MNI 2009a T1 and T2, and 2009c T1 and T1 mask files", data_size="70MB") fetch_scil_b0 = _make_fetcher( "fetch_scil_b0", dipy_home, UW_RW_URL + "1773/38479/", ['datasets_multi-site_all_companies.zip'], ['datasets_multi-site_all_companies.zip'], None, doc="Download b=0 datasets from multiple MR systems (GE, Philips, Siemens) \ and different magnetic fields (1.5T and 3T)", data_size="9.2MB", unzip=True) fetch_viz_icons = _make_fetcher("fetch_viz_icons", pjoin(dipy_home, "icons"), UW_RW_URL + "1773/38478/", ['icomoon.tar.gz'], ['icomoon.tar.gz'], ['94a07cba06b4136b6687396426f1e380'], data_size="12KB", doc="Download icons for dipy.viz", unzip=True) fetch_bundles_2_subjects = _make_fetcher( "fetch_bundles_2_subjects", pjoin(dipy_home, 'exp_bundles_and_maps'), UW_RW_URL + '1773/38477/', ['bundles_2_subjects.tar.gz'], ['bundles_2_subjects.tar.gz'], ['97756fbef11ce2df31f1bedf1fc7aac7'], data_size="234MB", doc="Download 2 subjects from the SNAIL dataset with their bundles", unzip=True) fetch_ivim = _make_fetcher( "fetch_ivim", pjoin(dipy_home, 'ivim'), 'https://ndownloader.figshare.com/files/', ['5305243', '5305246', '5305249'], ['ivim.nii.gz', 'ivim.bval', 'ivim.bvec'], ['cda596f89dc2676af7d9bf1cabccf600', 'f03d89f84aa9a9397103a400e43af43a', 'fb633a06b02807355e49ccd85cb92565'], doc="Download IVIM dataset") fetch_cfin_multib = _make_fetcher( "fetch_cfin_multib", pjoin(dipy_home, 'cfin_multib'), UW_RW_URL + '/1773/38488/', ['T1.nii', '__DTI_AX_ep2d_2_5_iso_33d_20141015095334_4.nii', '__DTI_AX_ep2d_2_5_iso_33d_20141015095334_4.bval', '__DTI_AX_ep2d_2_5_iso_33d_20141015095334_4.bvec'], ['T1.nii', '__DTI_AX_ep2d_2_5_iso_33d_20141015095334_4.nii', '__DTI_AX_ep2d_2_5_iso_33d_20141015095334_4.bval', '__DTI_AX_ep2d_2_5_iso_33d_20141015095334_4.bvec'], ['889883b5e7d93a6e372bc760ea887e7c', '9daea1d01d68fd0055a3b34f5ffd5f6e', '3ee44135fde7ea5c9b8c801414bdde2c', '948373391de950e7cc1201ba9f696bf0'], doc="Download CFIN multi b-value diffusion data", msg=("This data was provided by Brian Hansen and Sune Jespersen" + " More details about the data are available in their paper: " + " https://www.nature.com/articles/sdata201672")) def read_scil_b0(): """ Load GE 3T b0 image form the scil b0 dataset. 
Returns ------- img : obj, Nifti1Image """ file = pjoin(dipy_home, 'datasets_multi-site_all_companies', '3T', 'GE', 'b0.nii.gz') return nib.load(file) def read_siemens_scil_b0(): """ Load Siemens 1.5T b0 image form the scil b0 dataset. Returns ------- img : obj, Nifti1Image """ file = pjoin(dipy_home, 'datasets_multi-site_all_companies', '1.5T', 'Siemens', 'b0.nii.gz') return nib.load(file) def read_isbi2013_2shell(): """ Load ISBI 2013 2-shell synthetic dataset Returns ------- img : obj, Nifti1Image gtab : obj, GradientTable """ files, folder = fetch_isbi2013_2shell() fraw = pjoin(folder, 'phantom64.nii.gz') fbval = pjoin(folder, 'phantom64.bval') fbvec = pjoin(folder, 'phantom64.bvec') bvals, bvecs = read_bvals_bvecs(fbval, fbvec) gtab = gradient_table(bvals, bvecs) img = nib.load(fraw) return img, gtab def read_sherbrooke_3shell(): """ Load Sherbrooke 3-shell HARDI dataset Returns ------- img : obj, Nifti1Image gtab : obj, GradientTable """ files, folder = fetch_sherbrooke_3shell() fraw = pjoin(folder, 'HARDI193.nii.gz') fbval = pjoin(folder, 'HARDI193.bval') fbvec = pjoin(folder, 'HARDI193.bvec') bvals, bvecs = read_bvals_bvecs(fbval, fbvec) gtab = gradient_table(bvals, bvecs) img = nib.load(fraw) return img, gtab def read_stanford_labels(): """Read stanford hardi data and label map""" # First get the hardi data fetch_stanford_hardi() hard_img, gtab = read_stanford_hardi() # Fetch and load files, folder = fetch_stanford_labels() labels_file = pjoin(folder, "aparc-reduced.nii.gz") labels_img = nib.load(labels_file) return hard_img, gtab, labels_img def read_stanford_hardi(): """ Load Stanford HARDI dataset Returns ------- img : obj, Nifti1Image gtab : obj, GradientTable """ files, folder = fetch_stanford_hardi() fraw = pjoin(folder, 'HARDI150.nii.gz') fbval = pjoin(folder, 'HARDI150.bval') fbvec = pjoin(folder, 'HARDI150.bvec') bvals, bvecs = read_bvals_bvecs(fbval, fbvec) gtab = gradient_table(bvals, bvecs) img = nib.load(fraw) return img, gtab def read_stanford_t1(): files, folder = fetch_stanford_t1() f_t1 = pjoin(folder, 't1.nii.gz') img = nib.load(f_t1) return img def read_stanford_pve_maps(): files, folder = fetch_stanford_pve_maps() f_pve_csf = pjoin(folder, 'pve_csf.nii.gz') f_pve_gm = pjoin(folder, 'pve_gm.nii.gz') f_pve_wm = pjoin(folder, 'pve_wm.nii.gz') img_pve_csf = nib.load(f_pve_csf) img_pve_gm = nib.load(f_pve_gm) img_pve_wm = nib.load(f_pve_wm) return (img_pve_csf, img_pve_gm, img_pve_wm) def read_taiwan_ntu_dsi(): """ Load Taiwan NTU dataset Returns ------- img : obj, Nifti1Image gtab : obj, GradientTable """ files, folder = fetch_taiwan_ntu_dsi() fraw = pjoin(folder, 'DSI203.nii.gz') fbval = pjoin(folder, 'DSI203.bval') fbvec = pjoin(folder, 'DSI203.bvec') bvals, bvecs = read_bvals_bvecs(fbval, fbvec) bvecs[1:] = (bvecs[1:] / np.sqrt(np.sum(bvecs[1:] * bvecs[1:], axis=1))[:, None]) gtab = gradient_table(bvals, bvecs) img = nib.load(fraw) return img, gtab def read_syn_data(): """ Load t1 and b0 volumes from the same session Returns ------- t1 : obj, Nifti1Image b0 : obj, Nifti1Image """ files, folder = fetch_syn_data() t1_name = pjoin(folder, 't1.nii.gz') b0_name = pjoin(folder, 'b0.nii.gz') t1 = nib.load(t1_name) b0 = nib.load(b0_name) return t1, b0 def fetch_tissue_data(): """ Download images to be used for tissue classification """ t1 = 'https://ndownloader.figshare.com/files/6965969' t1d = 'https://ndownloader.figshare.com/files/6965981' ap = 'https://ndownloader.figshare.com/files/6965984' folder = pjoin(dipy_home, 'tissue_data') md5_list = 
['99c4b77267a6855cbfd96716d5d65b70', # t1 '4b87e1b02b19994fbd462490cc784fa3', # t1d 'c0ea00ed7f2ff8b28740f18aa74bff6a'] # ap url_list = [t1, t1d, ap] fname_list = ['t1_brain.nii.gz', 't1_brain_denoised.nii.gz', 'power_map.nii.gz'] if not os.path.exists(folder): print('Creating new directory %s' % folder) os.makedirs(folder) msg = 'Downloading 3 Nifti1 images (9.3MB)...' print(msg) for i in range(len(md5_list)): _get_file_data(pjoin(folder, fname_list[i]), url_list[i]) check_md5(pjoin(folder, fname_list[i]), md5_list[i]) print('Done.') print('Files copied in folder %s' % folder) else: _already_there_msg(folder) def read_tissue_data(contrast='T1'): """ Load images to be used for tissue classification Parameters ---------- constrast : str 'T1', 'T1 denoised' or 'Anisotropic Power' Returns ------- image : obj, Nifti1Image """ folder = pjoin(dipy_home, 'tissue_data') t1_name = pjoin(folder, 't1_brain.nii.gz') t1d_name = pjoin(folder, 't1_brain_denoised.nii.gz') ap_name = pjoin(folder, 'power_map.nii.gz') md5_dict = {'t1': '99c4b77267a6855cbfd96716d5d65b70', 't1d': '4b87e1b02b19994fbd462490cc784fa3', 'ap': 'c0ea00ed7f2ff8b28740f18aa74bff6a'} check_md5(t1_name, md5_dict['t1']) check_md5(t1d_name, md5_dict['t1d']) check_md5(ap_name, md5_dict['ap']) if contrast == 'T1 denoised': return nib.load(t1d_name) elif contrast == 'Anisotropic Power': return nib.load(ap_name) else: return nib.load(t1_name) mni_notes = \ """ Notes ----- The templates were downloaded from the MNI (McGill University) `website <http://www.bic.mni.mcgill.ca/ServicesAtlases/ICBM152NLin2009>`_ in July 2015. The following publications should be referenced when using these templates: .. [1] VS Fonov, AC Evans, K Botteron, CR Almli, RC McKinstry, DL Collins and BDCG, Unbiased average age-appropriate atlases for pediatric studies, NeuroImage, 54:1053-8119, DOI: 10.1016/j.neuroimage.2010.07.033 .. [2] VS Fonov, AC Evans, RC McKinstry, CR Almli and DL Collins, Unbiased nonlinear average age-appropriate brain templates from birth to adulthood, NeuroImage, 47:S102 Organization for Human Brain Mapping 2009 Annual Meeting, DOI: http://dx.doi.org/10.1016/S1053-8119(09)70884-5 License for the MNI templates: ----------------------------- Copyright (C) 1993-2004, Louis Collins McConnell Brain Imaging Centre, Montreal Neurological Institute, McGill University. Permission to use, copy, modify, and distribute this software and its documentation for any purpose and without fee is hereby granted, provided that the above copyright notice appear in all copies. The authors and McGill University make no representations about the suitability of this software for any purpose. It is provided "as is" without express or implied warranty. The authors are not responsible for any data loss, equipment damage, property loss, or injury to subjects or patients resulting from the use or misuse of this software package. """ def read_mni_template(version="a", contrast="T2"): """ Read the MNI template from disk Parameters ---------- version: string There are two MNI templates 2009a and 2009c, so options available are: "a" and "c". contrast : list or string, optional Which of the contrast templates to read. For version "a" two contrasts are available: "T1" and "T2". Similarly for version "c" there are two options, "T1" and "mask". You can input contrast as a string or a list Returns ------- list : contains the nibabel.Nifti1Image objects requested, according to the order they were requested in the input. 
Examples -------- Get only the T1 file for version c: >>> T1_nifti = read_mni_template("c", contrast = "T1") # doctest: +SKIP Get both files in this order for version a: >>> T1_nifti, T2_nifti = read_mni_template(contrast = ["T1", "T2"]) # doctest: +SKIP """ files, folder = fetch_mni_template() file_dict_a = {"T1": pjoin(folder, 'mni_icbm152_t1_tal_nlin_asym_09a.nii'), "T2": pjoin(folder, 'mni_icbm152_t2_tal_nlin_asym_09a.nii')} file_dict_c = { "T1": pjoin( folder, 'mni_icbm152_t1_tal_nlin_asym_09c.nii'), "mask": pjoin( folder, 'mni_icbm152_t1_tal_nlin_asym_09c_mask.nii')} if contrast == "T2" and version == "c": raise ValueError("No T2 image for MNI template 2009c") if contrast == "mask" and version == "a": raise ValueError("No template mask available for MNI 2009a") if not(isinstance(contrast, str)) and version == "c": for k in contrast: if k == "T2": raise ValueError("No T2 image for MNI template 2009c") if version == "a": if isinstance(contrast, str): return nib.load(file_dict_a[contrast]) else: out_list = [] for k in contrast: out_list.append(nib.load(file_dict_a[k])) elif version == "c": if isinstance(contrast, str): return nib.load(file_dict_c[contrast]) else: out_list = [] for k in contrast: out_list.append(nib.load(file_dict_c[k])) else: raise ValueError("Only 2009a and 2009c versions are available") return out_list # Add the references to both MNI-related functions: read_mni_template.__doc__ += mni_notes fetch_mni_template.__doc__ += mni_notes def fetch_cenir_multib(with_raw=False): """ Fetch 'HCP-like' data, collected at multiple b-values Parameters ---------- with_raw : bool Whether to fetch the raw data. Per default, this is False, which means that only eddy-current/motion corrected data is fetched """ folder = pjoin(dipy_home, 'cenir_multib') fname_list = ['4D_dwi_eddycor_B200.nii.gz', 'dwi_bvals_B200', 'dwi_bvecs_B200', '4D_dwieddycor_B400.nii.gz', 'bvals_B400', 'bvecs_B400', '4D_dwieddycor_B1000.nii.gz', 'bvals_B1000', 'bvecs_B1000', '4D_dwieddycor_B2000.nii.gz', 'bvals_B2000', 'bvecs_B2000', '4D_dwieddycor_B3000.nii.gz', 'bvals_B3000', 'bvecs_B3000'] md5_list = ['fd704aa3deb83c1c7229202cb3db8c48', '80ae5df76a575fe5bf9f1164bb0d4cfb', '18e90f8a3e6a4db2457e5b1ba1cc98a9', '3d0f2b8ef7b6a4a3aa5c4f7a90c9cfec', 'c38056c40c9cc42372232d6e75c47f54', '810d79b4c30cb7dff3b2000017d5f72a', 'dde8037601a14436b2173f4345b5fd17', '97de6a492ae304f39e0b418b6ebac64c', 'f28a0faa701bdfc66e31bde471a5b992', 'c5e4b96e3afdee99c0e994eff3b2331a', '9c83b8d5caf9c3def240f320f2d2f56c', '05446bd261d57193d8dbc097e06db5ff', 'f0d70456ce424fda2cecd48e64f3a151', '336accdb56acbbeff8dac1748d15ceb8', '27089f3baaf881d96f6a9da202e3d69b'] if with_raw: fname_list.extend(['4D_dwi_B200.nii.gz', '4D_dwi_B400.nii.gz', '4D_dwi_B1000.nii.gz', '4D_dwi_B2000.nii.gz', '4D_dwi_B3000.nii.gz']) md5_list.extend(['a8c36e76101f2da2ca8119474ded21d5', 'a0e7939f6d977458afbb2f4659062a79', '87fc307bdc2e56e105dffc81b711a808', '7c23e8a5198624aa29455f0578025d4f', '4e4324c676f5a97b3ded8bbb100bf6e5']) files = {} baseurl = UW_RW_URL + '1773/33311/' for f, m in zip(fname_list, md5_list): files[f] = (baseurl + f, m) fetch_data(files, folder) return files, folder def read_cenir_multib(bvals=None): """ Read CENIR multi b-value data Parameters ---------- bvals : list or int The b-values to read from file (200, 400, 1000, 2000, 3000). 
Returns ------- gtab : a GradientTable class instance img : nibabel.Nifti1Image """ files, folder = fetch_cenir_multib(with_raw=False) if bvals is None: bvals = [200, 400, 1000, 2000, 3000] if isinstance(bvals, int): bvals = [bvals] file_dict = {200: {'DWI': pjoin(folder, '4D_dwi_eddycor_B200.nii.gz'), 'bvals': pjoin(folder, 'dwi_bvals_B200'), 'bvecs': pjoin(folder, 'dwi_bvecs_B200')}, 400: {'DWI': pjoin(folder, '4D_dwieddycor_B400.nii.gz'), 'bvals': pjoin(folder, 'bvals_B400'), 'bvecs': pjoin(folder, 'bvecs_B400')}, 1000: {'DWI': pjoin(folder, '4D_dwieddycor_B1000.nii.gz'), 'bvals': pjoin(folder, 'bvals_B1000'), 'bvecs': pjoin(folder, 'bvecs_B1000')}, 2000: {'DWI': pjoin(folder, '4D_dwieddycor_B2000.nii.gz'), 'bvals': pjoin(folder, 'bvals_B2000'), 'bvecs': pjoin(folder, 'bvecs_B2000')}, 3000: {'DWI': pjoin(folder, '4D_dwieddycor_B3000.nii.gz'), 'bvals': pjoin(folder, 'bvals_B3000'), 'bvecs': pjoin(folder, 'bvecs_B3000')}} data = [] bval_list = [] bvec_list = [] for bval in bvals: data.append(nib.load(file_dict[bval]['DWI']).get_data()) bval_list.extend(np.loadtxt(file_dict[bval]['bvals'])) bvec_list.append(np.loadtxt(file_dict[bval]['bvecs'])) # All affines are the same, so grab the last one: aff = nib.load(file_dict[bval]['DWI']).affine return (nib.Nifti1Image(np.concatenate(data, -1), aff), gradient_table(bval_list, np.concatenate(bvec_list, -1))) CENIR_notes = \ """ Notes ----- Details of the acquisition and processing, and additional meta-data are available through `UW researchworks <https://digital.lib.washington.edu/researchworks/handle/1773/33311>`_ """ fetch_cenir_multib.__doc__ += CENIR_notes read_cenir_multib.__doc__ += CENIR_notes def read_viz_icons(style='icomoon', fname='infinity.png'): """ Read specific icon from specific style Parameters ---------- style : str Current icon style. Default is icomoon. fname : str Filename of icon. This should be found in folder HOME/.dipy/style/. Default is infinity.png. Returns -------- path : str Complete path of icon. """ folder = pjoin(dipy_home, 'icons', style) return pjoin(folder, fname) def read_bundles_2_subjects(subj_id='subj_1', metrics=['fa'], bundles=['af.left', 'cst.right', 'cc_1']): r""" Read images and streamlines from 2 subjects of the SNAIL dataset Parameters ---------- subj_id : string Either ``subj_1`` or ``subj_2``. metrics : list Either ['fa'] or ['t1'] or ['fa', 't1'] bundles : list Example ['af.left', 'cst.right', 'cc_1']. See all the available bundles in the ``exp_bundles_maps/bundles_2_subjects`` directory of your ``$HOME/.dipy`` folder. Returns ------- dix : dict Dictionary with data of the metrics and the bundles as keys. Notes ----- If you are using these datasets please cite the following publications. References ---------- .. [1] Renauld, E., M. Descoteaux, M. Bernier, E. Garyfallidis, K. Whittingstall, "Morphology of thalamus, LGN and optic radiation do not influence EEG alpha waves", Plos One (under submission), 2015. .. [2] Garyfallidis, E., O. Ocegueda, D. Wassermann, M. Descoteaux. Robust and efficient linear registration of fascicles in the space of streamlines , Neuroimage, 117:124-140, 2015. 
""" dname = pjoin(dipy_home, 'exp_bundles_and_maps', 'bundles_2_subjects') from nibabel import trackvis as tv res = {} if 't1' in metrics: img = nib.load(pjoin(dname, subj_id, 't1_warped.nii.gz')) data = img.get_data() affine = img.affine res['t1'] = data if 'fa' in metrics: img_fa = nib.load(pjoin(dname, subj_id, 'fa_1x1x1.nii.gz')) fa = img_fa.get_data() affine = img_fa.affine res['fa'] = fa res['affine'] = affine for bun in bundles: streams, hdr = tv.read(pjoin(dname, subj_id, 'bundles', 'bundles_' + bun + '.trk'), points_space="rasmm") streamlines = [s[0] for s in streams] res[bun] = streamlines return res def read_ivim(): """ Load IVIM dataset Returns ------- img : obj, Nifti1Image gtab : obj, GradientTable """ files, folder = fetch_ivim() fraw = pjoin(folder, 'ivim.nii.gz') fbval = pjoin(folder, 'ivim.bval') fbvec = pjoin(folder, 'ivim.bvec') bvals, bvecs = read_bvals_bvecs(fbval, fbvec) gtab = gradient_table(bvals, bvecs) img = nib.load(fraw) return img, gtab def read_cfin_dwi(): """Load CFIN multi b-value DWI data Returns ------- img : obj, Nifti1Image gtab : obj, GradientTable """ files, folder = fetch_cfin_multib() fraw = pjoin(folder, '__DTI_AX_ep2d_2_5_iso_33d_20141015095334_4.nii') fbval = pjoin(folder, '__DTI_AX_ep2d_2_5_iso_33d_20141015095334_4.bval') fbvec = pjoin(folder, '__DTI_AX_ep2d_2_5_iso_33d_20141015095334_4.bvec') bvals, bvecs = read_bvals_bvecs(fbval, fbvec) gtab = gradient_table(bvals, bvecs) img = nib.load(fraw) return img, gtab def read_cfin_t1(): """Load CFIN T1-weighted data. Returns ------- img : obj, Nifti1Image """ files, folder = fetch_cfin_multib() img = nib.load(pjoin(folder, 'T1.nii')) return img, gtab
nilgoyyou/dipy
dipy/data/fetcher.py
Python
bsd-3-clause
33,864
[ "Brian", "VisIt" ]
5877868e809277abeef3acbbb90637a6c2f4ada9e05b7c6cb162067ee7cb09d4
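_make_fetcher() in dipy/data/fetcher.py above is a factory: it closes over a cache folder, a base URL, remote/local file names and md5 sums, and returns a function whose body is essentially fetch_data() plus optional unzipping. A hedged sketch of registering one more dataset through that factory; the dataset name, URL and checksum below are placeholders invented for illustration, not a real dipy dataset.

from os.path import join as pjoin

# Hypothetical example: 'my_phantom', its URL and its md5 are made up.
fetch_my_phantom = _make_fetcher(
    "fetch_my_phantom",                    # __name__ given to the returned function
    pjoin(dipy_home, 'my_phantom'),        # local cache folder under ~/.dipy
    'https://example.org/data/',           # base URL (placeholder)
    ['phantom.nii.gz'],                    # remote file names
    ['phantom.nii.gz'],                    # local file names
    ['d41d8cd98f00b204e9800998ecf8427e'],  # expected md5 (placeholder)
    doc="Download a small phantom volume",
    data_size="1MB")

files, folder = fetch_my_phantom()         # downloads and md5-checks once, then no-ops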
from .galaxies import * from .galaxy import * from .simgalaxy import * from .starpop import * from .asts import *
philrosenfield/ResolvedStellarPops
galaxies/__init__.py
Python
bsd-3-clause
114
[ "Galaxy" ]
cdb9ca9c9b906b34cf77d95d863469230ee53c6c567307bf4c7b8f868385360c
# -*- coding: UTF-8 -*- import numpy as np from numpy import linspace from igakit.nurbs import NURBS from .op_nurbs import opNURBS from .io import XML, TXT, NML import sys from caid.core import bspline as bsplinelib _bsp = bsplinelib.bsp from numpy import pi, sqrt, array, zeros # ... TODO to move from here def couple_vers_entier(x,y): if (x == 0) : if (y == 0) : return 1 else : return 3 else : if (y == 0) : return 0 else : return 2 def entier_vers_couple(x): if (x == 0) : return [1,0] elif (x == 1) : return [0,0] elif (x == 2) : return [1,1] elif (x == 3) : return [0,1] # ... def line(n=None, p=None): """Creates a unit line cad_geometry object. Kwargs: n (list int): This is a list containing the number of interior knots to insert. default None p (list int): This is a list containing the spline degree of the line. default None Returns: A cad_geometry object. """ points = np.asarray([[0.,0.],[1.,0.]]) return linear(points=points, n=n, p=p) def periodic_line(n=None, p=None): """Creates a unit line cad_geometry object. Kwargs: n (list int): This is a list containing the number of interior knots to insert. default None p (list int): This is a list containing the spline degree of the line. default None Returns: A cad_geometry object. """ points = np.asarray([[0.,0.],[1.,0.]]) return periodic_linear(points=points, n=n, p=p) def square(n=None, p=None, m=None): """Creates a unit square cad_geometry object. Kwargs: n (list int): This is a list containing the number of interior knots to insert. default None p (list int): This is a list containing the spline degree of the line. default None Returns: A cad_geometry object. """ points = np.asarray([[[0.,0.],[0.,1.]],[[1.,0.],[1.,1.]]]) return bilinear(points=points, n=n, p=p, m=m) def periodic_square(n=None, p=None): """Creates a unit square cad_geometry object. Kwargs: n (list int): This is a list containing the number of interior knots to insert. default None p (list int): This is a list containing the spline degree of the line. default None Returns: A cad_geometry object. """ points = np.asarray([[[0.,0.],[0.,1.]],[[1.,0.],[1.,1.]]]) return periodic_bilinear(points=points, n=n, p=p) def triangle(n=None, p=None, points=None, m=None, profile=0): """Creates a degenerated triangle cad_geometry object. Kwargs: n (list int): This is a list containing the number of interior knots to insert. default None p (list int): This is a list containing the spline degree of the line. default None profile (int): is the triangle type. The redandant Control Points depends on the profile value. Returns: A cad_geometry object. """ if points is None: A = [0.,0.] B = [0.,1.] C = [1.,0.] else: A = points[0] B = points[1] C = points[2] if profile == 0: points = np.asarray([[A,A],[B,C]]) if profile == 1: points = np.asarray([[A,B],[C,C]]) if profile == 2: points = np.asarray([[A,B],[C,B]]) if profile == 3: D = [.5*(b+c) for (b,c) in zip(B,C)] points = np.asarray([[A,B],[C,D]]) return bilinear(points=points, n=n, p=p, m=m) def linear(points=None, n=None, p=None): from igakit.cad import linear as nrb_linear """Creates a linear cad_geometry object. Kwargs: points (float array): The extremeties of the linear n (list int): This is a list containing the number of interior knots to insert. default None p (list int): This is a list containing the spline degree of the line. default None Returns: A cad_geometry object. 
p[0] p[1] o------------o +----> u """ nrb = nrb_linear(points) cad_nrb = cad_nurbs(nrb.knots, nrb.points, weights=nrb.weights) cad_nrb.rational = False cad_nrb.orientation = [-1,1] geo = cad_geometry() geo.append(cad_nrb) # ... refinement list_t = None if n is not None: list_t = [] for axis in range(0,cad_nrb.dim): ub = cad_nrb.knots[axis][0] ue = cad_nrb.knots[axis][-1] list_t.append(np.linspace(ub,ue,n[axis]+2)[1:-1]) list_p = None if p is not None: list_p = [] for axis in range(0,cad_nrb.dim): list_p.append(p[axis] - cad_nrb.degree[axis]) geo.refine(list_t=list_t, list_p=list_p) # ... geo._internal_faces = [] geo._external_faces = [[0,0],[0,1]] geo._connectivity = [] return geo def periodic_linear(points=None, n=None, p=None): geo1d = linear(points=points, n=n, p=p) c0 = geo1d[0] c1 = c0.unclamp(0) geo = cad_geometry() geo.append(c1) list_connectivity = [] dict_con = {} dict_con['original'] = [0,0] dict_con['clone'] = [0,1] dict_con['periodic'] = True list_connectivity.append(dict_con) geo._connectivity = list_connectivity geo._internal_faces = [[0,0],[0,1]] geo._external_faces = [] return geo def arc(radius=1, center=None, angle=pi/2, n=None, p=None): from igakit.cad import circle """Creates a linear cad_geometry object. Kwargs: points (float array): The extremeties of the linear n (list int): This is a list containing the number of interior knots to insert. default None p (list int): This is a list containing the spline degree of the line. default None Returns: A cad_geometry object. p[0] p[1] o------------o +----> u """ nrb = circle(radius=radius, center=center, angle=angle) cad_nrb = cad_nurbs(nrb.knots, nrb.points, weights=nrb.weights) cad_nrb.rational = False cad_nrb.orientation = [-1,1] geo = cad_geometry() geo.append(cad_nrb) # ... refinement list_t = None if n is not None: list_t = [] for axis in range(0,cad_nrb.dim): ub = cad_nrb.knots[axis][0] ue = cad_nrb.knots[axis][-1] list_t.append(np.linspace(ub,ue,n[axis]+2)[1:-1]) list_p = None if p is not None: list_p = [] for axis in range(0,cad_nrb.dim): list_p.append(p[axis] - cad_nrb.degree[axis]) geo.refine(list_t=list_t, list_p=list_p) # ... geo._connectivity = [] if angle % 2*np.pi: geo._internal_faces = [] geo._external_faces = [[0,0],[0,1]] else: geo._internal_faces = [[0,0],[0,1]] geo._external_faces = [] dict_con = {}; dict_con['original'] = [0,0]; dict_con['clone'] = [0,1]; geo._connectivity.append(dict_con) return geo def bilinear(points=None, n=None, p=None, m=None): from igakit.cad import bilinear as nrb_bilinear """Creates a bilinear cad_geometry object. Kwargs: points (array): The summits of the quadrangle. n (list int): This is a list containing the number of interior knots to insert. default None p (list int): This is a list containing the spline degree of the line. default None m (list int): This is a list containing the multiplicity of inserted knots. len(m)=nrb.dim. default None Returns: A cad_geometry object. p[0,1] p[1,1] o------------o | v | | ^ | | | | | +----> u | o------------o p[0,0] p[1,0] """ nrb = nrb_bilinear(points) cad_nrb = cad_nurbs(nrb.knots, nrb.points, weights=nrb.weights) cad_nrb.rational = False cad_nrb.orientation = [1,-1,-1,1] geo = cad_geometry() geo.append(cad_nrb) # ... 
refinement list_t = None if n is not None: list_t = [] for axis in range(0,cad_nrb.dim): ub = cad_nrb.knots[axis][0] ue = cad_nrb.knots[axis][-1] list_t.append(np.linspace(ub,ue,n[axis]+2)[1:-1]) list_p = None if p is not None: list_p = [] for axis in range(0,cad_nrb.dim): list_p.append(p[axis] - cad_nrb.degree[axis]) geo.refine(list_t=list_t, list_p=list_p, list_m=m) # ... geo._internal_faces = [] geo._external_faces = [[0,0],[0,1],[0,2],[0,3]] geo._connectivity = [] return geo def periodic_bilinear(points=None, n=None, p=None): geo_init = bilinear(points=points, n=n, p=p) nrb0 = geo_init[0] nrb1 = nrb0.unclamp(0).unclamp(1) geo = cad_geometry() geo.append(nrb1) list_connectivity = [] dict_con = {} dict_con['original'] = [0,0] dict_con['clone'] = [0,2] dict_con['periodic'] = True list_connectivity.append(dict_con) dict_con = {} dict_con['original'] = [0,1] dict_con['clone'] = [0,3] dict_con['periodic'] = True list_connectivity.append(dict_con) geo._connectivity = list_connectivity geo._internal_faces = [[0,0],[0,1],[0,2],[0,3]] geo._external_faces = [] return geo def circle(radius=1.0, center=None, n=None, p=None): """Creates a 2D circle with 1 patch as cad_geometry object. Kwargs: radius (float): The radius of the circle. default 1. n (list int): This is a list containing the number of interior knots to insert. default None p (list int): This is a list containing the spline degree of the line. default None Returns: A cad_geometry object. """ s = 1./np.sqrt(2) knots = [ [0.0 , 0.0 , 0.0 , 1.0 , 1.0 , 1.0] \ , [0.0 , 0.0 , 0.0 , 1.0 , 1.0 , 1.0] ] points = np.zeros((3,3,3)) points[0,0,:] = np.asarray([-s , -s , 0.]) points[1,0,:] = np.asarray([-2*s , 0. , 0.]) points[2,0,:] = np.asarray([-s , s , 0.]) points[0,1,:] = np.asarray([0. , -2*s , 0.]) points[1,1,:] = np.asarray([0. , 0.0 , 0.]) points[2,1,:] = np.asarray([0. , 2*s , 0.]) points[0,2,:] = np.asarray([s , -s , 0.]) points[1,2,:] = np.asarray([2*s , 0. , 0.]) points[2,2,:] = np.asarray([s , s , 0.]) points *= radius if center is not None: points[...,0] += center[0] points[...,1] += center[1] try: points[...,2] += center[2] except: pass weights = np.zeros((3,3)) weights[0,0] = 1. weights[1,0] = s weights[2,0] = 1. weights[0,1] = s weights[1,1] = 1. weights[2,1] = s weights[0,2] = 1. weights[1,2] = s weights[2,2] = 1. cad_nrb = cad_nurbs(knots, points, weights=weights) cad_nrb.rational = True cad_nrb.orientation = [-1,1,1,-1] geo = cad_geometry() geo.append(cad_nrb) # ... refinement list_t = None if n is not None: list_t = [] for axis in range(0,cad_nrb.dim): ub = cad_nrb.knots[axis][0] ue = cad_nrb.knots[axis][-1] list_t.append(np.linspace(ub,ue,n[axis]+2)[1:-1]) list_p = None if p is not None: list_p = [] for axis in range(0,cad_nrb.dim): list_p.append(p[axis] - cad_nrb.degree[axis]) geo.refine(list_t=list_t, list_p=list_p) # ... geo._internal_faces = [] geo._external_faces = [[0,0],[0,1],[0,2],[0,3]] geo._connectivity = [] return geo def quart_circle(rmin=0.5, rmax=1.0, center=None, n=None, p=None): """Creates a 2D quarter circle with 1 patch as cad_geometry object. Kwargs: rmin (float): Minimal radius of the quart-circle. default 0.5 rmax (float): Maximal radius of the quart-circle. default 1.0 n (list int): This is a list containing the number of interior knots to insert. default None p (list int): This is a list containing the spline degree of the line. default None Returns: A cad_geometry object. 
""" knots = [ [0.0 , 0.0 , 0.0 , 1.0 , 1.0 , 1.0] \ , [0.0 , 0.0 , 1.0 , 1.0] ] points = np.zeros((3,2,3)) j = 0 points[0,j,:] = np.asarray([0.0 , -rmin , 0.0]) points[1,j,:] = np.asarray([-rmin , -rmin , 0.0]) points[2,j,:] = np.asarray([-rmin , 0.0 , 0.0]) j = 1 points[0,j,:] = np.asarray([0.0 , -rmax , 0.0]) points[1,j,:] = np.asarray([-rmax , -rmax , 0.0]) points[2,j,:] = np.asarray([-rmax , 0.0 , 0.0]) if center is not None: points[...,0] += center[0] points[...,1] += center[1] try: points[...,2] += center[2] except: pass weights = np.zeros((3,2)) j = 0 weights[0,j] = 1.0 weights[1,j] = 0.707106781187 weights[2,j] = 1.0 j = 1 weights[0,j] = 1.0 weights[1,j] = 0.707106781187 weights[2,j] = 1.0 cad_nrb = cad_nurbs(knots, points, weights=weights) cad_nrb.rational = True cad_nrb.orientation = [1,-1,-1,1] geo = cad_geometry() geo.append(cad_nrb) # ... refinement list_t = None if n is not None: list_t = [] for axis in range(0,cad_nrb.dim): ub = cad_nrb.knots[axis][0] ue = cad_nrb.knots[axis][-1] list_t.append(np.linspace(ub,ue,n[axis]+2)[1:-1]) list_p = None if p is not None: list_p = [] for axis in range(0,cad_nrb.dim): list_p.append(p[axis] - cad_nrb.degree[axis]) geo.refine(list_t=list_t, list_p=list_p) # ... geo._internal_faces = [] geo._external_faces = [[0,0],[0,1],[0,2],[0,3]] geo._connectivity = [] return geo def annulus(rmin=0.5, rmax=1.0, center=None, n=None, p=None): """Creates a 2D annulus with 1 patch as cad_geometry object. Kwargs: rmin (float): Minimal radius of the annulus. default 0.5 rmax (float): Maximal radius of the annulus. default 1.0 n (list int): This is a list containing the number of interior knots to insert. default None p (list int): This is a list containing the spline degree of the line. default None Returns: A cad_geometry object. 
""" knots = [ [0.0 , 0.0 , 1.0 , 1.0] \ , [0.0 , 0.0 , 0.0 , 0.25 , 0.25 , 0.5 , 0.5 , 0.75 , 0.75 , 1.0 , 1.0 , 1.0] ] points = np.zeros((2,9,3)) j = 0 points[j,0,:] = np.asarray([0.0 , -rmin , 0.0]) points[j,1,:] = np.asarray([-rmin , -rmin , 0.0]) points[j,2,:] = np.asarray([-rmin , 0.0 , 0.0]) points[j,3,:] = np.asarray([-rmin , rmin , 0.0]) points[j,4,:] = np.asarray([0.0 , rmin , 0.0]) points[j,5,:] = np.asarray([rmin , rmin , 0.0]) points[j,6,:] = np.asarray([rmin , 0.0 , 0.0]) points[j,7,:] = np.asarray([rmin , -rmin , 0.0]) points[j,8,:] = np.asarray([0.0 , -rmin , 0.0]) j = 1 points[j,0,:] = np.asarray([0.0 , -rmax , 0.0]) points[j,1,:] = np.asarray([-rmax , -rmax , 0.0]) points[j,2,:] = np.asarray([-rmax , 0.0 , 0.0]) points[j,3,:] = np.asarray([-rmax , rmax , 0.0]) points[j,4,:] = np.asarray([0.0 , rmax , 0.0]) points[j,5,:] = np.asarray([rmax , rmax , 0.0]) points[j,6,:] = np.asarray([rmax , 0.0 , 0.0]) points[j,7,:] = np.asarray([rmax , -rmax , 0.0]) points[j,8,:] = np.asarray([0.0 , -rmax , 0.0]) if center is not None: points[...,0] += center[0] points[...,1] += center[1] try: points[...,2] += center[2] except: pass weights = np.zeros((2,9)) j = 0 weights[j,0] = 1.0 weights[j,1] = 0.707106781187 weights[j,2] = 1.0 weights[j,3] = 0.707106781187 weights[j,4] = 1.0 weights[j,5] = 0.707106781187 weights[j,6] = 1.0 weights[j,7] = 0.707106781187 weights[j,8] = 1.0 j = 1 weights[j,0] = 1.0 weights[j,1] = 0.707106781187 weights[j,2] = 1.0 weights[j,3] = 0.707106781187 weights[j,4] = 1.0 weights[j,5] = 0.707106781187 weights[j,6] = 1.0 weights[j,7] = 0.707106781187 weights[j,8] = 1.0 cad_nrb = cad_nurbs(knots, points, weights=weights) cad_nrb.rational = True cad_nrb.orientation = [-1,1,1,-1] geo = cad_geometry() geo.append(cad_nrb) # ... refinement list_t = None if n is not None: list_t = [] for axis in range(0,cad_nrb.dim): ub = cad_nrb.knots[axis][0] ue = cad_nrb.knots[axis][-1] U = np.linspace(ub,ue,n[axis]+2)[1:-1] if axis==1: U = [u for u in U if u not in [0.25, 0.5, 0.75]] list_t.append(U) list_p = None if p is not None: list_p = [] for axis in range(0,cad_nrb.dim): list_p.append(p[axis] - cad_nrb.degree[axis]) geo.refine(list_t=list_t, list_p=list_p) # ... if rmin > 0.: geo._internal_faces = [[0,0],[0,2]] geo._external_faces = [[0,1],[0,3]] if rmin == 0.: geo._internal_faces = [[0,0],[0,1],[0,2]] geo._external_faces = [[0,3]] geo._connectivity = [] dict_con = {}; dict_con['original'] = [0,0]; dict_con['clone'] = [0,2]; geo._connectivity.append(dict_con) return geo def circle_5mp(rmin=0.5, rmax=1.0, center=None, n=None, p=None): """Creates a 2D description of the circle using 5 patchs. Kwargs: rmin (float): Minimal radius of the annulus. default 0.5 rmax (float): Maximal radius of the circle. default 1.0 n (list int): This is a list containing the number of interior knots to insert. default None p (list int): This is a list containing the spline degree of the line. default None Returns: A cad_geometry object. """ # ... Import the quart_circle domain geo_1 = quart_circle(rmin=rmin, rmax=rmax, center=center, n=n,p=p) geo_1[0].transpose() # ... Import the quart_circle domain geo_2 = quart_circle(rmin=rmin, rmax=rmax, center=center, n=n,p=p) geo_2[0].rotate(0.5*np.pi) geo_2[0].transpose() # ... Import the quart_circle domain geo_3 = quart_circle(rmin=rmin, rmax=rmax, center=center, n=n,p=p) geo_3[0].rotate(np.pi) geo_3[0].reverse(0) # ... 
Import the quart_circle domain geo_4 = quart_circle(rmin=rmin, rmax=rmax, center=center, n=n,p=p) geo_4[0].rotate(1.5*np.pi) geo_4[0].reverse(0) # ... Import the circle domain geo_5 = circle(radius=rmin, center=center,n=n,p=p) geo_5[0].rotate(0.25*np.pi) geo_5[0].rotate(0.5*np.pi) geo_12 = geo_1.merge(geo_2) geo_34 = geo_3.merge(geo_4) geo_1234 = geo_12.merge(geo_34) geo = geo_1234.merge(geo_5) return geo def pinched_quart_circle(rmin=0.5, rmax=1.0, epsilon=0.5, center=None, degree=0.0): """Creates a 2D pinched quarter circle with 1 patch as cad_geometry object. Kwargs: rmin (float): Minimal radius of the quart-circle. default 0.5 rmax (float): Maximal radius of the quart-circle. default 1.0 epsilon (float): Factor of deformation of the quart-circle. default 0.5 degree (float): rotation degree. default 0.0 Returns: A cad_geometry object. """ # ... Import the quart_circle domain geo = quart_circle(rmin=rmin, rmax=rmax, center=center) #getting the patch: cad_nrb = geo[0] # Refining... list_t = None n = 4 p = 3 if n is not None: list_t = [] for axis in range(0,geo.dim): ub = cad_nrb.knots[axis][0] ue = cad_nrb.knots[axis][-1] list_t.append(np.linspace(ub,ue,n+2)[1:-1]) list_p = None if p is not None: list_p = [] for axis in range(0,cad_nrb.dim): list_p.append(p - cad_nrb.degree[axis]) geo.refine(list_t=list_t, list_p=list_p, list_m=[1,1]) # "Pinching" the geometry cad_nrb = geo[0] if ((epsilon<=1.)and(epsilon>=0)) : epsilon = epsilon/10. # first corner cad_nrb.control[0,0,1] -= epsilon cad_nrb.control[1,0,1] -= epsilon/2. cad_nrb.control[0,1,1] -= epsilon cad_nrb.control[1,1,1] -= epsilon/2. # second corner cad_nrb.control[7,0,0] -= epsilon cad_nrb.control[6,0,0] -= epsilon/2. cad_nrb.control[7,1,0] -= epsilon cad_nrb.control[6,1,0] -= epsilon/2. cad_nrb.rotate(degree) else: print(" ERROR in pinched_quart_circle : epsilon should be in between 0 and 1") STOP return geo def pinched_circle(radius=0.5, epsilon=0.5, center=None, n=None, p=None): geo = circle(radius=radius, center=center, n=n, p=p) geo[0].rotate(0.25*np.pi) cad_nrb = geo[0] # Refining... list_t = None n = 4 p = 3 if n is not None: list_t = [] for axis in range(0,geo.dim): ub = cad_nrb.knots[axis][0] ue = cad_nrb.knots[axis][-1] list_t.append(np.linspace(ub,ue,n+2)[1:-1]) list_p = None if p is not None: list_p = [] for axis in range(0,cad_nrb.dim): list_p.append(p - cad_nrb.degree[axis]) geo.refine(list_t=list_t, list_p=list_p, list_m=[1,1]) # "Pinching" the geometry cad_nrb = geo[0] if ((epsilon<=1.)and(epsilon>=0)) : epsilon = epsilon/10. # first corner cad_nrb.control[0,0,1] -= epsilon cad_nrb.control[1,0,1] -= epsilon/2. cad_nrb.control[1,1,1] -= epsilon cad_nrb.control[0,1,1] -= epsilon/2. # second corner cad_nrb.control[7,0,0] -= epsilon cad_nrb.control[6,0,0] -= epsilon/2. cad_nrb.control[6,1,0] -= epsilon cad_nrb.control[7,1,0] -= epsilon/2. # third corner cad_nrb.control[1,6,0] += epsilon cad_nrb.control[0,6,0] += epsilon/2. cad_nrb.control[0,7,0] += epsilon cad_nrb.control[1,7,0] += epsilon/2. # second corner cad_nrb.control[6,6,1] += epsilon cad_nrb.control[7,6,1] += epsilon/2. cad_nrb.control[7,7,1] += epsilon cad_nrb.control[6,7,1] += epsilon/2. else: print(" ERROR in pinched_circle : epsilon should be in between 0 and 1") STOP return geo def pinched_circle_5mp(rmin=0.5, rmax=1.0, epsilon=0.5, center=None, n=None, p=None): """Creates a 2D description of a pinched circle using 5 patchs. This geometry is supposed to avoid any singular points on the disk. Kwargs: rmin (float): Minimal radius of the annulus. 
default 0.5 rmax (float): Maximal radius of the circle. default 1.0 epsilon (float): Parameter that mesures the level of deformation of the internal interface. default 0.5 n (list int): This is a list containing the number of interior knots to insert. default None p (list int): This is a list containing the spline degree of the line. default None Returns: A cad_geometry object. """ # ... Import the quart_circle domain geo_1 = pinched_quart_circle(rmin=rmin, rmax=rmax, epsilon=epsilon, center=center) geo_1[0].transpose() # ... Import the quart_circle domain geo_2 = pinched_quart_circle(rmin=rmin, rmax=rmax, epsilon=epsilon, center=center, degree=0.5*np.pi) geo_2[0].reverse(0) # ... Import the quart_circle domain geo_3 = pinched_quart_circle(rmin=rmin, rmax=rmax, epsilon=epsilon, center=center, degree=np.pi) geo_3[0].reverse(0) # ... Import the quart_circle domain geo_4 = pinched_quart_circle(rmin=rmin, rmax=rmax, epsilon=epsilon, degree=1.5*np.pi) # geo_4[0].reverse(0) geo_4[0].transpose() # ... Import the pinched_circle domain geo_5 = pinched_circle(radius=rmin, epsilon=epsilon, center=center,n=n,p=p) geo_12 = geo_1.merge(geo_2) geo_34 = geo_3.merge(geo_4) geo_1234 = geo_12.merge(geo_34) geo = geo_1234.merge(geo_5) return geo def miller_equilibrium(rmin=0.5, rmax=1.0, n=None, p=None, params_shape=None, params_eq=None, n_sampling=400): """ Kwargs: rmin (float): Minimal radius of the quart-circle. default 0.5 rmax (float): Maximal radius of the quart-circle. default 1.0 Returns: A cad_geometry object. """ from scipy.interpolate import splev, splrep from numpy import pi, sin, cos, sinh, linspace, ones_like, log, sqrt, meshgrid # ... if params_shape is None: params_shape = {} params_shape['A'] = 3.17 params_shape['psi_tilde'] = 0.77 params_shape['kappa0'] = 1.66 params_shape['delta0'] = 0.416 params_shape['alpha'] = 1.22 # ... # ... if params_eq is None: params_eq = {} params_eq['sk'] = 0.7 params_eq['sd'] = 1.37 params_eq['dR0'] =-0.354 params_eq['q'] = 3.03 params_eq['s'] = 2.47 # ... # ... A = params_shape['A'] psi_tilde = params_shape['psi_tilde'] kappa0 = params_shape['kappa0'] delta0 = params_shape['delta0'] alpha = params_shape['alpha'] # ... # ... sk = params_eq['sk'] sd = params_eq['sd'] dR0 = params_eq['dR0'] q = params_eq['q'] s = params_eq['s'] # ... R0 = A * psi_tilde t = linspace(0., 2. * pi, n_sampling) if p is None: p = [1, 2] degree = p[1] if n is None: n = [17, 17] Nu = n[0] ; Nv = n[1] r_grid = linspace(rmin, rmax, Nu+2) T = np.linspace(0.,2*pi,Nv+2)[1:-1] list_crv = [] for r in r_grid: kappa = kappa0 * (1. + sk * log(r/psi_tilde)) delta = delta0 + sd * sqrt(1.-delta0**2) * log(r/psi_tilde) R = R0 + r * cos(t + sinh(delta) * sin(t)) Z = kappa * r * sin(t) tck_R = splrep(t,R, k=degree, xb=0., xe=2*pi, task=-1, s=None, t=T, full_output=0, per=1, quiet=1) tck_Z = splrep(t,Z, k=degree, xb=0., xe=2*pi, task=-1, s=None, t=T, full_output=0, per=1, quiet=1) u = tck_R[0] _n =len(u)-degree-1 P = np.zeros((_n,2)) P[:,0] = tck_R[1][:_n] P[:,1] = tck_Z[1][:_n] crv = cad_nurbs([u], P) crv = crv.clamp(0) list_crv.append(crv) geo = square(n=[Nu,Nv], p=[1,degree]) nrb = geo[0] points = np.zeros_like(nrb.points) for i, crv in enumerate(list_crv): points[i,:,:] = crv.points[:,:] nrb.set_points(points) geo.refine(list_p=[p[0]-1,0]) geo.update() return geo def trilinear(points=None, n=None, p=None): # TODO add multiplicity from igakit.cad import trilinear as nrb_trilinear """Creates a Trilinear cad_geometry object. TODO: needs to be updated Kwargs: points (array): The summits of the cube. 
n (list int): This is a list containing the number of interior knots to insert. default None p (list int): This is a list containing the spline degree of the line. default None Returns: A cad_geometry object. p[0,1,1] p[1,1,1] o------------o /| /| / | / | w o------------o | ^ v | p[0,0,1] | p[1,0,1] | / | | | | |/ | o-------- | -o +----> u | / p[0,1,0] | / p[1,1,0] |/ |/ o------------o p[0,0,0] p[1,0,0] """ nrb = nrb_trilinear(points) cad_nrb = cad_nurbs(nrb.knots, nrb.points, weights=nrb.weights) cad_nrb.rational = False geo = cad_geometry() geo.append(cad_nrb) # ... refinement list_t = None if n is not None: list_t = [] for axis in range(0,cad_nrb.dim): ub = cad_nrb.knots[axis][0] ue = cad_nrb.knots[axis][-1] list_t.append(np.linspace(ub,ue,n[axis]+2)[1:-1]) list_p = None if p is not None: list_p = [] for axis in range(0,cad_nrb.dim): list_p.append(p[axis] - cad_nrb.degree[axis]) geo.refine(list_t=list_t, list_p=list_p) # ... geo._internal_faces = [] geo._external_faces = [[0,0],[0,1],[0,2],[0,3],[0,4],[0,5]] geo._connectivity = [] return geo def cube(n=None, p=None, m=None): """Creates a unit cube cad_geometry object. Kwargs: n (list int): This is a list containing the number of interior knots to insert. default None p (list int): This is a list containing the spline degree of the line. default None Returns: A cad_geometry object. """ points = np.zeros((2,2,2,3)) points[0,0,0,:] = np.asarray([0.,0.,0.]) points[0,0,1,:] = np.asarray([0.,0.,1.]) points[0,1,0,:] = np.asarray([0.,1.,0.]) points[0,1,1,:] = np.asarray([0.,1.,1.]) points[1,0,0,:] = np.asarray([1.,0.,0.]) points[1,0,1,:] = np.asarray([1.,0.,1.]) points[1,1,0,:] = np.asarray([1.,1.,0.]) points[1,1,1,:] = np.asarray([1.,1.,1.]) if m is not None: print ("TODO add multiplicity in cube and trilinear") return trilinear(points=points, n=n, p=p) def merge(list_geo, npts=5): """ merge a list of cad_geometries and update internal/external faces and connectivities Args: list_geo: a list of cad_geometries Returns: a cad_geometries """ geo_f = list_geo[0] for geo in list_geo[1:]: geo_f = geo_f.merge(geo, npts=npts) return geo_f def tcoons(curves, profile=0): """ creates a 2D geometry (for the moment) using 3 curves. the profile is given as for the construction of Triangle. TODO: profile > 0 Args: curves: list of curves profile: the profile of the triangle Returns: a NURBS """ from igakit.cad import coons [c1, c2, c3] = curves if np.allclose(c2.points[-1,:],c1.points[0,:]): c2 = c2.reverse() if np.allclose(c3.points[-1,:],c1.points[-1,:]): c3 = c3.reverse() # ... if profile==0: A = c1.points[0,:] P = np.zeros_like(c3.points) for i in range(0,P.shape[0]): P[i,:] = A[:] c = cad_nurbs(c3.knots, P, weights=c3.weights) nrb = coons([[c1,c2],[c,c3]]) # ... # ... if profile==1: raise NotImplementedError("TCoons with profile 1 Not yet implemented") # A = c2.points[0,:] # # P = np.zeros_like(c3.points) # for i in range(0,P.shape[0]): # P[i,:] = A[:] # # c = cad_nurbs(c3.knots, P, weights=c3.weights) # nrb = coons([[c1,c2],[c,c3]]) # ... # ... if profile==2: raise NotImplementedError("TCoons with profile 2 Not yet implemented") # A = c1.points[0,:] # # P = np.zeros_like(c3.points) # for i in range(0,P.shape[0]): # P[i,:] = A[:] # # c = cad_nurbs(c3.knots, P, weights=c3.weights) # nrb = coons([[c1,c2],[c,c3]]) # ... # ... 
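# Illustrative sketch of the cube/trilinear helpers defined above: a single
# unit-cube patch, degree-elevated to quadratic and refined with three
# interior knots per direction.  It assumes igakit's trilinear constructor is
# available, as imported in trilinear(); the numbers are examples only.
#   >>> geo = cube(n=[3, 3, 3], p=[2, 2, 2])
#   >>> geo.npatchs
#   1
#   >>> geo.dim
#   3
#   >>> geo[0].degree
#   (2, 2, 2)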
if profile==3: raise NotImplementedError("TCoons with profile 3 Not yet implemented") # A = c1.points[0,:] # # P = np.zeros_like(c3.points) # for i in range(0,P.shape[0]): # P[i,:] = A[:] # # c = cad_nurbs(c3.knots, P, weights=c3.weights) # nrb = coons([[c1,c2],[c,c3]]) # ... # ... return nrb class cad_io: def __init__(self, file, mode="r"): """Open file and return a corresponding stream. If the file cannot be opened, an IOError is raised. file must be in ['xml', 'txt', 'zip'] TODO : add hdf5 format Args: file: file name. mode: is an optional string that specifies the mode in which the file is opened. It defaults to 'r' which means open for reading in text mode. Other common values are 'w' for writing (truncating the file if it already exists) Returns: A file stream Raises: IOError: An error occurred accessing the bigtable.Table object. """ self.__filename__ = file self.__format__ = str(file.split('.')[-1]).lower() self.mode = mode if self.mode not in ["r","w"]: raise ValueError("cad_io : mode must be r or w") if self.__format__ not in ["xml","zip","txt", "nml"]: raise ValueError("cad_io : format must be xml, nml, zip or txt") def read(self, geo): """ Write the given cad_geometry to the underlying stream """ if self.mode != "r": raise ValueError("mode file must be set to r ") if self.__format__=="vtk": raise ValueError("VTK import Not yet implemented") if self.__format__=="hdf5": raise ValueError("DHF5 import Not yet implemented") if self.__format__=="xml": try: rw = XML() return rw.read(self.__filename__, geo) except IOError as e: print(("I/O error({0}): {1}".format(e.errno, e.strerror))) except: raise ValueError("Unexpected error:", sys.exc_info()[0]) if self.__format__=="nml": try: rw = NML() return rw.read(self.__filename__, geo) except IOError as e: print(("I/O error({0}): {1}".format(e.errno, e.strerror))) except: raise ValueError("Unexpected error:", sys.exc_info()[0]) def write(self, geo): """ Write the given cad_geometry geo """ if self.mode != "w": raise ValueError("mode file must be set to w ") if self.__format__=="hdf5": raise ValueError("Not yet implemented") if self.__format__=="xml": try: rw = XML() rw.write(self.__filename__, geo) except IOError as e: print(("I/O error({0}): {1}".format(e.errno, e.strerror))) except: raise ValueError("Unexpected error:", sys.exc_info()[0]) if self.__format__=="zip": try: rw = TXT() rw.write(self.__filename__, geo, fmt="zip") except IOError as e: print(("I/O error({0}): {1}".format(e.errno, e.strerror))) except: raise ValueError("Unexpected error:", sys.exc_info()[0]) if self.__format__=="txt": try: rw = TXT() rw.write(self.__filename__, geo, fmt="txt") except IOError as e: print(("I/O error({0}): {1}".format(e.errno, e.strerror))) except: raise ValueError("Unexpected error:", sys.exc_info()[0]) class cad_object(object): def __new__(typ, *args, **kwargs): obj = object.__new__(typ) obj._attributs = {} obj.rational = False obj.orientation = None obj.face = None return obj def __init__(self): self._attributs = {} self.rational = False self.orientation = None self.face = None def _clone_data(self, other): try: self.rational = other.rational self.orientation = other.orientation self._attributs = other._attributs except: pass def _copy_data(self, other): try: self.rational = other.rational self.orientation = other.orientation.copy() self._attributs = other._attributs.copy() except: pass def set_attributs(self, attributs): """ sets attributs for the current cad_nurbs object. 
Needed when constructing the geometry by *hand* """ self._attributs = attributs def set_attribut(self, attribut, value): """ sets attribut to the value value for the current cad_nurbs object. Needed when constructing the geometry by *hand* """ self._attributs[attribut] = value def get_attribut(self, name): """ returns attributs for the current cad_nurbs object. """ try: return self._attributs[name] except: return None @property def attributs(self): return self._attributs def set_orientation(self, list_sgn): """ sets the orientation of the boundary for the current cad_nurbs object. Parameters ---------- list_sgn : list of floats list_sgn is a list of signs for each face. the length of list_sgn is equal to the number of faces; 2 in 1D, 4 in 2D and 6 in 3D. a sign is either +1 or -1. """ self.orientation = list_sgn def set_rational(self, value): """ make the current cad_nurbs object rational. Parameters ---------- value : int if equal to 1: the cad_nurbs is rational, and we use weights each time we evaluate it at given sites. if equal to 0: the cad_nubrs is simply a spline object, no need to use weights. """ self.rational = value @property def nFaces(self): if self.dim == 1: nfaces = 2 if self.dim == 2: nfaces = 4 if self.dim == 3: nfaces = 6 return nfaces def metric(self, u=None, v=None, w=None): """ compute and return the metric at the parametric 1D sites u, v, w """ xyz = [] Dw = self.evaluate_deriv(u=u,v=v,w=w,nderiv=1) if self.dim == 1: x = Dw[0,:,:,0] xdu = Dw[1,:,:,0] y = Dw[0,:,:,1] ydu = Dw[1,:,:,1] jac = 1. / np.sqrt(xdu**2 + ydu**2) xyz = [x,y] if self.dim == 2: x = Dw[0,:,:,0] xdu = Dw[1,:,:,0] xdv = Dw[2,:,:,0] y = Dw[0,:,:,1] ydu = Dw[1,:,:,1] ydv = Dw[2,:,:,1] jac = xdu * ydv - xdv * ydu xyz = [x,y] if self.dim == 3: raise ValueError("Not yet implemented") return xyz, jac class cad_nurbs(cad_object, NURBS): #class cad_nurbs(NURBS, cad_object): # def __new__(typ, *args, **kwargs): # obj = NURBS.__new__(typ) # obj._attributs = {} # obj.rational = False # obj.orientation = None # obj.face = None # return obj def __init__(self, *args, **kwargs): """ creates a cad_nurbs object. arguments are the same as for a cad_nurbs object. rational, orientation and face are set to None. The user must specify them in order to finalize the construction of the cad_nurbs object. An abstract cad_nurbs object class. This cad_nurbs class allows for the definition of B-spline or cad_nurbs curves/surfaces/volumes by specifying a control point array, a sequence of knot vectors and optional rational weights. Parameters ---------- control : array_like or 2-tuple of array_like Control points and optional rational weights. knots : sequence of array_like Knot vectors. The number of knot vectors will define what kind of cad_nurbs object is created (1=curve, 2=surface, 3=volume). weights : array_like, optional Rational weights. If weights are omitted, the object will be non-rational (B-spline). fields : array_like, optional Additional fields. Attributes ---------- dim : int Parametric dimension of the cad_nurbs object {1,2,3} shape : tuple of ints Number of control points in each parametric dimension. degree : tuple of ints Polynomial degrees in each parametric dimension. knots : tuple of numpy.ndarray Knot vectors in each parametric dimension. array: numpy.ndarray Container for control points, weights, and fields. control : numpy.ndarray Control points in homogeneous 4D space (includes rational weights). weigths : numpy.ndarray Rational weigths. 
points : numpy.ndarray Control points projected into Cartesian 3D space. fields : numpy.ndarray or None Additional fields. Examples -------- Create a quarter circle cad_nurbs curve with 2D control points and rational weigths and check error: >>> C = [[0, 1], [1, 1], [1, 0]] # 3x2 grid of 2D control points >>> w = [1, np.sqrt(2)/2, 1] # rational weigths >>> U = [0,0,0, 1,1,1] # knot vector >>> crv = cad_nurbs([U], C, weights=w) >>> u = np.linspace(0,1,1000) >>> xyz = crv(u) >>> x, y, z = xyz.T >>> r = np.sqrt(x**2+y**2) >>> np.allclose(r, 1, rtol=0, atol=1e-15) True >>> np.allclose(z, 0, rtol=0, atol=1e-15) True Create a quarter circle cad_nurbs curve with homogeneous 4D control points and check error: >>> wgt = np.sqrt(2)/2 >>> Cw = np.zeros((3,4)) >>> Cw[0,:] = [0.0, 1.0, 0.0, 1.0] >>> Cw[1,:] = [wgt, wgt, 0.0, wgt] >>> Cw[2,:] = [1.0, 0.0, 0.0, 1.0] >>> crv = cad_nurbs([U], Cw) >>> u = np.linspace(0,1,1000) >>> xyz = crv(u) >>> x, y, z = xyz.T >>> r = np.sqrt(x**2+y**2) >>> np.allclose(r, 1, rtol=0, atol=1e-15) True >>> np.allclose(z, 0, rtol=0, atol=1e-15) True Create a random B-spline curve: >>> C = np.random.rand(3,3) # 3D control points >>> U = [0,0,0, 1,1,1] # knot vector >>> crv = cad_nurbs([U], C) >>> crv.dim 1 >>> crv.shape (3,) >>> crv.degree (2,) >>> np.allclose(crv.knots[0], U, rtol=0, atol=1e-15) True >>> np.allclose(crv.points, C, rtol=0, atol=1e-15) True >>> np.allclose(crv.weights, 1, rtol=0, atol=1e-15) True Create a random B-spline surface: >>> C = np.random.rand(3,2,3) # 3x2 grid of 3D control points >>> U = [0,0,0, 1,1,1] # knot vector >>> V = [0,0, 1,1] # knot vector >>> srf = cad_nurbs([U,V], C) >>> srf.dim 2 >>> srf.shape (3, 2) >>> srf.degree (2, 1) >>> np.allclose(srf.knots[0], U, rtol=0, atol=1e-15) True >>> np.allclose(srf.knots[1], V, rtol=0, atol=1e-15) True >>> np.allclose(srf.points, C, rtol=0, atol=1e-15) True >>> np.allclose(srf.weights, 1, rtol=0, atol=1e-15) True Create a random B-spline volume: >>> C = np.random.rand(3,2,7,3) # 3x2x7 grid of 3D control points >>> U = [0,0,0, 1,1,1] # knot vector >>> V = [0,0, 1,1] # knot vector >>> W = [0]*4+[0.25, 0.5, 0.5]+[1]*4 >>> vol = cad_nurbs([U,V,W], C) >>> vol.dim 3 >>> vol.shape (3, 2, 7) >>> vol.degree (2, 1, 3) >>> np.allclose(vol.knots[0], U, rtol=0, atol=1e-15) True >>> np.allclose(vol.knots[1], V, rtol=0, atol=1e-15) True >>> np.allclose(vol.knots[2], W, rtol=0, atol=1e-15) True >>> np.allclose(vol.points, C, rtol=0, atol=1e-15) True >>> np.allclose(vol.weights, 1, rtol=0, atol=1e-15) True """ NURBS.__init__(self, *args, **kwargs) cad_object.__init__(self) self._attributs = {} self.rational = False self.orientation = None self.face = None self.set_attribut("type",self.__class__.__name__ ) def set_points(self, pts): """ Sets the Control point grid projected into Cartesian 3D space. """ self.array[...,:3] = pts * self.weights[...,np.newaxis] def _clone_data(self, nrb): # nrb can be NURBS or cad_nurbs self._array = nrb.array self._knots = nrb.knots cad_object._clone_data(self, nrb) def _copy_data(self, nrb): # nrb can be NURBS or cad_nurbs self._array = self.array.copy() self._knots = tuple(k.copy() for k in self.knots) cad_object._copy_data(self, nrb) def clone(self): """ Clone a cad_nurbs object. Returns a new instace of the cad_nurbs objects with references to the control points and knot vectors of this cad_nurbs object. Modifying the knot vector or control points of the returned object WILL affect this object. 
Examples -------- Create a random curve, copy the curve, change the control points, demonstrate that changing c2 changes c1. >>> C = np.random.rand(5,2) >>> U = [0,0,1,2,3,4,4] >>> c1 = cad_nurbs([U], C) >>> c2 = c1.clone() >>> c2.control[2,:] = [1.0,1.0,0.0,1.0] >>> (abs(c2.control-c1.control)).max() < 1.0e-15 True """ cad_nrb = cad_nurbs.__new__(type(self)) cad_nrb._clone_data(self) return cad_nrb def copy(self): """ Copy a cad_nurbs object. Returns a new instace of the cad_nurbs objects with copies to the control points and knot vectors of this cad_nurbs object. Examples -------- Create a random curve, copy the curve, change the control points, demonstrate that changing c2 changes c1. >>> C = np.random.rand(5,2) >>> U = [0,0,1,2,3,4,4] >>> c1 = cad_nurbs([U], C) >>> c2 = c1.copy() >>> c2.control[2,:] = [1.0,1.0,0.0,1.0] >>> (abs(c2.control-c1.control)).max() < 1.0e-15 True """ cad_nrb = cad_nurbs.__new__(type(self)) cad_nrb._array = self.array.copy() cad_nrb._knots = tuple(k.copy() for k in self.knots) cad_nrb._attributs = {} cad_nrb.rational = self.rational cad_nrb.orientation = self.orientation cad_nrb.face = self.face return cad_nrb def elevate(self, *args, **kwargs): """ Degree elevate a cad_nurbs object. Given a polynomial degree to elevate in a parameter direction, degree-elevate the curve. The routine operates on the cad_nurbs object in-place and returns the object. Parameters ---------- axis : int Parameter direction to degree-elevate times : int, optional Polynomial order to elevate Examples -------- Create a random curve, degree elevate, check error: >>> C = np.random.rand(3,3) >>> U = [0,0,0,1,1,1] >>> c1 = cad_nurbs([U], C) >>> c1.degree (2,) >>> c2 = c1.clone().elevate(0, 2) >>> c2.degree (4,) >>> u = np.linspace(0,1,100) >>> xyz1 = c1(u) >>> xyz2 = c2(u) >>> np.allclose(xyz1, xyz2, rtol=0, atol=1e-15) True Create a random surface, degree elevate, check error: >>> C = np.random.rand(3,3,3) >>> U = [0,0,0,1,1,1] >>> V = [0,0,0.5,1,1] >>> s1 = cad_nurbs([U,V], C) >>> s1.degree (2, 1) >>> s2 = s1.clone().elevate(0, 1).elevate(1, 1) >>> s2.degree (3, 2) >>> u = v = np.linspace(0,1,100) >>> xyz1 = s1(u, v) >>> xyz2 = s2(u, v) >>> np.allclose(xyz1, xyz2, rtol=0, atol=1e-15) True """ nrb = NURBS.elevate(self, *args, **kwargs) cad_nrb = cad_nurbs.__new__(type(self)) cad_nrb._clone_data(nrb) cad_nrb.rational = self.rational cad_nrb.orientation = self.orientation cad_nrb._attributs = self._attributs return cad_nrb # def evalMesh(self, npts=3, arr_npts=None): # ... def _refine_array(x, nref): u0 = x[0] ; u1 = x[-1] nx = len(x) ne = nx - 1 xref = np.zeros((ne, nref)) for i,(a,b) in enumerate(zip(x[:-1], x[1:])): xref[i,:] = np.linspace(a,b,nref+1)[0:-1] xref = xref.reshape(ne*nref) xref = np.concatenate((xref,np.asarray([u1]))) return xref # ... 
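# Illustrative sketch of what the local helper _refine_array above computes:
# each knot span of the breaks array is sampled uniformly with nref points and
# the last breakpoint is appended, so the mesh lines below are drawn element
# by element.  The helper is local to this method; it is evaluated here as if
# it were accessible, purely to show the sampling it performs.  Assumes numpy.
#   >>> x = np.array([0., 0.5, 1.])                      # two elements
#   >>> np.allclose(_refine_array(x, 2), [0., 0.25, 0.5, 0.75, 1.])
#   True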
if arr_npts is None: arr_npts = np.arange(self.dim) arr_npts[:] = npts else: if len(arr_npts) != self.dim: print ("wrong length argument") raise() breaks = self.breaks() list_lines = [] if self.dim == 1: u = _refine_array(breaks[0], arr_npts[0]) n_u = u.shape[0] Q = self(u) L = Q[:,:] list_lines.append(L) if self.dim == 2: u = _refine_array(breaks[0], arr_npts[0]) v = _refine_array(breaks[1], arr_npts[1]) n_u = u.shape[0] n_v = v.shape[0] Q = self(u,v) for i in range(0, n_u): L = Q[i,:,:] list_lines.append(L) for j in range(0, n_v): L = Q[:,j,:] list_lines.append(L) if self.dim == 3: list_lines = [] u = _refine_array(breaks[0], arr_npts[0]) v = _refine_array(breaks[1], arr_npts[1]) w = _refine_array(breaks[2], arr_npts[2]) n_u = u.shape[0] n_v = v.shape[0] n_w = w.shape[0] Q = self(u,v,w) for i in range(0, n_u): for j in range(0, n_v): L = Q[i,j,:,:] list_lines.append(L) for j in range(0, n_v): for k in range(0, n_w): L = Q[:,j,k,:] list_lines.append(L) for i in range(0, n_u): for k in range(0, n_w): L = Q[i,:,k,:] list_lines.append(L) return list_lines def evaluate_deriv(self, *args, **kwargs): """ evaluates a cad_nurbs object (and its derivatives) at the given parametric values. Degree elevate a cad_nurbs object. Parameters ---------- u : float or array_like The first logical direction v : float or array_like The second logical direction w : float or array_like The third logical direction nderiv : int, optional Derivative order (Default=1) rationalize : int, optional Put to 1 if the cad_nurbs is a NURBS (need to use weights for evaluation) fields : bool or array_like, optional TODO Examples -------- Import the circle curve, compute points position and their derivatives: >>> from caid.cad_geometry import circle >>> geo = circle() >>> nrb = geo[0].extract(1,0.) >>> n = 10 >>> t = np.linspace(0,1,n) >>> Dw = nrb.evaluate_deriv(t) >>> x = Dw[0,:,0] >>> y = Dw[0,:,1] >>> dx = Dw[1,:,0] >>> dy = Dw[1,:,1] >>> plt.figure() >>> t = np.linspace(0,1,100) >>> D = nrb(t) >>> xc = D[:,0] >>> yc = D[:,1] >>> plt.plot(x,y, 'ob') >>> plt.plot(xc,yc, '-r') >>> plt.show() The following example, shows the import of the 2D circle description, the computation of points and their derivatives. Finally, we plot the jacobian of the mapping: >>> from caid.cad_geometry import circle as domain >>> nrb = domain()[0] >>> nx = 100 >>> tx = np.linspace(0,1,nx) >>> ny = 100 >>> ty = np.linspace(0,1,ny) >>> Dw = nrb.evaluate_deriv(tx,ty,nderiv=1) >>> x = Dw[0,:,:,0] >>> xdu = Dw[1,:,:,0] >>> xdv = Dw[2,:,:,0] >>> y = Dw[0,:,:,1] >>> ydu = Dw[1,:,:,1] >>> ydv = Dw[2,:,:,1] >>> plt.figure() >>> jac = xdu * ydv - xdv * ydu >>> plt.contourf(x,y,jac) ; plt.colorbar() ; plt.title("Jacobian of the mapping") >>> plt.show() see also :class:`cad_geometry.plotJacobians` """ rationalize = 0 if self.rational: rationalize = 1 kwargs['rationalize'] = rationalize return NURBS.evaluate_deriv(self, *args, **kwargs) def extract_face(self, axis, side): """ Extracts a face from the current cad_nurbs object. Parameters ---------- axis : int side : int the ID of the boundary when want to extract Returns ------- a cad_nurbs object """ # ... n = self.shape[axis] p = self.degree[axis] i_bnd = 0 if side == 0: i_bnd = p elif side == 1: i_bnd = n + 1 else: raise ValueError("wrong argument") # ... # ... ubound = self.knots[axis][i_bnd] face = couple_vers_entier(axis, side) # ... # ... 
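# Illustrative sketch of extract_face: extracting one boundary of a 2D patch
# yields a 1D cad_nurbs curve carrying the corresponding face id.  It assumes
# that the square constructor defined elsewhere in this module follows the
# same (n, p) refinement conventions as the constructors above.
#   >>> nrb = square(n=[3, 3], p=[2, 2])[0]
#   >>> bnd = nrb.extract_face(axis=0, side=0)
#   >>> bnd.dim
#   1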
nrb_bnd = self.extract(axis,ubound) cad_nrb = cad_nurbs.__new__(type(self)) cad_nrb._array = nrb_bnd.array cad_nrb._knots = nrb_bnd.knots cad_nrb.rational = self.rational try: cad_nrb.orientation = [self.orientation[face]] except: pass cad_nrb._attributs = self._attributs cad_nrb.face = face # ... return cad_nrb def plotBoundariesInfo(self): """ plots some informations about the current cad_nurbs object, including the orientation of boundary. """ from matplotlib import pyplot as plt nrb = self if nrb.dim == 2: # ... def plot_crv( nrb, t1, t2, tangent=True, normal=False, color='b' \ , label=None,scale=1.e-1, width='0.00001'): Dw = nrb.evaluate_deriv(t1) x = Dw[0,:,0] y = Dw[0,:,1] dx = Dw[1,:,0] dy = Dw[1,:,1] D = nrb(t2) xc = D[:,0] yc = D[:,1] d = np.sqrt(dx**2 + dy**2) dx = scale * dx/d dy = scale * dy/d n = len(x) for i in range(0,n): if tangent: arr = plt.Arrow(x[i], y[i], dx[i], dy[i], width=width) if normal: arr = plt.Arrow(x[i], y[i], dy[i], -dx[i], width=width) arr.set_facecolor('g') ax = plt.gca() ax.add_patch(arr) plt.plot(x,y, 'o'+color) if label is not None: plt.plot(xc,yc, '-'+color, label=label) else: plt.plot(xc,yc, '-'+color) # ... plt.figure() list_colors = ['r', 'g', 'b', 'k', 'y', 'c'] i = 0 for axis in range(0, self.dim): t1b = nrb.knots[axis][0] ; t1e = nrb.knots[axis][-1] t2b = nrb.knots[axis][0] ; t2e = nrb.knots[axis][-1] t1 = np.linspace(t1b,t1e,10) t2 = np.linspace(t2b,t2e,100) for side in range(0, 2): nrb_bnd = nrb.extract_face(axis, side) face = couple_vers_entier(axis, side) color = list_colors[i] plot_crv(nrb_bnd, t1, t2, tangent=False, normal=True, color=color, label='Face '+str(face),scale=1.e-1, width='0.00001') i += 1 plt.legend() else: raise NotImplementedError('') def evaluate_deriv(self, u=None, v=None, w=None \ , nderiv=1): """ Evaluate the NURBS object at the given parametric values. 
Parameters ---------- u, v, w : float or array_like Examples -------- >>> C = [[-1,0],[0,1],[1,0]] >>> U = [0,0,0,1,1,1] >>> crv = NURBS([U], C) >>> crv.evaluate(0.5).tolist() [0.0, 0.5, 0.0] >>> crv.evaluate([0.5]).tolist() [[0.0, 0.5, 0.0]] >>> crv.evaluate([0,1]).tolist() [[-1.0, 0.0, 0.0], [1.0, 0.0, 0.0]] """ def Arg(p, U, u): u = np.asarray(u, dtype='d') assert u.min() >= U[p] assert u.max() <= U[-p-1] return u # dim = self.dim nderivatives = nderiv if dim == 1: if nderiv == 1: nderivatives = 1 if nderiv == 2: nderivatives = 1+1 if dim == 2: if nderiv == 1: nderivatives = 2 if nderiv == 2: nderivatives = 2+3 if dim == 3: if nderiv == 1: nderivatives = 3 if nderiv == 2: nderivatives = 3+6 uvw = [u,v,w][:dim] for i, a in enumerate(uvw): if a is None: uvw[i] = self.breaks(i) else: U = self.knots[i] p = self.degree[i] uvw[i] = Arg(p, U, a) # # array = self.array d_norm = np.linalg.norm(self.weights - np.ones_like(self.weights)) rationalize = 0 if d_norm > 1.e-7: rationalize = 1 arglist = [nderiv, nderivatives, rationalize] print((" rationalize ", rationalize)) for p, U in zip(self.degree, self.knots): arglist.extend([p, U]) arglist.append(array) arglist.extend(uvw) # Evaluate = getattr(_bsp, 'EvaluateDeriv%d' % self.dim) CwF = Evaluate(*arglist) return CwF[...,:3] def grad(self, u=None, v=None, w=None): ndim = len(self.shape) Dw = self.evaluate_deriv(u=u, v=v, w=w) xyz_arrays = [] du = Dw[1,...,:] xyz_arrays.append(du) if ndim > 1: dv = Dw[2,...,:] xyz_arrays.append(dv) if ndim > 2: dt = Dw[3,...,:] xyz_arrays.append(dt) return xyz_arrays def second_deriv(self, u=None, v=None, w=None): ndim = len(self.shape) Dw = self.evaluate_deriv(u=u, v=v, w=w, nderiv=2) if ndim == 1: nb = 2 ne = 3 if ndim == 2: nb = 3 ne = 6 if ndim == 3: nb = 4 ne = 10 xyz_arrays = [] for i in range(nb,ne): du = Dw[i,...,:] xyz_arrays.append(du) return xyz_arrays def tangent(self, u=None, v=None, w=None, unit=True): Dw = self.evaluate_deriv(u=u, v=v, w=w) dx = Dw[1,:,0] dy = Dw[1,:,1] if unit: d = np.sqrt(dx**2 + dy**2) dx = dx/d dy = dy/d return dx, dy def normal(self, u=None, v=None, w=None, unit=True): Dw = self.evaluate_deriv(u=u, v=v, w=w) dx = Dw[1,:,0] dy = Dw[1,:,1] if unit: d = np.sqrt(dx**2 + dy**2) dx = dx/d dy = dy/d return -dy, dx # class cad_op_nurbs(opNURBS, cad_object): def __new__(typ, *args, **kwargs): obj = object.__new__(typ) obj._attributs = {} obj.rational = False obj.orientation = None obj.face = None obj._nrb = None return obj def __init__(self, op_nrb): self._nrb = op_nrb cad_object.__init__(self) self.set_attribut("type",self.__class__.__name__ ) self.set_attribut("operator",op_nrb.__class__.__name__ ) @property def array(self): return self._nrb.array @property def knots(self): return self._nrb.knots def _clone_data(self, op_nrb): # nrb can be opNURBS or cad_op_nurbs self._array = op_nrb.array self._knots = op_nrb.knots cad_object._clone_data(self, op_nrb) self._nrb = op_nrb._nrb def clone(self): """ Clone a cad_op_nurbs object. Returns a new instace of the cad_nurbs objects with references to the control points and knot vectors of this cad_nurbs object. Modifying the knot vector or control points of the returned object WILL affect this object. Examples -------- Create a random curve, copy the curve, change the control points, demonstrate that changing c2 changes c1. 
>>> C = np.random.rand(5,2) >>> U = [0,0,1,2,3,4,4] >>> nrb = NURBS([U], C) >>> gnrb = grad(nrb) >>> c1 = cad_op_nurbs(gnrb) >>> c2 = c1.clone() >>> c2.control[2,:] = [1.0,1.0,0.0,1.0] >>> (abs(c2.control-c1.control)).max() < 1.0e-15 True """ cad_nrb = cad_op_nurbs.__new__(type(self)) cad_nrb._clone_data(self) return cad_nrb def elevate(self, *args, **kwargs): """ Degree elevate a cad_nurbs object. Given a polynomial degree to elevate in a parameter direction, degree-elevate the curve. The routine operates on the cad_nurbs object in-place and returns the object. Parameters ---------- axis : int Parameter direction to degree-elevate times : int, optional Polynomial order to elevate Examples -------- Create a random curve, degree elevate, check error: >>> C = np.random.rand(3,3) >>> U = [0,0,0,1,1,1] >>> c1 = cad_nurbs([U], C) >>> c1.degree (2,) >>> c2 = c1.clone().elevate(0, 2) >>> c2.degree (4,) >>> u = np.linspace(0,1,100) >>> xyz1 = c1(u) >>> xyz2 = c2(u) >>> np.allclose(xyz1, xyz2, rtol=0, atol=1e-15) True Create a random surface, degree elevate, check error: >>> C = np.random.rand(3,3,3) >>> U = [0,0,0,1,1,1] >>> V = [0,0,0.5,1,1] >>> s1 = cad_nurbs([U,V], C) >>> s1.degree (2, 1) >>> s2 = s1.clone().elevate(0, 1).elevate(1, 1) >>> s2.degree (3, 2) >>> u = v = np.linspace(0,1,100) >>> xyz1 = s1(u, v) >>> xyz2 = s2(u, v) >>> np.allclose(xyz1, xyz2, rtol=0, atol=1e-15) True """ nrb = NURBS.elevate(self, *args, **kwargs) cad_nrb = cad_nurbs.__new__(type(self)) cad_nrb._clone_data(nrb) cad_nrb.rational = self.rational cad_nrb.orientation = self.orientation cad_nrb._attributs = self._attributs return cad_nrb def evaluate(self, *args, **kwargs): return self._nrb.evaluate(*args, **kwargs) def evaluate_deriv(self, *args, **kwargs): """ evaluates a cad_nurbs object (and its derivatives) at the given parametric values. Degree elevate a cad_nurbs object. Parameters ---------- u : float or array_like The first logical direction v : float or array_like The second logical direction w : float or array_like The third logical direction nderiv : int, optional Derivative order (Default=1) rationalize : int, optional Put to 1 if the cad_nurbs is a NURBS (need to use weights for evaluation) fields : bool or array_like, optional TODO Examples -------- Import the circle curve, compute points position and their derivatives: >>> from caid.cad_geometry import circle >>> geo = circle() >>> nrb = geo[0].extract(1,0.) >>> n = 10 >>> t = np.linspace(0,1,n) >>> Dw = nrb.evaluate_deriv(t) >>> x = Dw[0,:,0] >>> y = Dw[0,:,1] >>> dx = Dw[1,:,0] >>> dy = Dw[1,:,1] >>> plt.figure() >>> t = np.linspace(0,1,100) >>> D = nrb(t) >>> xc = D[:,0] >>> yc = D[:,1] >>> plt.plot(x,y, 'ob') >>> plt.plot(xc,yc, '-r') >>> plt.show() The following example, shows the import of the 2D circle description, the computation of points and their derivatives. 
Finally, we plot the jacobian of the mapping: >>> from caid.cad_geometry import circle as domain >>> nrb = domain()[0] >>> nx = 100 >>> tx = np.linspace(0,1,nx) >>> ny = 100 >>> ty = np.linspace(0,1,ny) >>> Dw = nrb.evaluate_deriv(tx,ty,nderiv=1) >>> x = Dw[0,:,:,0] >>> xdu = Dw[1,:,:,0] >>> xdv = Dw[2,:,:,0] >>> y = Dw[0,:,:,1] >>> ydu = Dw[1,:,:,1] >>> ydv = Dw[2,:,:,1] >>> plt.figure() >>> jac = xdu * ydv - xdv * ydu >>> plt.contourf(x,y,jac) ; plt.colorbar() ; plt.title("Jacobian of the mapping") >>> plt.show() see also :class:`cad_geometry.plotJacobians` """ rationalize = 0 if self.rational: rationalize = 1 kwargs['rationalize'] = rationalize return self._nrb.evaluate_deriv(*args, **kwargs) def extract_face(self, axis, side): """ Extracts a face from the current cad_nurbs object. Parameters ---------- axis : int side : int the ID of the boundary when want to extract Returns ------- a cad_nurbs object """ # ... n = self.shape[axis] p = self.degree[axis] i_bnd = 0 if side == 0: i_bnd = p elif side == 1: i_bnd = n + 1 else: raise ValueError("wrong argument") # ... # ... ubound = self.knots[axis][i_bnd] face = couple_vers_entier(axis, side) # ... nrb_bnd = self.extract(axis,ubound) cad_nrb = cad_nurbs.__new__(type(self)) cad_nrb._array = nrb_bnd.array cad_nrb._knots = nrb_bnd.knots cad_nrb.rational = self.rational cad_nrb.orientation = [self.orientation[face]] cad_nrb._attributs = self._attributs cad_nrb.face = face return cad_nrb def plotBoundariesInfo(self): """ plots some informations about the current cad_nurbs object, including the orientation of boundary. """ from matplotlib import pyplot as plt nrb = self if nrb.dim == 2: # ... def plot_crv( nrb, t1, t2, tangent=True, normal=False, color='b' \ , label=None,scale=1.e-1, width='0.00001'): Dw = nrb.evaluate_deriv(t1) x = Dw[0,:,0] y = Dw[0,:,1] dx = Dw[1,:,0] dy = Dw[1,:,1] D = nrb(t2) xc = D[:,0] yc = D[:,1] d = np.sqrt(dx**2 + dy**2) dx = scale * dx/d dy = scale * dy/d n = len(x) for i in range(0,n): if tangent: arr = plt.Arrow(x[i], y[i], dx[i], dy[i], width=width) if normal: arr = plt.Arrow(x[i], y[i], dy[i], -dx[i], width=width) arr.set_facecolor('g') ax = plt.gca() ax.add_patch(arr) plt.plot(x,y, 'o'+color) if label is not None: plt.plot(xc,yc, '-'+color, label=label) else: plt.plot(xc,yc, '-'+color) # ... t1b = nrb.knots[0][0] ; t1e = nrb.knots[0][-1] t2b = nrb.knots[0][0] ; t2e = nrb.knots[0][-1] t1 = np.linspace(t1b,t1e,10) t2 = np.linspace(t2b,t2e,100) plt.figure() list_colors = ['r', 'g', 'b', 'k', 'y', 'c'] i = 0 for axis in range(0, self.dim): t1b = nrb.knots[axis][0] ; t1e = nrb.knots[axis][-1] t2b = nrb.knots[axis][0] ; t2e = nrb.knots[axis][-1] t1 = np.linspace(t1b,t1e,10) t2 = np.linspace(t2b,t2e,100) for side in range(0, 2): nrb_bnd = nrb.extract_face(axis, side) face = couple_vers_entier(axis, side) color = list_colors[i] plot_crv(nrb_bnd, t1, t2, tangent=False, normal=True, color=color, label='Face '+str(face),scale=1.e-1, width='0.00001') i += 1 plt.legend() class cad_grad_nurbs(cad_op_nurbs): def __init__(self, *args, **kwargs): cad_op_nurbs.__init__(self, *args, **kwargs) class cad_geometry(object): def __new__(typ, *args, **kwargs): obj = object.__new__(typ) obj._list = [] obj._currentElt = -1 obj._internal_faces = [] obj._external_faces = [] obj._connectivity = [] obj._attributs = {} return obj def __init__(self, file=None, geo=None): """ Creates a cad_geometry object. Examples -------- Import a predefined geometry >>> from caid.cad_geometry import circle >>> geo = circle(radius=2.) 
print the number of patchs >>> print geo.npatchs 1 even in 2D, a cad_geometry is a 3D surface >>> print geo.Rd 3 the dimension of the logical domain is >>> print geo.dim 2 Next, we print some information about the connectivity, internal and external faces. These informations are important for a FEM solver. >>> geo.connectivity [] >>> geo.external_faces [[0, 0], [0, 1], [0, 2], [0, 3]] >>> geo.internal_faces [] In the next example, we create a circle cad_geometry object, with a logical grid of 31x31 internal points, using cubic NURBS >>> geo = circle(radius=2., n=[31,31], p=[3,3]) Read XML file describing the geometry >>> geo = cad_geometry("mydomain.xml") """ self._list = [] self._currentElt = -1 self._internal_faces = [] self._external_faces = [] self._connectivity = [] self._attributs = {} self._r_dim = None if file is not None: self.__filename__ = file geo_io = cad_io(self.__filename__, mode="r") self = geo_io.read(self) if geo is not None: # TODO : que faire pour les infos?? self.append(geo) def save(self, file): """ saves the current cad_geometry. The current formats are ['xml', 'txt', 'zip'] see :class:cad_io for more details """ geo_io = cad_io(file, mode="w") geo_io.write(self) @property def internal_faces(self): """ returns the internal faces for the current cad_geometry object """ return self._internal_faces @property def external_faces(self): """ returns the external faces for the current cad_geometry object Examples -------- This example is given from the annulus cad_geometry object >>> from caid.cad_geometry import annulus >>> geo.external_faces [[0, 0], [0, 2]] """ return self._external_faces @property def connectivity(self): """ returns the connectivity of the current cad_geometry object The connectivity is a list of dictionaries. Each dictionary contains two keys: original: a couple of master-patch and master-face as [patch_id, face_id] clone: a couple of slave-patch and slave-face as [patch_id, face_id] Examples -------- This example is given from the annulus cad_geometry object >>> from caid.cad_geometry import annulus >>> geo.connectivity [{'clone': [0, 3], 'original': [0, 1]}] """ return self._connectivity @property def list_all(self): return self._list @property def dim(self): """ Parametric dimension of the geometry object {1,2,3}. """ try: dim = self._list[0].dim except: dim = 0 return dim @property def Rd(self): """ Physical dimension of the geometry object {1,2,3}. """ if self._r_dim is not None: return self._r_dim else: try: Rd = self._list[0].points.shape[-1] except: Rd = 0 return Rd def set_r_dim(self, r_dim): self._r_dim = r_dim def __len__(self): return len(self._list) @property def npatchs(self): """ number of patchs of the geometry """ return len(self._list) def index(self, nrb): return self._list.index(nrb) def set_internal_faces(self, values): """ Sets the list of internal faces Examples -------- This example is given from the annulus cad_geometry object >>> geo.set_internal_faces([[0,1],[0,3]]) """ self._internal_faces = values def set_external_faces(self, values): """ Sets the list of external faces Examples -------- This example is given from the annulus cad_geometry object >>> geo.set_external_faces([[0,0],[0,2]]) """ self._external_faces = values def set_connectivity(self, values): """ Sets the list of connectivities. Connectivities is a list of dictionaries. Each dictionary contains two keys 'original' the master-face and 'clone' the slave-face. 
Each connectivity is a couple [patch_id, face_id] Examples -------- This example is given from the annulus cad_geometry object >>> list_connectivity = [] >>> dict_con = {} >>> dict_con['original'] = [0,1] >>> dict_con['clone'] = [0,3] >>> list_connectivity.append(dict_con) >>> geo.set_connectivity(list_connectivity) """ self._connectivity = values def add_internal_face(self, values): """ add an external face. Args: values: is a couple [patch_id,face_id] Examples -------- This example is given from the annulus cad_geometry object >>> geo.add_external_face([0,2]) """ self._internal_faces.append(values) def add_external_face(self, values): """ add an internal face. Args: values: is a couple [patch_id,face_id] Examples -------- This example is given from the annulus cad_geometry object >>> geo.add_internal_face([0,1]) """ self._external_faces.append(values) def add_connectivity(self, values): """ Add a Connectivity. Args: values: is a dictionary that contains two keys 'original' the master-face and 'clone' the slave-face. Each connectivity is a couple [patch_id, face_id] Examples -------- This example is given from the annulus cad_geometry object >>> dict_con = {} >>> dict_con['original'] = [0,1] >>> dict_con['clone'] = [0,3] >>> geo.add_connectivity(dict_con) """ self._connectivity.append(values) def set_attributs(self, attributs): """ sets attributs for the current cad_geometry object. Needed when constructing the geometry by *hand* """ self._attributs = attributs def set_attribut(self, attribut, value): """ sets attribut to the value value for the current cad_geometry object. Needed when constructing the geometry by *hand* """ self._attributs[attribut] = value def get_attribut(self, name): """ returns attributs for the current cad_geometry object. """ try: return self._attributs[name] except: return None @property def attributs(self): return self._attributs def __next__(self): if self.npatchs == 0: raise StopIteration self._currentElt += 1 if self._currentElt >= self.npatchs: self._currentElt = -1 raise StopIteration return self._list[self._currentElt] def __iter__(self): for nrb in self._list: yield nrb def __getitem__(self, key): return self._list[key] def append(self, nrb): """ append a cad_nurbs object in the current cad_geometry object. After appending all cad_nurbs, please do not forget to specify internal/external faces and the global connectivity. Args: nrb: a cad_nurbs object Examples -------- >>> from caid.cad_geometry import quart_circle, cad_geometry >>> nrb = quart_circle()[0] >>> geo = cad_geometry() >>> geo.append(nrb) """ list_objects = ["cad_nurbs", "cad_object", "cad_op_nurbs", "cad_grad_nurbs"] if nrb.__class__.__name__ in list_objects: self._list.append(nrb) else: print(("Warning: inserted object is not in ", list_objects)) print(("the current object is ", nrb.__class__.__name__)) cad_nrb = cad_nurbs.__new__(cad_nurbs) cad_nrb._array = nrb.array cad_nrb._knots = nrb.knots self._list.append(cad_nrb) # raise NameError("cad_geometry class only handles cad_nurbs objects") def remove(self, nrb): """ remove a cad_nurbs object in the current cad_geometry object. After removing a cad_nurbs, please do not forget to specify internal/external faces and the global connectivity. 
Args: nrb: a cad_nurbs object Examples -------- >>> from caid.cad_geometry import quart_circle, cad_geometry >>> nrb = quart_circle()[0] >>> geo = cad_geometry() >>> geo.append(nrb) >>> print geo.npatchs 1 >>> geo.remove(nrb) >>> print geo.npatchs 0 """ self._list.remove(nrb) def initialize_info(self): self._internal_faces = [] self._external_faces = [] self._connectivity = [] def translate(self, displ, axis=None): for nrb in self: nrb.translate(displ, axis=axis) def move(self, displ, axis=None): for nrb in self: nrb.move(displ, axis=axis) def scale(self, scale, axis=None): for nrb in self: nrb.scale(scale, axis=axis) def rotate(self, angle, axis=2): for nrb in self: nrb.rotate(angle, axis=axis) def refine(self, id=None, list_t=None, list_p=None, list_m=None): """ refine the current cad_geometry object. If id is not specified, the refinement will affect all patchs. Otherwise, it will refine the given patch. Args: list_t (list of 1D numpy array): these are the internal knots to insert. You can use duplicated knots for the k-refinement list_p (list of int): this is the list of the final B-spline degrees list_m (list of int): this is the list of multiplicities of inserted knots for each axis Examples -------- This example is taken from a the construction of the unit square as a cad_geometry >>> list_t = None >>> if n is not None: >>> list_t = [] >>> for axis in range(0,cad_nrb.dim): >>> ub = cad_nrb.knots[axis][0] >>> ue = cad_nrb.knots[axis][-1] >>> list_t.append(np.linspace(ub,ue,n[axis]+2)[1:-1]) >>> list_p = None >>> if p is not None: >>> list_p = [] >>> for axis in range(0,cad_nrb.dim): >>> list_p.append(p[axis] - cad_nrb.degree[axis]) >>> geo.refine(list_t=list_t, list_p=list_p, list_m=[1,1]) """ if id is not None: list_id = [id] else: list_id = list(range(0, self.npatchs)) ll_hrefine = False if list_t is not None: ll_hrefine = True ll_prefine = False if list_p is not None: ll_prefine = True if list_m is None: list_m = np.ones(self.dim, dtype=np.int) for id in list_id: patch = self._list[id] if ll_prefine: P_ = patch dim = P_.dim for i in range(0,dim): if list_p[i] > 0: P_ = P_.clone().elevate(i, times=list_p[i]) self._list[id] = P_ patch = self._list[id] if ll_hrefine: P_ = patch dim = P_.dim for i in range(0,dim): if len(list_t[i]) > 0: m = list_m[i] for j in range(0,m): P_ = P_.clone().refine(i, list_t[i]) self._list[id] = P_ def expand(self): """ returns a list of cad_geometries for each patch the connectivity is then broken. .. todo: handle the internal/external faces automatically """ list_cad_geo = [] for i in range(0, self.npatchs): tmp = cad_geometry(geo=self._list[i]) list_cad_geo.append(tmp) return list_cad_geo def evalMesh(self, id=None, npts=3, arr_npts=None): """ Evaluate and return the mesh Kwargs: id: if None the evaluation will be done over all patchs. 
Otherwise, it will be done on the specified patch Returns: TODO """ if id is None: list_id = list(range(0, self.npatchs)) else: list_id = [id] list_Mesh = [] for i in list_id: geo = self._list[i] type_geo = geo.__class__.__name__ if type_geo == "cad_nurbs" : list_Mesh.append(geo.evalMesh(npts, arr_npts)) return list_Mesh def copy(self): """ copy the current cad_geometry object Returns: a cad_geometry """ geo = cad_geometry.__new__(type(self)) for i in range(0, self.npatchs): P = self[i] geo.append(P.copy()) geo.set_internal_faces(self.internal_faces) geo.set_external_faces(self.external_faces) geo.set_connectivity(self.connectivity) return geo def clone(self): """ clone the current cad_geometry object Returns: a cad_geometry """ geo = cad_geometry.__new__(type(self)) for i in range(0, self.npatchs): P = self[i] geo.append(P.clone()) geo.set_internal_faces(self.internal_faces) geo.set_external_faces(self.external_faces) geo.set_connectivity(self.connectivity) geo.set_attributs(self.attributs) return geo def polarExtrude(self, t=0., xyzc=None): """ this routine creates a 2D geometry from a 1D closed curve self must be a curve Kwargs: xyzc (array): the center of the domain. default value is the mean of each of the coordinates t (float): if t> 0. it constructs an internal curve using a homothetic with respect to xyzc Returns: a cad_geometry """ crv = self[0] P = crv.points x = P[:,0] ; y = P[:,1] if xyzc is None: xc = x.mean() ; yc = y.mean() else: xc = xyzc[0] ; yc = xyzc[1] Qx = xc + t * (x-xc) Qy = yc + t * (y-yc) Q = np.zeros_like(P) Q[:,0] = Qx ; Q[:,1] = Qy n = P.shape[0] C = np.zeros((2,n,3)) C[0,:,:] = Q C[1,:,:] = P W = np.zeros((2,n)) W[0,:] = crv.weights W[1,:] = crv.weights knots = [np.asarray([0., 0., 1., 1.]), crv.knots[0]] srf = cad_nurbs(knots, C, weights=W) srf.rational = crv.rational srf.orientation = [1,-1,-1,1] geo = cad_geometry() geo.append(srf) if t > 0.: geo._internal_faces = [[0,0],[0,2]] geo._external_faces = [[0,1],[0,3]] if t == 0.: geo._internal_faces = [[0,0],[0,1],[0,2]] geo._external_faces = [[0,3]] geo._connectivity = [] dict_con = {} dict_con['original'] = [0,0]; dict_con['clone'] = [0,2] geo._connectivity.append(dict_con) return geo def merge(self, geo_s, npts=5, tol=1.e-3): """ merge two cad_geometries and return a new cad_geometry including an update for internal/external faces and connectivity. TODO: treate the 1D and 3D cases Args: geos_s (cad_geometry): This is the slave cad_geometry Kwargs: npts (int): used to evaluate and check equality of the extracted boundaries. Default value is 5 Returns: a cad_geometry Examples -------- This example is taken from the circle_5mp >>> # ... Import the quart_circle domain >>> geo_1 = quart_circle(rmin=rmin, rmax=rmax, n=n,p=p) >>> # ... Import the quart_circle domain >>> geo_2 = quart_circle(rmin=rmin, rmax=rmax, n=n,p=p) >>> geo_2[0].rotate(0.5*np.pi) >>> # ... Import the quart_circle domain >>> geo_3 = quart_circle(rmin=rmin, rmax=rmax, n=n,p=p) >>> geo_3[0].rotate(np.pi) >>> geo_3[0].reverse(0) >>> # ... Import the quart_circle domain >>> geo_4 = quart_circle(rmin=rmin, rmax=rmax, n=n,p=p) >>> geo_4[0].rotate(1.5*np.pi) >>> geo_4[0].reverse(0) >>> # ... Import the circle domain >>> geo_5 = circle(radius=rmin,n=n,p=p) >>> geo_5[0].rotate(0.25*np.pi) >>> geo_5[0].rotate(0.5*np.pi) >>> geo_12 = geo_1.merge(geo_2) >>> geo_34 = geo_3.merge(geo_4) >>> geo_1234 = geo_12.merge(geo_34) >>> geo = geo_1234.merge(geo_5) """ geo_m = self geo = cad_geometry() # ... 
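# Illustrative sketch of polarExtrude above: a closed degree-1 curve (here a
# diamond, values are examples only) is extruded towards its barycenter with
# the homothetic factor t, giving a single-patch 2D surface.  Assumes only
# numpy and the classes of this module.
#   >>> U = [0., 0., 0.25, 0.5, 0.75, 1., 1.]
#   >>> P = [[1., 0.], [0., 1.], [-1., 0.], [0., -1.], [1., 0.]]
#   >>> crv_geo = cad_geometry()
#   >>> crv_geo.append(cad_nurbs([U], P))
#   >>> srf_geo = crv_geo.polarExtrude(t=0.5)
#   >>> srf_geo.npatchs, srf_geo[0].dim
#   (1, 2)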
copy master patchs for i_m in range(0, geo_m.npatchs): geo.append(geo_m[i_m]) # ... copy slave patchs for i_s in range(0, geo_s.npatchs): geo.append(geo_s[i_s]) connectivity = [] for dict_con in geo_m._connectivity: connectivity.append(dict_con) for dict_con in geo_s._connectivity: [i_m,f_m] = dict_con['original'] [i_s,f_s] = dict_con['clone'] _dict_con = {} _dict_con['original'] = [i_m+geo_m.npatchs,f_m] _dict_con['clone'] = [i_s+geo_m.npatchs,f_s] connectivity.append(_dict_con) nfaces = 2 * geo_m.dim intext_faces_m = np.zeros((geo_m.npatchs, nfaces), dtype=np.int) intext_faces_s = np.zeros((geo_s.npatchs, nfaces), dtype=np.int) for i_m in range(0, geo_m.npatchs): nrb_m = geo_m[i_m] for i_s in range(0, geo_s.npatchs): nrb_s = geo_s[i_s] f_m = 0 for axis_m in range(0, nrb_m.dim): for i_bnd_m in range(0, 2): bnd_m = nrb_m.extract_face(axis_m, i_bnd_m).clone() u_m = np.linspace(bnd_m.knots[0][0],bnd_m.knots[0][-1],npts) P_m = bnd_m(u_m) f_s = 0 for axis_s in range(0, nrb_s.dim): for i_bnd_s in range(0, 2): bnd_s = nrb_s.extract_face(axis_s, i_bnd_s).clone() u_s = np.linspace(bnd_s.knots[0][0],bnd_s.knots[0][-1],npts) P_s = bnd_s(u_s) isSameFace = np.allclose(P_m, P_s, rtol=0, atol=tol) isInvertFace = np.allclose(P_m[::-1], P_s, rtol=0, atol=tol) if isSameFace: dict_con = {} dict_con['original'] = [i_m,f_m] dict_con['clone'] = [i_s+geo_m.npatchs,f_s] connectivity.append(dict_con) intext_faces_m[i_m, f_m] = 1 intext_faces_s[i_s, f_s] = 1 if isInvertFace: print(("Merging Error: Found uncorrect orientation. Please change the orientation of the patchs (master,slave): ("\ ,i_m,",",i_s,").")) print(("Occured on the faces (master, slave): (",f_m,",",f_s,").")) f_s += 1 f_m += 1 for intFace in geo_m._internal_faces: [i, f] = intFace if intext_faces_m[i, f] == 0: intext_faces_m[i, f] = 1 for intFace in geo_s._internal_faces: [i,f] = intFace if intext_faces_s[i, f] == 0: _intFace = [i+geo_m.npatchs,f] intext_faces_s[i, f] = 1 internalFaces = [] externalFaces = [] for i_m in range(0, geo_m.npatchs): for f_m in range(0, nfaces): if intext_faces_m[i_m,f_m] == 1: internalFaces.append([i_m, f_m]) else: externalFaces.append([i_m, f_m]) for i_s in range(0, geo_s.npatchs): for f_s in range(0, nfaces): if intext_faces_s[i_s,f_s] == 1: internalFaces.append([i_s+geo_m.npatchs, f_s]) else: externalFaces.append([i_s+geo_m.npatchs, f_s]) geo._connectivity = connectivity geo._internal_faces = internalFaces geo._external_faces = externalFaces return geo def update(self, npts=5): list_geo = self.expand() if len(list_geo) == 1: self.update_patch(i_m=0, npts=npts) if len(list_geo) > 1: geo = merge(list_geo, npts=npts) self.set_internal_faces(geo.internal_faces) self.set_external_faces(geo.external_faces) self.set_connectivity(geo.connectivity) def update_patch(self, i_m=0, npts=50): """ merge two cad_geometries and return a new cad_geometry including an update for internal/external faces and connectivity. TODO: treate the 1D and 3D cases Args: geos_s (cad_geometry): This is the slave cad_geometry Kwargs: npts (int): used to evaluate and check equality of the extracted boundaries. Default value is 50 Returns: a cad_geometry Examples -------- This example is taken from the circle_5mp >>> # ... Import the quart_circle domain >>> geo_1 = quart_circle(rmin=rmin, rmax=rmax, n=n,p=p) >>> # ... Import the quart_circle domain >>> geo_2 = quart_circle(rmin=rmin, rmax=rmax, n=n,p=p) >>> geo_2[0].rotate(0.5*np.pi) >>> # ... 
Import the quart_circle domain >>> geo_3 = quart_circle(rmin=rmin, rmax=rmax, n=n,p=p) >>> geo_3[0].rotate(np.pi) >>> geo_3[0].reverse(0) >>> # ... Import the quart_circle domain >>> geo_4 = quart_circle(rmin=rmin, rmax=rmax, n=n,p=p) >>> geo_4[0].rotate(1.5*np.pi) >>> geo_4[0].reverse(0) >>> # ... Import the circle domain >>> geo_5 = circle(radius=rmin,n=n,p=p) >>> geo_5[0].rotate(0.25*np.pi) >>> geo_5[0].rotate(0.5*np.pi) >>> geo_12 = geo_1.merge(geo_2) >>> geo_34 = geo_3.merge(geo_4) >>> geo_1234 = geo_12.merge(geo_34) >>> geo = geo_1234.merge(geo_5) """ geo_m = self connectivity = [] nfaces = 2 * geo_m.dim intext_faces_m = np.zeros((geo_m.npatchs, nfaces), dtype=np.int) nrb_m = geo_m[i_m] f_m = 0 for axis_m in range(0, nrb_m.dim): for i_bnd_m in range(0, 2): f_m += 1 bnd_m = nrb_m.extract_face(axis_m, i_bnd_m).clone() u_m = np.linspace(bnd_m.knots[0][0],bnd_m.knots[0][-1],npts) P_m = bnd_m(u_m) f_s = 0 for axis_s in range(0, nrb_s.dim): for i_bnd_s in range(0, 2): f_s += 1 bnd_s = nrb_s.extract_face(axis_s, i_bnd_s).clone() u_s = np.linspace(bnd_s.knots[0][0],bnd_s.knots[0][-1],npts) P_s = bnd_s(u_s) isSameFace = np.allclose(P_m, P_s) isInvertFace = np.allclose(P_m[::-1], P_s) if isSameFace: dict_con = {} dict_con['original'] = [i_m,f_m] dict_con['clone'] = [i_m,f_s] connectivity.append(dict_con) intext_faces_m[i_m, f_m] = 1 intext_faces_m[i_m, f_s] = 1 if isInvertFace: print("Merging Error: Found uncorrect orientation.") print(("Occured on the faces (master, slave): (",f_m,",",f_s,").")) internalFaces = [] externalFaces = [] for i_m in range(0, geo_m.npatchs): for f_m in range(0, nfaces): if intext_faces_m[i_m,f_m] == 1: internalFaces.append([i_m, f_m]) else: externalFaces.append([i_m, f_m]) self.set_connectivity(connectivity) self.set_internal_faces(internalFaces) self.set_external_faces(externalFaces) def split(self,patch_id,t,axis,normalize=[True,False]): """ split the patch with id patch_id of the current cad_geometry, with respect to the knot t in the direction axis """ geo = self nrb = geo[patch_id] _nrb = nrb.clone() noccur = len([s for s in _nrb.knots[axis] if s == t]) p = _nrb.degree[axis] if p > noccur: list_t = t * np.ones(p-noccur) nrb.refine(axis,list_t) # ... it = min ([i for (i,s) in enumerate(nrb.knots[axis]) if s==t]) if nrb.dim == 1: axis = 0 u_1 = [s for s in nrb.knots[0] if s <= t] u_1.append(u_1[-1]) p = nrb.degree[axis] bezier = False if len(u_1) == 2*p+2: bezier = True if normalize[0] or bezier: _u = [ (u - u_1[0]) / (u_1[-1] - u_1[0]) for u in u_1 ] u_1 = _u P_1 = nrb.points[:it,:] W_1 = nrb.weights[:it] nrb_1 = cad_nurbs([u_1], P_1, weights=W_1) u_2 = [s for s in nrb.knots[0] if s >= t] u_2.insert(0,u_2[0]) p = nrb.degree[axis] bezier = False if len(u_2) == 2*p+2: bezier = True if normalize[1] or bezier: _u = [ (u - u_2[0]) / (u_2[-1] - u_2[0]) for u in u_2 ] u_2 = _u P_2 = nrb.points[it-1:] W_2 = nrb.weights[it-1:] nrb_2 = cad_nurbs([u_2], P_2, weights=W_2) # ... 
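# Illustrative sketch of split(): cutting a single square patch at the
# interior knot t=0.5 along the first axis produces two patches.  It assumes
# square() follows the same refinement conventions as the constructors above,
# so that 0.5 is indeed an interior knot.
#   >>> g = square(n=[3, 3], p=[2, 2])        # interior knots 0.25, 0.5, 0.75
#   >>> g.npatchs
#   1
#   >>> g.split(0, 0.5, axis=0)
#   >>> g.npatchs
#   2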
if nrb.dim == 2: if axis == 0: u_1 = [s for s in nrb.knots[0] if s <= t] u_1.append(u_1[-1]) p = nrb.degree[axis] bezier = False if len(u_1) == 2*p+2: bezier = True if normalize[0] or bezier: _u = [ (u - u_1[0]) / (u_1[-1] - u_1[0]) for u in u_1 ] u_1 = _u v = nrb.knots[1] P_1 = nrb.points[:it,:,:] W_1 = nrb.weights[:it,:] nrb_1 = cad_nurbs([u_1,v], P_1, weights=W_1) u_2 = [s for s in nrb.knots[0] if s >= t] u_2.insert(0,u_2[0]) p = nrb.degree[axis] bezier = False if len(u_2) == 2*p+2: bezier = True if normalize[1] or bezier: _u = [ (u - u_2[0]) / (u_2[-1] - u_2[0]) for u in u_2 ] u_2 = _u P_2 = nrb.points[it-1:,:,:] W_2 = nrb.weights[it-1:,:] nrb_2 = cad_nurbs([u_2,v], P_2, weights=W_2) if axis == 1: u = nrb.knots[0] v_1 = [s for s in nrb.knots[1] if s <= t] v_1.append(v_1[-1]) p = nrb.degree[axis] bezier = False if len(v_1) == 2*p+2: bezier = True if normalize[0] or bezier: _v = [ (v - v_1[0]) / (v_1[-1] - v_1[0]) for v in v_1 ] v_1 =_v P_1 = nrb.points[:,:it,:] W_1 = nrb.weights[:,:it] nrb_1 = cad_nurbs([u,v_1], P_1, weights=W_1) u = nrb.knots[0] v_2 = [s for s in nrb.knots[1] if s >= t] v_2.insert(0,v_2[0]) p = nrb.degree[axis] bezier = False if len(v_2) == 2*p+2: bezier = True if normalize[1] or bezier: _v = [ (v - v_2[0]) / (v_2[-1] - v_2[0]) for v in v_2 ] v_2 = _v P_2 = nrb.points[:,it-1:,:] W_2 = nrb.weights[:,it-1:] nrb_2 = cad_nurbs([u,v_2], P_2, weights=W_2) if nrb.dim == 3: raise NotImplementedError("Not yet implemented for 3D objects") self.remove(nrb) self.append(nrb_1) self.append(nrb_2) self.initialize_info() # self.update() def toBezier(self,patch_id): """ replace the current cad_nurbs by the corresponding extracted Bezier elements """ nrb = self[patch_id].clone() axis = 0 list_t = np.unique(nrb.knots[axis])[1:-1] geo_t = self.clone() for i,t in enumerate(list_t): geo_t.split(i,t,axis) if nrb.dim == 1: raise NotImplementedError("Not yet implemented") if nrb.dim == 2: geo_f = cad_geometry() for i in range(0,self.npatchs): if i != patch_id: srf = self[i] geo_f.append(srf) list_geo = geo_t.expand() for _geo in list_geo: axis = 1 list_t = np.unique(nrb.knots[axis])[1:-1] for i,t in enumerate(list_t): _geo.split(i,t,axis) for srf in _geo: geo_f.append(srf) if nrb.dim == 3: raise NotImplementedError("Not yet implemented") geo_f.initialize_info() return geo_f def add_patch(self, nrb): self.append(nrb) self.initialize_info() self.update() def to5patchs(self, face): """ convert the current cad_geometry-containing 1 patch: a domain with one hole- to a 5 patchs description. the user must provide the internal face """ # TODO must be updated with the new signature of extract_face raise NotImplementedError("Not yet implemented") if self.dim in [1,3]: raise ValueError("This functions is only for 2D domains") axis, side = entier_vers_couple(face) geo = self.clone() list_t = [0.25,0.5,0.75] for i,t in enumerate(list_t[:-1]): geo.split(i,t,axis) geo.split(-1,list_t[-1],axis, normalize=[True,True]) _geo = cad_geometry() for nrb in geo[::-1]: _geo.append(nrb) geo = _geo # ... 
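# Illustrative sketch of toBezier() above: with one interior knot per
# direction, splitting at every interior knot turns the single quadratic patch
# into its four Bezier elements.  Same assumption on square() as above.
#   >>> g = square(n=[1, 1], p=[2, 2])        # interior knot 0.5 in u and v
#   >>> gb = g.toBezier(0)
#   >>> gb.npatchs
#   4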
Import the circle domain c0 = geo[0].extract_face(axis, side) c1 = geo[1].extract_face(axis, side) c1.reverse(0) c2 = geo[2].extract_face(axis, side) c2.reverse(0) c3 = geo[3].extract_face(axis, side) from igakit.cad import coons curves = [[c1,c3],[c0,c2]] srf = coons(curves) geo_t = cad_geometry() geo_t.append(cad_nurbs(srf.knots, srf.points, weights=srf.weights)) geo[1].reverse(1) geo[2].reverse(1) geo[0].transpose() geo[3].transpose() geo.update() geo_t[0].transpose() geo_f = geo.merge(geo_t) return geo_f def plotMesh(self, MeshResolution=3, color='k', ax=None, arr_npts=None): """ plot the corresponding mesh of the current cad_geometry. Kwargs: MeshResolution (int): Number of points inside each element. Default value is 3 color (string): mesh color arr_npts (np.array): Table containig the number of points inside element in each direction """ from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt geo = self if ax is None: _fig = plt.figure() if self.dim == 3: ax = _fig.add_subplot(111, projection='3d') for patch_id in range(0, geo.npatchs): list_Lines = geo.evalMesh(npts=MeshResolution, arr_npts=arr_npts)[patch_id] for Line in list_Lines: npts = Line.shape[0] list_iS = list(range(0,npts-1)) ; list_iE = list(range(1, npts)) for (i,i_1) in zip(list_iS, list_iE): P = Line[i ,:] P_1 = Line[i_1,:] x = P[0] ; y = P[1] ; z = P[2] x1 = P_1[0] ; y1 = P_1[1] ; z1 = P_1[2] if self.dim < 3: plt.plot([x,x1], [y,y1], '-'+str(color)) if self.dim == 3: for Line in list_Lines: x = Line[:,0] y = Line[:,1] z = Line[:,2] ax.plot(x,y,z, c='k') def plotJacobians(self, MeshResolution=10, vmin=None, vmax=None): """ plot the jacobian of the current cad_geometry object. The plot will be done on every patch from the cad_geometry Kwargs: MeshResolution (int): number of points per cell. Default values is 10 """ from matplotlib.pyplot import pcolor list_jac = [] list_xyz = [] for i in range(0, self.npatchs): nrb = self[i] list_t = [] for axis in range(0, nrb.dim): tx = np.linspace(0,1,MeshResolution) list_t.append(tx) Dw = nrb.evaluate_deriv(*list_t,nderiv=1) if self.dim in [1,3]: raise NotImplementedError("Error: Not yet implemented.") x = Dw[0,:,:,0] xdu = Dw[1,:,:,0] xdv = Dw[2,:,:,0] y = Dw[0,:,:,1] ydu = Dw[1,:,:,1] ydv = Dw[2,:,:,1] jac = xdu * ydv - xdv * ydu if np.abs(jac.max()) < 1.e-6: print(("=== patch ",i, " ===")) print(("jacobian[0,0] : ", jac[0,0])) print(("jacobian[0,-1] : ", jac[0,-1])) print(("jacobian[-1,0] : ", jac[-1,0])) print(("jacobian[-1,-1] : ", jac[-1,-1])) print(("min(jacobian) : ", jac.min())) print(("max(jacobian) : ", jac.max())) list_jac.append(jac) list_xyz.append([x,y]) if vmin is None: vmin = np.min(np.asarray([jac.min() for jac in list_jac])) if vmax is None: vmax = np.max(np.asarray([jac.max() for jac in list_jac])) for (xyz,jac) in zip(list_xyz,list_jac): if self.dim == 2: x = xyz[0] ; y = xyz[1] pcolor(x,y,jac,vmin=vmin,vmax=vmax) def bezier_extract(self): # TODO to be optimized. # construction of local matrices must be done in Fortran or c # ... check if we are using periodic bsplines is_periodic_uniform_bspline = False for nrb in self: condition = True for axis in range(0, nrb.dim): condition = condition \ and (len(nrb.breaks(axis))+2*nrb.degree[axis] == \ len(np.unique(nrb.knots[axis]))) is_periodic_uniform_bspline = is_periodic_uniform_bspline or condition if is_periodic_uniform_bspline and (len(self) > 1): raise ValueError("periodic uniform bsplines works only for 1 patch") if is_periodic_uniform_bspline: return self.bezier_extract_periodic() # ... 
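# ----------------------------------------------------------------------
# Aside (added for clarity, not part of the original caid source): the
# jacobian test in plotJacobians above boils down to evaluating
# jac = xdu*ydv - xdv*ydu on a grid and inspecting its sign and magnitude.
# A minimal numpy-only sketch with the analytic map x = u*(1+v), y = v:
#
#     import numpy as np
#     u = np.linspace(0., 1., 10)
#     v = np.linspace(0., 1., 10)
#     U, V = np.meshgrid(u, v, indexing='ij')
#     xdu, xdv = 1.0 + V, U                            # derivatives of x(u,v) = u*(1+v)
#     ydu, ydv = np.zeros_like(U), np.ones_like(U)     # derivatives of y(u,v) = v
#     jac = xdu * ydv - xdv * ydu
#     print(jac.min(), jac.max())   # stays >= 1: no folding in this map
# ----------------------------------------------------------------------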
from caid.utils.extraction import BezierExtraction from caid.numbering.connectivity import connectivity from scipy.sparse import csr_matrix, kron from time import time # t_begin = time() geo = self con = connectivity(geo) con.init_data_structure() # con.printinfo(with_LM=True, with_IEN=False, with_ID=False) # con.printinfo() # t_end = time() # print ">> time for init data structure ", t_end-t_begin # # t_begin = time() geo_ref = cad_geometry() list_extractors = [] list_matrices = [] for i in range(0, geo.npatchs): nrb = geo[i] extractor = BezierExtraction(nrb, check=False, verbose=False) list_extractors.append(extractor) list_matrices.append(extractor.matrices) nrb_ref = extractor.nrb_ref geo_ref.append(nrb_ref) # print "**********" # print "knots[0] ",nrb.knots[0] # print "knots[1] ",nrb.knots[1] # print "**********" # # print "**********" # print "knots-ref[0] ",nrb_ref.knots[0] # print "knots-ref[1] ",nrb_ref.knots[1] # print "**********" # t_end = time() # print ">> time for extraction ", t_end-t_begin con_ref = connectivity(geo_ref) con_ref.init_data_structure() # con_ref.printinfo(with_LM=True, with_IEN=False, with_ID=False) # t_begin = time() list_lmatrices = [] list_i = list(range(0, geo.npatchs)) for i in list_i: nrb = geo[i] nrb_ref = geo_ref[i] local_IEN = con.IEN[i] local_IEN_ref = con_ref.IEN[i] local_LM = con.LM[i] local_LM_ref = con_ref.LM[i] # local_ID = con.ID_loc[i] # local_ID_ref = con_ref.ID_loc[i] nelts = con.list_nel[i] nelts_ref = con_ref.list_nel[i] matrices = list_matrices[i] assert(nelts==nelts_ref) # t_mean = [] lmatrices = [] for elt in range(0, nelts): # t_elt_begin = time() # shif values because LM are 1 based indices list_iloc = np.asarray(local_LM[:,elt]) - 1 list_iloc_ref = np.asarray(local_LM_ref[:,elt]) - 1 # list_iloc = np.asarray(local_IEN[:,elt]) # list_iloc_ref = np.asarray(local_IEN_ref[:,elt]) # print "=======================" # print ">>>> element ", elt # print "list_iloc ", list_iloc # print "list_iloc_ref ", list_iloc_ref if geo.dim == 1: M = matrices[0] if geo.dim == 2: M1 = matrices[0] ; M2 = matrices[1] M = csr_matrix(kron(M2,M1)) # print M.shape, M1.shape, M2.shape if geo.dim == 3: M1 = matrices[0] ; M2 = matrices[1] ; M3 = matrices[2] M21 = csr_matrix(kron(M2,M1)) M = csr_matrix(kron(M3,M21)) Mloc = np.zeros((len(list_iloc_ref), len(list_iloc))) for j_num, j in enumerate(list_iloc): # print "----" for j_ref_num, j_ref in enumerate(list_iloc_ref): # print j, j_ref, j_num, j_ref_num Mloc[j_ref_num, j_num] = M[j_ref, j] # print Mloc.shape # print "=======================" Mloc = csr_matrix(Mloc) lmatrices.append(Mloc) # t_elt_end = time() # t_mean.append(t_elt_end-t_elt_begin) # # print ">>> mean time for an element " \ # , np.asarray(t_mean).sum()/nelts # print lmatrices # print [M.shape for M in lmatrices] list_lmatrices.append(lmatrices) # t_end = time() # print ">> time for local matrices ", t_end-t_begin return geo_ref, list_lmatrices def bezier_extract_periodic(self): """ self must contain one single object """ geo_ref = [] list_lmatrices = [] # ... convert the periodic knot vectors to open knot vectors geo_ref = cad_geometry() nrb = self[0] nrb_ref = nrb for axis in range(0, self.dim): nrb_ref = nrb_ref.copy().clamp(axis) geo_ref.append(nrb_ref) # ... # ... 
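# ----------------------------------------------------------------------
# Aside (added for clarity, not part of the original caid source): in the
# 2D/3D branches of bezier_extract above, the per-element extraction
# operator is assembled as a Kronecker product of the 1D operators, with
# the last direction varying slowest (kron(M2, M1)). A toy scipy.sparse
# check of the shapes involved:
#
#     import numpy as np
#     from scipy.sparse import csr_matrix, kron
#     M1 = csr_matrix(np.array([[1., 0.5], [0., 0.5]]))    # toy 1D operator, direction u
#     M2 = csr_matrix(np.array([[1., 0. ], [0.5, 0.5]]))   # toy 1D operator, direction v
#     M = csr_matrix(kron(M2, M1))
#     print(M.shape)   # (4, 4): the tensor-product element operator
# ----------------------------------------------------------------------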
construct the bernstein basis nrb = geo_ref[0].copy() list_t = [] for axis in range(0, nrb.dim): brk, mult = nrb.breaks(axis=axis, mults=True) nbrk = len(mult) mult = np.asarray(mult) times = nrb.degree[axis] * np.ones(nbrk, dtype=np.int) - mult list_r = [] for t,k in zip(brk, times): for i in range(0, k): list_r.append(t) list_t.append(list_r) geo_ref.refine(id=0, list_t=list_t) # ... # print (">>> construct bernstein geometry done.") nrb = self[0] # ... construct matrix conversion from caid.conversion.tensorial_bsplines import matrix_conversion_ubspline_to_bernstein from scipy.sparse import csr_matrix, kron axis = 0 k = nrb.degree[axis] + 1 M_dir = matrix_conversion_ubspline_to_bernstein(k) M = csr_matrix(M_dir) for axis in range(1, nrb.dim): k = nrb.degree[axis] + 1 M_dir = matrix_conversion_ubspline_to_bernstein(k) M = csr_matrix(kron(M_dir,M)) # ... # print (">>> construct conversion matrix done.") # print M.shape # ... compute number of elements nelts = 1 for axis in range(0, nrb.dim): nelts *= len(nrb.breaks(axis)) - 1 # ... # print (">>> number of elements: ", nelts) # ... copy the matrix conversion for each element list_lmatrices = [] lmatrices = [] for elt in range(0, nelts): lmatrices.append(M) list_lmatrices.append(lmatrices) # ... return geo_ref, list_lmatrices def to_bezier_patchs_1d(self, filename=None): geo_ref, list_lmatrices = self.bezier_extract() def to_bezier_patchs_2d(self, filename=None): geo = self from caid.numbering.connectivity import connectivity con = connectivity(self) con.init_data_structure() geo_ref, list_lmatrices = self.bezier_extract() # TODO to replace with a loop over patchs # MUST BE DONE USING geo AND NOT geo_ref # nrb = geo_ref[0] nrb = geo[0] lmatrices = list_lmatrices[0] local_LM = con.LM[0] # ... # ... # we loop over each element and generate the P, # ... # we start by matching the 1D index with the 2D one lpi_n = nrb.shape lpi_p = nrb.degree list_Index = list(range(0, np.asarray(lpi_n).prod())) lpi_Index = np.asarray(list_Index).reshape(lpi_n[::-1]) lpi_Index = lpi_Index.transpose() list_i = list(range(0,lpi_n[0]-1,lpi_p[0])) list_j = list(range(0,lpi_n[1]-1,lpi_p[1])) lpi_nElt = [len(list_i), len(list_j)] list_IndexElt = list(range(0, np.asarray(lpi_nElt).prod())) list_IndexElt = np.asarray(list_IndexElt).reshape(lpi_nElt[::-1]) list_IndexElt = list_IndexElt.transpose() # ................................................. # ... sets the list of Nodes list_indexNodes = [] list_nodeData = [] # list_i = range(0,lpi_n[0],lpi_p[0]) # list_j = range(0,lpi_n[1],lpi_p[1]) list_i = list(range(0,lpi_n[0])) list_j = list(range(0,lpi_n[1])) for enum_j, j in enumerate(list_j): for enum_i, i in enumerate(list_i): # compute index element index i_elt = enum_i + enum_j * len(list_i) pts_x = nrb.points[i,j,0] pts_y = nrb.points[i,j,1] # ... # compute the boundary code, for dirichlet # ... boundaryCode = 0 if j in [0,lpi_n[1] - 1]: boundaryCode = 1 if i in [0,lpi_n[0] - 1]: boundaryCode = 1 # ... nodeData = [[boundaryCode], [pts_x, pts_y]] lineNodeData = [] for data in nodeData: for d in data: lineNodeData.append(d) list_nodeData.append(lineNodeData) # ... # ................................................. # ................................................. # ... 
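# ----------------------------------------------------------------------
# Aside (added for clarity, not part of the original caid source): the
# reshape/transpose trick used for lpi_Index above yields an array whose
# [i, j] entry is the flat control-point index i + j*n0 (first index runs
# fastest), which is exactly what the element and B-net indexing below
# assumes:
#
#     import numpy as np
#     n0, n1 = 4, 3
#     Index = np.arange(n0 * n1).reshape((n1, n0)).transpose()
#     print(Index[2, 1], 2 + 1 * n0)   # both print 6
# ----------------------------------------------------------------------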
sets the list of Elements # MUST BE DONE USING geo_ref AND NOT geo nrb = geo_ref[0] list_elementData = [] # list_i = list(range(0,lpi_n[0]-1,lpi_p[0])) # list_j = list(range(0,lpi_n[1]-1,lpi_p[1])) nx_elt = len(np.unique(nrb.knots[0])) - 1 ny_elt = len(np.unique(nrb.knots[1])) - 1 list_i = list(range(0,nx_elt)) list_j = list(range(0,ny_elt)) for enum_j, j in enumerate(list_j): for enum_i, i in enumerate(list_i): # compute index element index i_elt = enum_i + enum_j * len(list_i) # TODO for each element, we must compute its neighbours neighbours = [-1, -1, -1, -1] pts_x = nrb.points[i:i+lpi_p[0]+1,j:j+lpi_p[1]+1,0] pts_y = nrb.points[i:i+lpi_p[0]+1,j:j+lpi_p[1]+1,1] pts_x = pts_x.reshape(pts_x.size) pts_y = pts_x.reshape(pts_y.size) # ... vertex indices list_indices = [] for _i in range(i, i+lpi_p[0]+1): for _j in range(j, j+lpi_p[1]+1): ind = _i + _j * lpi_n[0] list_indices.append(ind+1) # ... ux = nrb.knots[0] ; uy = nrb.knots[1] scale2D = ( ux[i+lpi_p[0]+1] - ux[i] ) * ( uy[j+lpi_p[1]+1] - uy[j] ) elementData = [[i_elt+1], lpi_p, pts_x, pts_y \ , [scale2D], neighbours, list_indices] lineElementData = [] for data in elementData: for d in data: lineElementData.append(d) list_elementData.append(lineElementData) # ... # ................................................. # ................................................. # ... sets the list of Basis list_basisData = [] # list_i = list(range(0,lpi_n[0]-1,lpi_p[0])) # list_j = list(range(0,lpi_n[1]-1,lpi_p[1])) nx_elt = len(np.unique(nrb.knots[0])) - 1 ny_elt = len(np.unique(nrb.knots[1])) - 1 list_i = list(range(0,nx_elt)) list_j = list(range(0,ny_elt)) for enum_j, j in enumerate(list_j): for enum_i, i in enumerate(list_i): # compute index element index i_elt = enum_i + enum_j * len(list_i) # ... local Bezier-extraction matrix M = lmatrices[i_elt] # print M.shape # print "========= ELT ", str(i_elt+1) , " ============" # for iM in range(0, M.shape[0]): # for jM in range(0, M.shape[1]): # print '%.15f' % M[iM,jM] # print "=====================" M = np.ravel(M, order='F') # ... basisData = [[i_elt+1], M] lineBasisData = [] for data in basisData: for d in data: lineBasisData.append(d) list_basisData.append(lineBasisData) # ... # ................................................. # ................................................. # ... sets the list of connectivities list_connectivityData = [] # list_i = list(range(0,lpi_n[0]-1,lpi_p[0])) # list_j = list(range(0,lpi_n[1]-1,lpi_p[1])) nx_elt = len(np.unique(nrb.knots[0])) - 1 ny_elt = len(np.unique(nrb.knots[1])) - 1 list_i = list(range(0,nx_elt)) list_j = list(range(0,ny_elt)) for enum_j, j in enumerate(list_j): for enum_i, i in enumerate(list_i): # compute index element index i_elt = enum_i + enum_j * len(list_i) # ... local Bezier-extraction matrix M = local_LM[:,i_elt] M = np.ravel(M, order='F') # M = M.reshape(M.size) # ... # ... number of non vanishing basis per element nen = (lpi_p[0] + 1) * (lpi_p[1] + 1) # ... connectivityData = [[i_elt+1], [nen], M] lineConnectivityData = [] for data in connectivityData: for d in data: lineConnectivityData.append(d) list_connectivityData.append(lineConnectivityData) # ... # ................................................. # ................................................. # ... 
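# ----------------------------------------------------------------------
# Aside (added for clarity, not part of the original caid source): both the
# basis and connectivity records above are flattened with order='F', i.e.
# column by column, before being written out. A quick reminder of what that
# ordering produces:
#
#     import numpy as np
#     M = np.array([[1, 2],
#                   [3, 4]])
#     print(np.ravel(M, order='F'))   # [1 3 2 4]  (columns first)
#     print(np.ravel(M, order='C'))   # [1 2 3 4]  (rows first, for comparison)
# ----------------------------------------------------------------------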
sets the list of Dirichlet Basis functions for each Element # All external faces are set to Dirichlet list_DirFaces = [] for i in range(0, geo.npatchs): list_DirFaces.append([]) list_extFaces = geo.external_faces for extFaces in list_extFaces: patch_id = extFaces[0] face_id = extFaces[1] list_DirFaces[patch_id].append(face_id) # ... # ... compute the corresponding connectivity from caid.numbering.boundary_conditions import boundary_conditions con_dir = connectivity(geo) bc = boundary_conditions(geo) bc.dirichlet(geo, list_DirFaces) con_dir.init_data_structure(bc) # ... nrb = geo_ref[0] local_LM = con_dir.LM[0] list_dirichletData = [] # list_i = list(range(0,lpi_n[0]-1,lpi_p[0])) # list_j = list(range(0,lpi_n[1]-1,lpi_p[1])) nx_elt = len(np.unique(nrb.knots[0])) - 1 ny_elt = len(np.unique(nrb.knots[1])) - 1 list_i = list(range(0,nx_elt)) list_j = list(range(0,ny_elt)) for enum_j, j in enumerate(list_j): for enum_i, i in enumerate(list_i): # compute index element index i_elt = enum_i + enum_j * len(list_i) nen = (lpi_p[0]+1) * (lpi_p[1]+1) list_Dirichlet = np.zeros(nen, dtype=np.int) for enum_lm, lm in enumerate(local_LM[:, i_elt]): if lm == 0: list_Dirichlet[enum_lm] = 1 dirichletData = [[i_elt+1], [nen], list_Dirichlet] lineDirichletData = [] for data in dirichletData: for d in data: lineDirichletData.append(d) list_dirichletData.append(lineDirichletData) # ... # ................................................. # ................................................. # ... sets the B-net nrb = geo[0] lpi_n = nrb.shape lpi_p = nrb.degree list_BnetData = [] list_nBnet = [] # list_i = list(range(0,lpi_n[0]-1,lpi_p[0])) # list_j = list(range(0,lpi_n[1]-1,lpi_p[1])) nx_elt = len(np.unique(nrb.knots[0])) - 1 ny_elt = len(np.unique(nrb.knots[1])) - 1 list_i = list(range(0,nx_elt)) list_j = list(range(0,ny_elt)) for enum_j, j in enumerate(list_j): for enum_i, i in enumerate(list_i): # compute index element index i_elt = enum_i + enum_j * len(list_i) # ... vertex indices list_indices = [] for _j in range(j, j+lpi_p[1]): for _i in range(i, i+lpi_p[0]): _i1 = _i + 1 _j1 = _j + 1 # P00 = [i,j] I_00 = _i + _j * lpi_n[0] # P10 = [i+1,j] I_10 = _i1 + _j * lpi_n[0] # P01 = [i,j+1] I_01 = _i + _j1 * lpi_n[0] # P11 = [i+1,j+1] I_11 = _i1 + _j1 * lpi_n[0] indices = [I_00 + 1, I_10 + 1, I_11 + 1, I_01 + 1] list_indices.append(indices) # ... nBnet = len(list_indices) list_nBnet.append(nBnet) BnetData = [[i_elt+1], [nBnet], list_indices] lineBnetData = [] for data in BnetData: for d in data: lineBnetData.append(d) list_BnetData.append(lineBnetData) # ... # ................................................. if filename is not None: # ................................................. # ... exporting files fmt = '%.15f' fmt_int = '%d' fmt_nodes = '%d, %.15f, %.15f' # ................................................. # ................................................. a = open(filename+"_nodes.txt", "w") # ... write size of list_nodeData a.write(str(len(list_nodeData))+' \n') for L in list_nodeData: # line = ''.join(str(fmt % e)+', ' for e in L)[:-2]+' \n' line = fmt_nodes % tuple(L) +' \n' a.write(line) a.close() # ................................................. # ................................................. a = open(filename+"_elements.txt", "w") # ... write size of list_elementData a.write(str(len(list_elementData))+' \n') # ... write maximum of spline degrees maxDegree = np.max(np.asarray([np.max(np.asarray(nrb.degree)) for nrb in self])) a.write(str(maxDegree)+' \n') for L in list_elementData: # ... 
element id line = str(L[0]) + ' \n' a.write(line) # ... spline degree lpi_p = L[1:3] line = str(lpi_p[0]) + ', ' + str(lpi_p[1]) + ' \n' a.write(line) # ... line = ''.join(str(fmt % e)+', ' for e in L[3:])[:-2]+' \n' a.write(line) a.close() # ................................................. # ................................................. a = open(filename+"_basis.txt", "w") # ... write size of list_basisData a.write(str(len(list_basisData))+' \n') for L in list_basisData: # ... element id line = str(L[0]) + ' \n' a.write(line) line = ''.join(str(fmt % e)+', ' for e in L[1:])[:-2]+' \n' a.write(line) a.close() # ................................................. # ................................................. a = open(filename+"_connectivity.txt", "w") # ... write size of list_connectivityData a.write(str(len(list_connectivityData))+' \n') for L in list_connectivityData: # ... element id line = str(L[0]) + ' \n' a.write(line) # ... number of non vanishing basis per element line = str(L[1]) + ' \n' a.write(line) # ... local LM line = ''.join(str(fmt_int % e)+', ' for e in L[2:])[:-2]+' \n' a.write(line) a.close() # ... # ................................................. a = open(filename+"_dirichlet.txt", "w") # ... write size of list_connectivityData a.write(str(len(list_dirichletData))+' \n') for L in list_dirichletData: # ... element id line = str(L[0]) + ' \n' a.write(line) # ... number of non vanishing basis per element line = str(L[1]) + ' \n' a.write(line) # ... dirichlet nodes line = ''.join(str(fmt_int % e)+', ' for e in L[2:])[:-2]+' \n' a.write(line) a.close() # ... # ................................................. a = open(filename+"_bnet.txt", "w") # ... write size of list_connectivityData a.write(str(len(list_BnetData))+' \n') # ... write max of n-Bnet per element max_nbnet = np.max(np.asarray(list_nBnet)) a.write(str(max_nbnet)+' \n') for L in list_BnetData: # ... element id line = str(L[0]) + ' \n' a.write(line) # ... nBnet line = str(L[1]) + ' \n' a.write(line) # ... B-net nodes indices for data in L[2:]: line = ''.join(str(fmt_int % e)+', ' for e in data)[:-2]+' \n' a.write(line) a.close() # ... return list_nodeData, list_elementData def to_bezier_patchs_3d(self, filename=None): geo_ref, list_lmatrices = self.bezier_extract() def to_bezier_patchs(self, filename=None): geo_ref, list_lmatrices = self.bezier_extract() if self.dim == 1: self.to_bezier_patchs_1d(filename=filename) if self.dim == 2: self.to_bezier_patchs_2d(filename=filename) if self.dim == 3: self.to_bezier_patchs_3d(filename=filename) def to_bezier_jorek(self, patch_id, filename=None): """ this routine transforms the current geometry into cubic Bezier patchs works only if dim == 2 """ list_master_faces = [d['original'] for d in self.connectivity] list_slave_faces = [d['clone'] for d in self.connectivity] # ... def from_ij_to_face_id(i,j, api_n): face_id = None if (i == api_n[0]-1) and (j < api_n[1]): face_id = 3 return face_id if (i < api_n[0]) and (j == api_n[1]-1): face_id = 2 return face_id if (i == 0) and (j < api_n[1]): face_id = 1 return face_id if (i < api_n[0]) and (j == 0): face_id = 0 return face_id # ... # ... def duplicated_code_from_ij(i,j, api_n): face_id = from_ij_to_face_id(i,j, api_n) dupliCode = 0 # first we need to know if the current face is a master or a slave one if [patch_id, face_id] in list_master_faces: dupliCode = 1 if [patch_id, face_id] in list_slave_faces: dupliCode = 2 # ... return dupliCode # ... # ... 
TODO add loop on patchs here nrb = self[patch_id] #.copy() if nrb.dim != 2 : print("to_bezier_jorek : Only works for dim=2") if min(nrb.degree) > 3 : print("to_bezier_jorek : Not yet implemented for splines with degree > 3") # ... # we elevate the spline degree to 3 # ... geo = cad_geometry() geo.append(nrb) list_p = [] for axis in range(0,nrb.dim): list_p.append(np.max(3 - nrb.degree[axis], 0)) geo.refine(list_p=list_p) # ... # ... # we elevate the interior knots multiplicities to 3 # ... list_knots = [] # print ">>>> " # print geo[0].knots[0] # print geo[0].knots[1] # print "<<<< " # print nrb.breaks(mults=True) for axis in range(0, nrb.dim): # ... TODO there is a problem with this part. # use arc circle as test list_t, list_mult = nrb.breaks(axis=axis, mults=True) list_t = list_t[1:-1] list_mult = list_mult[1:-1] # list_mult = [] # for t in list_t: # mult = len([s for s in nrb.knots[axis] if s==t]) # list_mult.append(mult) # print "=========" # print list_mult # print list_t list_t_new = [] for t,m in zip(list_t, list_mult): new_m = max(3-m,0) for i in range(0,new_m): list_t_new.append(t) list_knots.append(list_t_new) # print list_knots[0] # print list_knots[1] # print "=========" geo.refine(list_t=list_knots) # ... # ... list_extFaces = self.external_faces list_intFaces = self.internal_faces list_DirFaces = [] for i in range(0, self.npatchs): list_DirFaces.append([]) for extFaces in list_extFaces: nrb_id = extFaces[0] face_id = extFaces[1] list_DirFaces[nrb_id].append(face_id) # ... # ... TODO add loop on patchs here nrb = geo[0] node_index = 0 # ... # ... # we loop over each element and generate the P,u,v,w # ... # we start by matching the 1D index with the 2D one lpi_n = nrb.shape lpi_p = nrb.degree list_Index = list(range(0, np.asarray(lpi_n).prod())) lpi_Index = np.asarray(list_Index).reshape(lpi_n[::-1]) lpi_Index = lpi_Index.transpose() # ... create the coarse mesh rx = (lpi_n[0] - 1) // lpi_p[0] - 1 ry = (lpi_n[1] - 1) // lpi_p[1] - 1 tx = [0.]+list(linspace(0.,1.,rx+2))+[1.] ty = [0.]+list(linspace(0.,1.,ry+2))+[1.] knots_coarse = [tx,ty] C = np.zeros((rx+2,ry+2,3)) nrb_coarse = cad_nurbs(knots_coarse, C) geo_coarse = cad_geometry() geo_coarse.append(nrb_coarse) geo_coarse._internal_faces = self._internal_faces geo_coarse._external_faces = self._external_faces geo_coarse._connectivity = self._connectivity # ... # ... create the assiacted connectivity from caid.numbering.connectivity import connectivity con = connectivity(geo_coarse) con.init_data_structure() local_LM = con.LM[0] local_ID = con.ID_loc[0] # ... pts = nrb.points list_i = list(range(0,lpi_n[0]-1,lpi_p[0])) list_j = list(range(0,lpi_n[1]-1,lpi_p[1])) lpi_nElt = [len(list_i), len(list_j)] list_IndexElt = list(range(0, np.asarray(lpi_nElt).prod())) list_IndexElt = np.asarray(list_IndexElt).reshape(lpi_nElt[::-1]) list_IndexElt = list_IndexElt.transpose() # ... # sets the list of Nodes # ... 
list_indexNodes = [] list_huhvNodes = [] list_nodeData = [] Dirichlet_faces = list_DirFaces[patch_id] nx_elt = len(np.unique(nrb.knots[0])) - 1 ny_elt = len(np.unique(nrb.knots[1])) - 1 for enum_j,j in enumerate(range(0,lpi_n[1],lpi_p[1])): for enum_i,i in enumerate(range(0,lpi_n[0],lpi_p[0])): node_index += 1 i1 = i + 1 j1 = j + 1 li_signi = 1 li_signj = 1 currentElt = -1 if enum_i < nx_elt and enum_j < ny_elt: currentElt = enum_i + nx_elt * enum_j if ( i == lpi_n[0] - 1 ) : i1 = lpi_n[0] - 2 li_signi = -1 if ( j == lpi_n[1] - 1 ) : j1 = lpi_n[1] - 2 li_signj = -1 P00 = np.asarray(pts[i,j])[0:-1] # remove the 3D coordinate P10 = np.asarray(pts[i1,j])[0:-1] P01 = np.asarray(pts[i,j1])[0:-1] P11 = np.asarray(pts[i1,j1])[0:-1] li_00 = lpi_Index[tuple([i,j])] li_10 = lpi_Index[tuple([i1,j])] li_01 = lpi_Index[tuple([i,j1])] li_11 = lpi_Index[tuple([i1,j1])] list_indexNodes.append(li_00) globID = local_ID[enum_i,enum_j] # print currentElt, globID # ... # compute u, v and w # ... u = P10 - P00 ; hu = np.sqrt(u[0]**2+u[1]**2) v = P01 - P00 ; hv = np.sqrt(v[0]**2+v[1]**2) w = P11 + P00 - P10 - P01 if (hu < 1.e-10): hu = 1.0 if (hv < 1.e-10): hv = 1.0 u /= hu v /= hv w /= ( hu * hv ) u *= li_signi v *= li_signj w *= li_signi * li_signj list_huhvNodes.append([hu,hv]) # ... # ................................... # Boundary treatment # ................................... face_id = from_ij_to_face_id(i,j,lpi_n) # ... # compute the boundary code for external faces # ... boundaryCode = 0 # if face_id in Dirichlet_faces: # if j in [0,lpi_n[1] - 1]: # boundaryCode += 1 # if i in [0,lpi_n[0] - 1]: # boundaryCode += 2 if j in [0,lpi_n[1] - 1]: boundaryCode += 1 if i in [0,lpi_n[0] - 1]: boundaryCode += 2 # ... # ... # Duplication code internal faces # 2 son # 1 father # 0 other # TODO make compatible with multi patch => add test on faces # ... dupliCode = 0 dupliCode = duplicated_code_from_ij(i,j, lpi_n) # ... # ................................... # ... nodeData = [P00 \ , u, v, w \ , [boundaryCode], [globID], [dupliCode]] lineNodeData = [] for data in nodeData: for d in data: lineNodeData.append(d) list_nodeData.append(lineNodeData) # ... # ... # ... list_elementData = [] for j in list_j: for i in list_i: P00 = pts[i ,j ,0:3] # remove the 3D coordinate P10 = pts[i+1,j ,0:3] P01 = pts[i ,j+1,0:3] P11 = pts[i+1,j+1,0:3] # ... # compute u, v and w # ... u = P10 - P00 ; hu = sqrt(u[0]**2+u[1]**2) v = P01 - P00 ; hv = sqrt(v[0]**2+v[1]**2) w = P11 + P00 - P10 - P01 u /= hu v /= hv w /= ( hu * hv ) # ... # ... # compute neighbours element # neighbours[0] : bottom # neighbours[1] : left # neighbours[2] : top # neighbours[3] : right # neighbours[d] = -1 if no neighbour in the direction d # ... neighbours = zeros(4)# array([-1, -1, -1, -1]) # bottom if j == list_j[0]: neighbours[0] = -1 else: iElt = list_i.index(i) ; jElt = list_j.index(j) neighbours[0] = list_IndexElt[tuple([iElt,jElt-1])] # left if i == list_i[0]: neighbours[1] = -1 else: iElt = list_i.index(i) ; jElt = list_j.index(j) neighbours[1] = list_IndexElt[tuple([iElt-1,jElt])] # top if j == list_j[-1]: neighbours[2] = -1 else: iElt = list_i.index(i) ; jElt = list_j.index(j) neighbours[2] = list_IndexElt[tuple([iElt,jElt+1])] # right if i == list_i[-1]: neighbours[3] = -1 else: iElt = list_i.index(i) ; jElt = list_j.index(j) neighbours[3] = list_IndexElt[tuple([iElt+1,jElt])] neighbours += 1 # ... 
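# ----------------------------------------------------------------------
# Aside (added for clarity, not part of the original caid source): the
# neighbour lookup above treats the elements as a structured nx-by-ny grid
# numbered i_elt = ie + je*nx; missing neighbours are -1 and the final +1
# shift turns everything into 1-based ids, with 0 meaning "no neighbour".
# A compact, illustrative re-statement of that rule:
#
#     import numpy as np
#     def element_neighbours(ie, je, nx, ny):
#         idx = lambda i, j: i + j * nx
#         nb = np.full(4, -1, dtype=int)          # bottom, left, top, right
#         if je > 0:      nb[0] = idx(ie, je - 1)
#         if ie > 0:      nb[1] = idx(ie - 1, je)
#         if je < ny - 1: nb[2] = idx(ie, je + 1)
#         if ie < nx - 1: nb[3] = idx(ie + 1, je)
#         return nb + 1
#
#     print(element_neighbours(0, 0, 3, 2))       # [0 0 4 2]
# ----------------------------------------------------------------------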
i_00 = lpi_Index[tuple([ i, j])] i_30 = lpi_Index[tuple([i+lpi_p[0], j])] i_03 = lpi_Index[tuple([ i,j+lpi_p[1]])] i_33 = lpi_Index[tuple([i+lpi_p[0],j+lpi_p[1]])] I_00 = list_indexNodes.index(i_00) I_30 = list_indexNodes.index(i_30) I_03 = list_indexNodes.index(i_03) I_33 = list_indexNodes.index(i_33) # print "I_00, I_30, I_03, I_33 = ", I_00, I_30, I_03, I_33 [hu_00,hv_00] = list_huhvNodes[I_00] [hu_03,hv_03] = list_huhvNodes[I_03] [hu_30,hv_30] = list_huhvNodes[I_30] [hu_33,hv_33] = list_huhvNodes[I_33] # ... # For node 2 and 3 of an element in a square grid the direction of u # has to point to the left, i.e., the size has to be negative. # 30 and 33 => hu hu_30 *= -1.0 hu_33 *= -1.0 # ... # ... # For node 3 and 4, the vector v has to be negative. # 33 and 03 => hu hv_03 *= -1.0 hv_33 *= -1.0 # ... list_indexP = [I_00+1, I_30+1, I_33+1, I_03+1] list_huhv = [hu_00,hv_00, hu_30,hv_30, hu_33,hv_33, hu_03,hv_03] if ((hu_00-hu)**2 +(hv_00-hv)**2 > 1.e-7) : print("SERIOUS ERROR: hu_00 must be equal to hu and hv_00 to hv. But got the values") print((hu_00, hu)) print((hv_00, hv)) elementData = [list_indexP \ , list_huhv \ , neighbours] lineElementData = [] for data in elementData: for d in data: lineElementData.append(d) list_elementData.append(lineElementData) # ... if filename is not None: # ... # exporting files # ... fmt = '%.17f' a = open(filename+"_nodes.txt", "w") a.write(str(len(list_nodeData))+' \n') for L in list_nodeData: line = ''.join(str(fmt % e)+', ' for e in L)[:-2]+' \n' a.write(line) a.close() # a = open(filename+"_elements.txt", "w") a.write(str(len(list_elementData))+' \n') for L in list_elementData: line = ''.join(str(fmt % e)+', ' for e in L)[:-2]+' \n' a.write(line) a.close() # ... return list_nodeData, list_elementData
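# ----------------------------------------------------------------------
# Added illustration (not part of the original caid source): the export
# helpers above write a record-count header and then comma-separated rows
# built with the same join-and-trim idiom. A tiny, self-contained check of
# the exact text one row becomes:
if __name__ == "__main__":
    fmt = '%.17f'
    row = [0.5, 1.0, 0.25]
    line = ''.join(str(fmt % e) + ', ' for e in row)[:-2] + ' \n'
    # -> '0.50000000000000000, 1.00000000000000000, 0.25000000000000000 \n'
    print(repr(line))
# ----------------------------------------------------------------------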
ratnania/caid
caid/cad_geometry.py
Python
mit
142,872
[ "VTK" ]
c567cf577389a0a8416abe99a5b4fb1d01db3e5381197141a9a7e8a8ed942315
""" Atomic coordinate featurizer. """ import logging import numpy as np from deepchem.feat.base_classes import Featurizer, ComplexFeaturizer from deepchem.feat.molecule_featurizers import AtomicCoordinates from deepchem.utils.data_utils import pad_array from deepchem.utils.rdkit_utils import MoleculeLoadException, get_xyz_from_mol, \ load_molecule, merge_molecules_xyz, merge_molecules def compute_neighbor_list(coords, neighbor_cutoff, max_num_neighbors, periodic_box_size): """Computes a neighbor list from atom coordinates.""" N = coords.shape[0] import mdtraj traj = mdtraj.Trajectory(coords.reshape((1, N, 3)), None) box_size = None if periodic_box_size is not None: box_size = np.array(periodic_box_size) traj.unitcell_vectors = np.array( [[[box_size[0], 0, 0], [0, box_size[1], 0], [0, 0, box_size[2]]]], dtype=np.float32) neighbors = mdtraj.geometry.compute_neighborlist(traj, neighbor_cutoff) neighbor_list = {} for i in range(N): if max_num_neighbors is not None and len(neighbors[i]) > max_num_neighbors: delta = coords[i] - coords.take(neighbors[i], axis=0) if box_size is not None: delta -= np.round(delta / box_size) * box_size dist = np.linalg.norm(delta, axis=1) sorted_neighbors = list(zip(dist, neighbors[i])) sorted_neighbors.sort() neighbor_list[i] = [ sorted_neighbors[j][1] for j in range(max_num_neighbors) ] else: neighbor_list[i] = list(neighbors[i]) return neighbor_list class NeighborListAtomicCoordinates(Featurizer): """ Adjacency List of neighbors in 3-space Neighbors determined by user-defined distance cutoff [in Angstrom]. https://en.wikipedia.org/wiki/Cell_list Ref: http://www.cs.cornell.edu/ron/references/1989/Calculations%20of%20a%20List%20of%20Neighbors%20in%20Molecular%20Dynamics%20Si.pdf Parameters ---------- neighbor_cutoff: float Threshold distance [Angstroms] for counting neighbors. periodic_box_size: 3 element array Dimensions of the periodic box in Angstroms, or None to not use periodic boundary conditions """ def __init__(self, max_num_neighbors=None, neighbor_cutoff=4, periodic_box_size=None): if neighbor_cutoff <= 0: raise ValueError("neighbor_cutoff must be positive value.") if max_num_neighbors is not None: if not isinstance(max_num_neighbors, int) or max_num_neighbors <= 0: raise ValueError("max_num_neighbors must be positive integer.") self.max_num_neighbors = max_num_neighbors self.neighbor_cutoff = neighbor_cutoff self.periodic_box_size = periodic_box_size # Type of data created by this featurizer self.dtype = object self.bohr_coords_featurizer = AtomicCoordinates(use_bohr=True) self.coords_featurizer = AtomicCoordinates(use_bohr=False) def _featurize(self, mol): """ Compute neighbor list. Parameters ---------- mol: rdkit Mol To be featurized. """ # TODO(rbharath): Should this return a list? bohr_coords = self.bohr_coords_featurizer._featurize(mol) coords = self.coords_featurizer._featurize(mol) neighbor_list = compute_neighbor_list(coords, self.neighbor_cutoff, self.max_num_neighbors, self.periodic_box_size) return (bohr_coords, neighbor_list) class NeighborListComplexAtomicCoordinates(ComplexFeaturizer): """ Adjacency list of neighbors for protein-ligand complexes in 3-space. Neighbors dtermined by user-dfined distance cutoff. 
""" def __init__(self, max_num_neighbors=None, neighbor_cutoff=4): if neighbor_cutoff <= 0: raise ValueError("neighbor_cutoff must be positive value.") if max_num_neighbors is not None: if not isinstance(max_num_neighbors, int) or max_num_neighbors <= 0: raise ValueError("max_num_neighbors must be positive integer.") self.max_num_neighbors = max_num_neighbors self.neighbor_cutoff = neighbor_cutoff # Type of data created by this featurizer self.dtype = object def _featurize(self, mol_pdb_file, protein_pdb_file): """ Compute neighbor list for complex. Parameters ---------- mol_pdb_file: str Filename for ligand pdb file. protein_pdb_file: str Filename for protein pdb file. """ mol_coords, ob_mol = load_molecule(mol_pdb_file) protein_coords, protein_mol = load_molecule(protein_pdb_file) system_coords = merge_molecules_xyz([mol_coords, protein_coords]) system_neighbor_list = compute_neighbor_list( system_coords, self.neighbor_cutoff, self.max_num_neighbors, None) return (system_coords, system_neighbor_list) class ComplexNeighborListFragmentAtomicCoordinates(ComplexFeaturizer): """This class computes the featurization that corresponds to AtomicConvModel. This class computes featurizations needed for AtomicConvModel. Given a two molecular structures, it computes a number of useful geometric features. In particular, for each molecule and the global complex, it computes a coordinates matrix of size (N_atoms, 3) where N_atoms is the number of atoms. It also computes a neighbor-list, a dictionary with N_atoms elements where neighbor-list[i] is a list of the atoms the i-th atom has as neighbors. In addition, it computes a z-matrix for the molecule which is an array of shape (N_atoms,) that contains the atomic number of that atom. Since the featurization computes these three quantities for each of the two molecules and the complex, a total of 9 quantities are returned for each complex. Note that for efficiency, fragments of the molecules can be provided rather than the full molecules themselves. """ def __init__(self, frag1_num_atoms, frag2_num_atoms, complex_num_atoms, max_num_neighbors, neighbor_cutoff, strip_hydrogens=True): self.frag1_num_atoms = frag1_num_atoms self.frag2_num_atoms = frag2_num_atoms self.complex_num_atoms = complex_num_atoms self.max_num_neighbors = max_num_neighbors self.neighbor_cutoff = neighbor_cutoff self.strip_hydrogens = strip_hydrogens self.neighborlist_featurizer = NeighborListComplexAtomicCoordinates( self.max_num_neighbors, self.neighbor_cutoff) def _featurize(self, mol_pdb_file, protein_pdb_file): try: frag1_coords, frag1_mol = load_molecule( mol_pdb_file, is_protein=False, sanitize=True, add_hydrogens=False) frag2_coords, frag2_mol = load_molecule( protein_pdb_file, is_protein=True, sanitize=True, add_hydrogens=False) except MoleculeLoadException: # Currently handles loading failures by returning None # TODO: Is there a better handling procedure? logging.warning("Some molecules cannot be loaded by Rdkit. 
Skipping") return None system_mol = merge_molecules([frag1_mol, frag2_mol]) system_coords = get_xyz_from_mol(system_mol) frag1_coords, frag1_mol = self._strip_hydrogens(frag1_coords, frag1_mol) frag2_coords, frag2_mol = self._strip_hydrogens(frag2_coords, frag2_mol) system_coords, system_mol = self._strip_hydrogens(system_coords, system_mol) try: frag1_coords, frag1_neighbor_list, frag1_z = self.featurize_mol( frag1_coords, frag1_mol, self.frag1_num_atoms) frag2_coords, frag2_neighbor_list, frag2_z = self.featurize_mol( frag2_coords, frag2_mol, self.frag2_num_atoms) system_coords, system_neighbor_list, system_z = self.featurize_mol( system_coords, system_mol, self.complex_num_atoms) except ValueError: logging.warning( "max_atoms was set too low. Some complexes too large and skipped") return None return frag1_coords, frag1_neighbor_list, frag1_z, frag2_coords, frag2_neighbor_list, frag2_z, \ system_coords, system_neighbor_list, system_z def get_Z_matrix(self, mol, max_atoms): if len(mol.GetAtoms()) > max_atoms: raise ValueError("A molecule is larger than permitted by max_atoms. " "Increase max_atoms and try again.") return pad_array( np.array([atom.GetAtomicNum() for atom in mol.GetAtoms()]), max_atoms) def featurize_mol(self, coords, mol, max_num_atoms): logging.info("Featurizing molecule of size: %d", len(mol.GetAtoms())) neighbor_list = compute_neighbor_list(coords, self.neighbor_cutoff, self.max_num_neighbors, None) z = self.get_Z_matrix(mol, max_num_atoms) z = pad_array(z, max_num_atoms) coords = pad_array(coords, (max_num_atoms, 3)) return coords, neighbor_list, z def _strip_hydrogens(self, coords, mol): class MoleculeShim(object): """ Shim of a Molecule which supports #GetAtoms() """ def __init__(self, atoms): self.atoms = [AtomShim(x) for x in atoms] def GetAtoms(self): return self.atoms class AtomShim(object): def __init__(self, atomic_num): self.atomic_num = atomic_num def GetAtomicNum(self): return self.atomic_num if not self.strip_hydrogens: return coords, mol indexes_to_keep = [] atomic_numbers = [] for index, atom in enumerate(mol.GetAtoms()): if atom.GetAtomicNum() != 1: indexes_to_keep.append(index) atomic_numbers.append(atom.GetAtomicNum()) mol = MoleculeShim(atomic_numbers) coords = coords[indexes_to_keep] return coords, mol
lilleswing/deepchem
deepchem/feat/complex_featurizers/complex_atomic_coordinates.py
Python
mit
9,653
[ "MDTraj", "RDKit" ]
b1cef1b83fac5825c96fb42ac879f8954c8f0da09cf3515f1837d6e5031effe8
# -*- coding: utf-8 -*- """ This file contains the logic responsible for coordinating laser scanning. Qudi is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Qudi is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Qudi. If not, see <http://www.gnu.org/licenses/>. Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/> """ from qtpy import QtCore from collections import OrderedDict import numpy as np import time import datetime import matplotlib as mpl import matplotlib.pyplot as plt from logic.generic_logic import GenericLogic from core.util.mutex import Mutex class HardwarePull(QtCore.QObject): """ Helper class for running the hardware communication in a separate thread. """ def __init__(self, parentclass): super().__init__() # remember the reference to the parent class to access functions ad settings self._parentclass = parentclass def handle_timer(self, state_change): """ Threaded method that can be called by a signal from outside to start the timer. @param bool state: (True) starts timer, (False) stops it. """ if state_change: self.timer = QtCore.QTimer() self.timer.timeout.connect(self._update_data) self.timer.start(self._parentclass._logic_acquisition_timing) else: if hasattr(self, 'timer'): self.timer.stop() def _update_data(self): """ This method gets the count data from the hardware. It runs repeatedly in the logic module event loop by being connected to sigCountNext and emitting sigCountNext through a queued connection. """ hardware = self._parentclass._wavemeter_device self._parentclass.current_wavelength = 1.0 * hardware.get_current_wavelength() time_stamp = time.time() - self._parentclass._acqusition_start_time # only wavelength >200 nm make sense, ignore the rest if self._parentclass.current_wavelength > 200: self._parentclass._wavelength_data.append( np.array([time_stamp, self._parentclass.current_wavelength]) ) # check if we have a new min or max and save it if so if self._parentclass.current_wavelength > self._parentclass.intern_xmax: self._parentclass.intern_xmax = self._parentclass.current_wavelength if self._parentclass.current_wavelength < self._parentclass.intern_xmin: self._parentclass.intern_xmin = self._parentclass.current_wavelength if ( (not self._parentclass._counter_logic.get_saving_state()) or self._parentclass._counter_logic.getState() == 'idle' ): self._parentclass.stop_scanning() class WavemeterLoggerLogic(GenericLogic): """This logic module gathers data from wavemeter and the counter logic. """ sig_data_updated = QtCore.Signal() sig_update_histogram_next = QtCore.Signal(bool) sig_handle_timer = QtCore.Signal(bool) sig_new_data_point = QtCore.Signal(list) sig_fit_updated = QtCore.Signal() _modclass = 'laserscanninglogic' _modtype = 'logic' # declare connectors _connectors = { 'wavemeter1': 'WavemeterInterface', 'counterlogic': 'CounterLogic', 'savelogic': 'SaveLogic', 'fitlogic': 'FitLogic' } def __init__(self, config, **kwargs): """ Create WavemeterLoggerLogic object with connectors. 
@param dict config: module configuration @param dict kwargs: optional parameters """ super().__init__(config=config, **kwargs) # locking for thread safety self.threadlock = Mutex() if 'logic_acquisition_timing' in config.keys(): self._logic_acquisition_timing = config['logic_acquisition_timing'] else: self._logic_acquisition_timing = 20. self.log.warning('No logic_acquisition_timing configured, ' 'using {} instead.'.format(self._logic_acquisition_timing) ) if 'logic_update_timing' in config.keys(): self._logic_update_timing = config['logic_update_timing'] else: self._logic_update_timing = 100. self.log.warning('No logic_update_timing configured, ' 'using {} instead.'.format(self._logic_update_timing) ) self._acqusition_start_time = 0 self._bins = 200 self._data_index = 0 self._recent_wavelength_window = [0, 0] self.counts_with_wavelength = [] self._xmin = 650 self._xmax = 750 # internal min and max wavelength determined by the measured wavelength self.intern_xmax = -1.0 self.intern_xmin = 1.0e10 self.current_wavelength = 0 def on_activate(self): """ Initialisation performed during activation of the module. """ self._wavelength_data = [] self.stopRequested = False self._wavemeter_device = self.get_connector('wavemeter1') # print("Counting device is", self._counting_device) self._save_logic = self.get_connector('savelogic') self._counter_logic = self.get_connector('counterlogic') self._fit_logic = self.get_connector('fitlogic') self.fc = self._fit_logic.make_fit_container('Wavemeter counts', '1d') self.fc.set_units(['Hz', 'c/s']) if 'fits' in self._statusVariables and isinstance(self._statusVariables['fits'], dict): self.fc.load_from_dict(self._statusVariables['fits']) else: d1 = OrderedDict() d1['Lorentzian peak'] = { 'fit_function': 'lorentzian', 'estimator': 'peak' } d1['Two Lorentzian peaks'] = { 'fit_function': 'lorentziandouble', 'estimator': 'peak' } d1['Two Gaussian peaks'] = { 'fit_function': 'gaussiandouble', 'estimator': 'peak' } default_fits = OrderedDict() default_fits['1d'] = d1 self.fc.load_from_dict(default_fits) # create a new x axis from xmin to xmax with bins points self.histogram_axis = np.arange( self._xmin, self._xmax, (self._xmax - self._xmin) / self._bins ) self.histogram = np.zeros(self.histogram_axis.shape) self.envelope_histogram = np.zeros(self.histogram_axis.shape) self.sig_update_histogram_next.connect( self._attach_counts_to_wavelength, QtCore.Qt.QueuedConnection ) # fit data self.wlog_fit_x = np.linspace(self._xmin, self._xmax, self._bins*5) self.wlog_fit_y = np.zeros(self.wlog_fit_x.shape) # create an indepentent thread for the hardware communication self.hardware_thread = QtCore.QThread() # create an object for the hardware communication and let it live on the new thread self._hardware_pull = HardwarePull(self) self._hardware_pull.moveToThread(self.hardware_thread) # connect the signals in and out of the threaded object self.sig_handle_timer.connect(self._hardware_pull.handle_timer) # start the event loop for the hardware self.hardware_thread.start() self.last_point_time = time.time() def on_deactivate(self): """ Deinitialisation performed during deactivation of the module. """ if self.getState() != 'idle' and self.getState() != 'deactivated': self.stop_scanning() self.hardware_thread.quit() self.sig_handle_timer.disconnect() if len(self.fc.fit_list) > 0: self._statusVariables['fits'] = self.fc.save_to_dict() def get_max_wavelength(self): """ Current maximum wavelength of the scan. 
@return float: current maximum wavelength """ return self._xmax def get_min_wavelength(self): """ Current minimum wavelength of the scan. @return float: current minimum wavelength """ return self._xmin def get_bins(self): """ Current number of bins in the spectrum. @return int: current number of bins in the scan """ return self._bins def recalculate_histogram(self, bins=None, xmin=None, xmax=None): """ Recalculate the current spectrum from raw data. @praram int bins: new number of bins @param float xmin: new minimum wavelength @param float xmax: new maximum wavelength """ if bins is not None: self._bins = bins if xmin is not None: self._xmin = xmin if xmax is not None: self._xmax = xmax # create a new x axis from xmin to xmax with bins points self.rawhisto = np.zeros(self._bins) self.envelope_histogram = np.zeros(self._bins) self.sumhisto = np.ones(self._bins) * 1.0e-10 self.histogram_axis = np.linspace(self._xmin, self._xmax, self._bins) self.sig_update_histogram_next.emit(True) def get_fit_functions(self): """ Return the names of all ocnfigured fit functions. @return list(str): list of fit function names """ return self.fc.fit_list.keys() def do_fit(self): """ Execute the currently configured fit """ self.wlog_fit_x, self.wlog_fit_y, result = self.fc.do_fit( self.histogram_axis, self.histogram ) self.sig_fit_updated.emit() self.sig_data_updated.emit() def start_scanning(self, resume=False): """ Prepare to start counting: zero variables, change state and start counting "loop" @param bool resume: whether to resume measurement """ self.run() if self._counter_logic.getState() == 'idle': self._counter_logic.startCount() if self._counter_logic.get_saving_state(): self._counter_logic.save_data() self._wavemeter_device.start_acqusition() self._counter_logic.start_saving(resume=resume) if not resume: self._acqusition_start_time = self._counter_logic._saving_start_time self._wavelength_data = [] self.data_index = 0 self._recent_wavelength_window = [0, 0] self.counts_with_wavelength = [] self.rawhisto = np.zeros(self._bins) self.sumhisto = np.ones(self._bins) * 1.0e-10 self.intern_xmax = -1.0 self.intern_xmin = 1.0e10 self.recent_avg = [0, 0, 0] self.recent_count = 0 # start the measuring thread self.sig_handle_timer.emit(True) self._complete_histogram = True self.sig_update_histogram_next.emit(False) return 0 def stop_scanning(self): """ Set a flag to request stopping counting. """ if not self.getState() == 'idle': # self._wavemeter_device.stop_acqusition() # stop the measurement thread self.sig_handle_timer.emit(False) # set status to idle again self.stop() if self._counter_logic.get_saving_state(): self._counter_logic.save_data(to_file=False) return 0 def _attach_counts_to_wavelength(self, complete_histogram): """ Interpolate a wavelength value for each photon count value. This process assumes that the wavelength is varying smoothly and fairly continuously, which is sensible for most measurement conditions. Recent count values are those recorded AFTER the previous stitch operation, but BEFORE the most recent wavelength value (do not extrapolate beyond the current wavelength information). 
""" # If there is not yet any wavelength data, then wait and signal next loop if len(self._wavelength_data) == 0: time.sleep(self._logic_update_timing * 1e-3) self.sig_data_updated.emit() return # The end of the recent_wavelength_window is the time of the latest wavelength data self._recent_wavelength_window[1] = self._wavelength_data[-1][0] # (speed-up) We only need to worry about "recent" counts, because as the count data gets # very long all the earlier points will already be attached to wavelength values. count_recentness = 100 # TODO: calculate this from count_freq and wavemeter refresh rate # TODO: Does this depend on things, or do we loop fast enough to get every wavelength value? wavelength_recentness = np.min([5, len(self._wavelength_data)]) recent_counts = np.array(self._counter_logic._data_to_save[-count_recentness:]) recent_wavelengths = np.array(self._wavelength_data[-wavelength_recentness:]) # The latest counts are those recorded during the recent_wavelength_window count_idx = [0, 0] count_idx[0] = np.searchsorted(recent_counts[:, 0], self._recent_wavelength_window[0]) count_idx[1] = np.searchsorted(recent_counts[:, 0], self._recent_wavelength_window[1]) latest_counts = recent_counts[count_idx[0]:count_idx[1]] # Interpolate to obtain wavelength values at the times of each count interpolated_wavelengths = np.interp(latest_counts[:, 0], xp=recent_wavelengths[:, 0], fp=recent_wavelengths[:, 1] ) # Stitch interpolated wavelength into latest counts array latest_stitched_data = np.insert(latest_counts, 2, values=interpolated_wavelengths, axis=1) # Add this latest data to the list of counts vs wavelength self.counts_with_wavelength += latest_stitched_data.tolist() # The start of the recent data window for the next round will be the end of this one. self._recent_wavelength_window[0] = self._recent_wavelength_window[1] # Run the old update histogram method to keep duplicate data self._update_histogram(complete_histogram) # Signal that data has been updated self.sig_data_updated.emit() # Wait and repeat if measurement is ongoing time.sleep(self._logic_update_timing * 1e-3) if self.getState() == 'running': self.sig_update_histogram_next.emit(False) def _update_histogram(self, complete_histogram): """ Calculate new points for the histogram. @param bool complete_histogram: should the complete histogram be recalculated, or just the most recent data? @return: """ # If things like num_of_bins have changed, then recalculate the complete histogram # Note: The histogram may be recalculated (bins changed, etc) from the stitched data. # There is no need to recompute the interpolation for the stitched data. 
if complete_histogram: count_window = len(self._counter_logic._data_to_save) self._data_index = 0 self.log.info('Recalcutating Laser Scanning Histogram for: ' '{0:d} counts and {1:d} wavelength.'.format( count_window, len(self._wavelength_data) ) ) else: count_window = min(100, len(self._counter_logic._data_to_save)) if count_window < 2: time.sleep(self._logic_update_timing * 1e-3) self.sig_update_histogram_next.emit(False) return temp = np.array(self._counter_logic._data_to_save[-count_window:]) # only do something if there is wavelength data to work with if len(self._wavelength_data) > 0: for i in self._wavelength_data[self._data_index:]: self._data_index += 1 if i[1] < self._xmin or i[1] > self._xmax: continue # calculate the bin the new wavelength needs to go in newbin = np.digitize([i[1]], self.histogram_axis)[0] # if the bin make no sense, start from the beginning if newbin > len(self.rawhisto) - 1: continue # sum the counts in rawhisto and count the occurence of the bin in sumhisto interpolation = np.interp(i[0], xp=temp[:, 0], fp=temp[:, 1]) self.rawhisto[newbin] += interpolation self.sumhisto[newbin] += 1.0 self.envelope_histogram[newbin] = np.max([interpolation, self.envelope_histogram[newbin] ]) datapoint = [i[1], i[0], interpolation] if time.time() - self.last_point_time > 1: self.sig_new_data_point.emit(self.recent_avg) self.last_point_time = time.time() self.recent_count = 0 else: self.recent_count += 1 for j in range(3): self.recent_avg[j] -= self.recent_avg[j] / self.recent_count self.recent_avg[j] += datapoint[j] / self.recent_count # the plot data is the summed counts divided by the occurence of the respective bins self.histogram = self.rawhisto / self.sumhisto def save_data(self, timestamp=None): """ Save the counter trace data and writes it to a file. @param datetime timestamp: timestamp passed from gui so that saved images match filenames of data. This will be removed when savelogic handles the image creation also. @return int: error code (0:OK, -1:error) """ self._saving_stop_time = time.time() filepath = self._save_logic.get_path_for_module(module_name='WavemeterLogger') filelabel = 'wavemeter_log_histogram' # Currently need to pass timestamp from gui so that the saved image matches saved data. # TODO: once the savelogic saves images, we can revert this to always getting timestamp here. 
if timestamp is None: timestamp = datetime.datetime.now() # prepare the data in a dict or in an OrderedDict: data = OrderedDict() data['Wavelength (nm)'] = np.array(self.histogram_axis) data['Signal (counts/s)'] = np.array(self.histogram) # write the parameters: parameters = OrderedDict() parameters['Bins (#)'] = self._bins parameters['Xmin (nm)'] = self._xmin parameters['XMax (nm)'] = self._xmax parameters['Start Time (s)'] = time.strftime('%d.%m.%Y %Hh:%Mmin:%Ss', time.localtime(self._acqusition_start_time) ) parameters['Stop Time (s)'] = time.strftime('%d.%m.%Y %Hh:%Mmin:%Ss', time.localtime(self._saving_stop_time) ) self._save_logic.save_data(data, filepath=filepath, parameters=parameters, filelabel=filelabel, timestamp=timestamp, fmt='%.6e') filelabel = 'wavemeter_log_wavelength' # prepare the data in a dict or in an OrderedDict: data = OrderedDict() data['Time (s), Wavelength (nm)'] = self._wavelength_data # write the parameters: parameters = OrderedDict() parameters['Acquisition Timing (ms)'] = self._logic_acquisition_timing parameters['Start Time (s)'] = time.strftime('%d.%m.%Y %Hh:%Mmin:%Ss', time.localtime(self._acqusition_start_time) ) parameters['Stop Time (s)'] = time.strftime('%d.%m.%Y %Hh:%Mmin:%Ss', time.localtime(self._saving_stop_time) ) self._save_logic.save_data(data, filepath=filepath, parameters=parameters, filelabel=filelabel, timestamp=timestamp, fmt='%.6e') filelabel = 'wavemeter_log_counts' # prepare the data in a dict or in an OrderedDict: data = OrderedDict() data['Time (s),Signal (counts/s)'] = self._counter_logic._data_to_save # write the parameters: parameters = OrderedDict() parameters['Start counting time (s)'] = time.strftime('%d.%m.%Y %Hh:%Mmin:%Ss', time.localtime(self._counter_logic._saving_start_time)) parameters['Stop counting time (s)'] = time.strftime('%d.%m.%Y %Hh:%Mmin:%Ss', time.localtime(self._saving_stop_time)) parameters['Length of counter window (# of events)'] = self._counter_logic._count_length parameters['Count frequency (Hz)'] = self._counter_logic._count_frequency parameters['Oversampling (Samples)'] = self._counter_logic._counting_samples parameters['Smooth Window Length (# of events)'] = self._counter_logic._smooth_window_length self._save_logic.save_data(data, filepath=filepath, parameters=parameters, filelabel=filelabel, timestamp=timestamp, fmt='%.6e') self.log.debug('Laser Scan saved to:\n{0}'.format(filepath)) filelabel = 'wavemeter_log_counts_with_wavelength' # prepare the data in a dict or in an OrderedDict: data = OrderedDict() data['Measurement Time (s), Signal (counts/s), Interpolated Wavelength (nm)'] = np.array(self.counts_with_wavelength) fig = self.draw_figure() # write the parameters: parameters = OrderedDict() parameters['Start Time (s)'] = time.strftime('%d.%m.%Y %Hh:%Mmin:%Ss', time.localtime(self._acqusition_start_time) ) parameters['Stop Time (s)'] = time.strftime('%d.%m.%Y %Hh:%Mmin:%Ss', time.localtime(self._saving_stop_time) ) self._save_logic.save_data(data, filepath=filepath, parameters=parameters, filelabel=filelabel, timestamp=timestamp, plotfig=fig, fmt='%.6e') plt.close(fig) return 0 def draw_figure(self): """ Draw figure to save with data file. @return: fig fig: a matplotlib figure object to be saved to file. 
""" # TODO: Draw plot for second APD if it is connected wavelength_data = [entry[2] for entry in self.counts_with_wavelength] count_data = np.array([entry[1] for entry in self.counts_with_wavelength]) # Index of max counts, to use to position "0" of frequency-shift axis count_max_index = count_data.argmax() # Scale count values using SI prefix prefix = ['', 'k', 'M', 'G'] prefix_index = 0 while np.max(count_data) > 1000: count_data = count_data / 1000 prefix_index = prefix_index + 1 counts_prefix = prefix[prefix_index] # Use qudi style plt.style.use(self._save_logic.mpl_qd_style) # Create figure fig, ax = plt.subplots() ax.plot(wavelength_data, count_data, linestyle=':', linewidth=0.5) ax.set_xlabel('wavelength (nm)') ax.set_ylabel('Fluorescence (' + counts_prefix + 'c/s)') x_formatter = mpl.ticker.ScalarFormatter(useOffset=False) ax.xaxis.set_major_formatter(x_formatter) ax2 = ax.twiny() nm_xlim = ax.get_xlim() ghz_at_max_counts = self.nm_to_ghz(wavelength_data[count_max_index]) ghz_min = self.nm_to_ghz(nm_xlim[0]) - ghz_at_max_counts ghz_max = self.nm_to_ghz(nm_xlim[1]) - ghz_at_max_counts ax2.set_xlim(ghz_min, ghz_max) ax2.set_xlabel('Shift (GHz)') return fig def nm_to_ghz(self, wavelength): """ Convert wavelength to frequency. @param float wavelength: vacuum wavelength @return float: freequency """ return 3e8 / wavelength
tobiasgehring/qudi
logic/wavemeter_logger_logic.py
Python
gpl-3.0
25,790
[ "Gaussian" ]
bc91c622b317ff92fd1c50899caf767f6164793af69fabd13c99e5bd7c9a52ed
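The draw_figure method above builds its second x-axis by converting vacuum wavelengths to GHz (3e8 / wavelength_nm, i.e. the speed of light expressed so the result comes out in GHz) and zeroing the axis at the wavelength with maximum counts. A minimal, self-contained sketch of that conversion, using made-up wavelength and count arrays in place of the logic module's counts_with_wavelength data:

import numpy as np

def nm_to_ghz(wavelength_nm):
    # Approximate speed of light chosen so that c / lambda[nm] yields GHz,
    # matching the constant used in the logic module above.
    return 3e8 / wavelength_nm

# Hypothetical scan data, for illustration only.
wavelengths = np.array([636.98, 637.00, 637.02, 637.04])
counts = np.array([1.2e4, 8.5e4, 3.1e4, 0.9e4])

# Zero of the frequency-shift axis sits at the wavelength with maximum counts.
f0 = nm_to_ghz(wavelengths[counts.argmax()])
shift_ghz = nm_to_ghz(wavelengths) - f0
print(shift_ghz)  # roughly [ 14.8, 0.0, -14.8, -29.6 ] GHz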
""" Library for converting units and creating numpy arrays with automatic unit conversion. The conversion factors are taken directly from SIESTA which means that the number of significant digits are not exact. """ import numpy as _np from copy import copy,deepcopy _def_L = 'Bohr' _def_E = 'Ry' _def_f = 'Ry/Bohr' _def_T = 'K' _def_t = 'fs' _def_M = 'amu' Bohr = 1.0 Ry = 1.0 fs = 1.0 Ang = 1. / 0.529177 eV = 1. / 13.60580 hbar = 6.58211928e-16 * eV * 1.e15 kBar = 1. / 1.47108e5 GPa = kBar * 10. Kelvin = eV / 11604.45 Debye = 0.393430 amu = 2.133107 pi = 3.14159265358979323846264338327950288419716939937510 deg = pi / 180. _ConversionTable = { 'mass' : { 'DEFAULT' : _def_M, 'kg' : 1., 'g' : 1.e-3, 'amu': 1.66054e-27, }, 'length' : { 'DEFAULT' : _def_L, 'm' : 1., 'cm' : 0.01, 'nm' : 1.e-9, 'Ang' : 1.e-10, 'Bohr' : 0.529177e-10, }, 'time' : { 'DEFAULT' : _def_t, 's' : 1. , 'fs' : 1.e-15 , 'ps' : 1.e-12 , 'ns' : 1.e-9 , }, 'energy' : { 'DEFAULT' : _def_E, 'J' : 1., 'erg' : 1.e-7, 'eV' : 1.60219e-19, 'meV' : 1.60219e-22, 'Ry' : 2.17991e-18, 'mRy' : 2.17991e-21, 'Hartree' : 4.35982e-18, 'K' : 1.38066e-23, 'cm**-1' : 1.986e-23, 'kJ/mol' : 1.6606e-21, 'Hz' : 6.6262e-34, 'THz' : 6.6262e-22, 'cm-1' : 1.986e-23, 'cm^-1' : 1.986e-23, }, 'force' : { 'DEFAULT' : _def_f, 'N' : 1., 'eV/Ang' : 1.60219e-9, 'eV/Bohr' : 1.60219e-9*0.529177, 'Ry/Bohr' : 4.11943e-8, 'Ry/Ang' : 4.11943e-8/0.529177, } } # from http://physics.nist.gov/PhysRefData/Elements/ __atom = { 1 : { 'Z' : 1 , 'name' : 'H' , 'amu' : 1.007947 }, 2 : { 'Z' : 2 , 'name' : 'He', 'amu' : 4.002602 }, 3 : { 'Z' : 3 , 'name' : 'Li', 'amu' : 6.9412 }, 4 : { 'Z' : 4 , 'name' : 'Be', 'amu' : 9.012182 }, 5 : { 'Z' : 5 , 'name' : 'B' , 'amu' : 10.8117 }, 6 : { 'Z' : 6 , 'name' : 'C' , 'amu' : 12.01078 }, 7 : { 'Z' : 7 , 'name' : 'N' , 'amu' : 14.00672 }, 8 : { 'Z' : 8 , 'name' : 'O' , 'amu' : 15.99943 }, 9 : { 'Z' : 9 , 'name' : 'F' , 'amu' : 18.9984032 }, 10 : { 'Z' : 10 , 'name' : 'Ne', 'amu' : 20.1797 }, 11 : { 'Z' : 11 , 'name' : 'Na', 'amu' : 22.989770 }, 12 : { 'Z' : 12 , 'name' : 'Mg', 'amu' : 24.30506 }, 13 : { 'Z' : 13 , 'name' : 'Al', 'amu' : 26.9815382 }, 14 : { 'Z' : 14 , 'name' : 'Si', 'amu' : 28.0855 }, 15 : { 'Z' : 15 , 'name' : 'P' , 'amu' : 30.973761 }, 16 : { 'Z' : 16 , 'name' : 'S' , 'amu' : 32.0655 }, 17 : { 'Z' : 17 , 'name' : 'Cl', 'amu' : 35.453 }, 18 : { 'Z' : 18 , 'name' : 'Ar', 'amu' : 39.948 }, 19 : { 'Z' : 19 , 'name' : 'K' , 'amu' : 39.0983 }, 20 : { 'Z' : 20 , 'name' : 'Ca', 'amu' : 40.0784 }, 21 : { 'Z' : 21 , 'name' : 'Sc', 'amu' : 44.955912 }, 22 : { 'Z' : 22 , 'name' : 'Ti', 'amu' : 47.867 }, 23 : { 'Z' : 23 , 'name' : 'V' , 'amu' : 50.9415 }, 24 : { 'Z' : 24 , 'name' : 'Cr', 'amu' : 51.99616 }, 25 : { 'Z' : 25 , 'name' : 'Mn', 'amu' : 54.9380499 }, 26 : { 'Z' : 26 , 'name' : 'Fe', 'amu' : 55.8452 }, 27 : { 'Z' : 27 , 'name' : 'Co', 'amu' : 58.933200 }, 28 : { 'Z' : 28 , 'name' : 'Ni', 'amu' : 58.69342 }, 29 : { 'Z' : 29 , 'name' : 'Cu', 'amu' : 63.5463 }, 30 : { 'Z' : 30 , 'name' : 'Zn', 'amu' : 65.4094 }, 31 : { 'Z' : 31 , 'name' : 'Ga', 'amu' : 69.7231 }, 32 : { 'Z' : 32 , 'name' : 'Ge', 'amu' : 72.64 }, 33 : { 'Z' : 33 , 'name' : 'As', 'amu' : 74.92160 }, 34 : { 'Z' : 34 , 'name' : 'Se', 'amu' : 78.96 }, 35 : { 'Z' : 35 , 'name' : 'Br', 'amu' : 79.904 }, 36 : { 'Z' : 36 , 'name' : 'Kr', 'amu' : 83.798 }, 37 : { 'Z' : 37 , 'name' : 'Rb', 'amu' : 85.4678 }, 38 : { 'Z' : 38 , 'name' : 'Sr', 'amu' : 87.62 }, 39 : { 'Z' : 39 , 'name' : 'Y' , 'amu' : 88.90585 }, 40 : { 'Z' : 40 , 'name' : 'Zr', 'amu' : 91.224 }, 41 : { 'Z' : 
41 , 'name' : 'Nb', 'amu' : 92.90638 }, 42 : { 'Z' : 42 , 'name' : 'Mo', 'amu' : 95.96 }, 44 : { 'Z' : 44 , 'name' : 'Ru', 'amu' : 101.07 }, 45 : { 'Z' : 45 , 'name' : 'Rh', 'amu' : 102.90550 }, 46 : { 'Z' : 46 , 'name' : 'Pd', 'amu' : 106.42 }, 47 : { 'Z' : 47 , 'name' : 'Ag', 'amu' : 107.8682 }, 48 : { 'Z' : 48 , 'name' : 'Cd', 'amu' : 112.411 }, 49 : { 'Z' : 49 , 'name' : 'In', 'amu' : 114.818 }, 50 : { 'Z' : 50 , 'name' : 'Sn', 'amu' : 118.710 }, 51 : { 'Z' : 51 , 'name' : 'Sb', 'amu' : 121.760 }, 52 : { 'Z' : 52 , 'name' : 'Te', 'amu' : 127.60 }, 53 : { 'Z' : 53 , 'name' : 'I' , 'amu' : 126.90447 }, 54 : { 'Z' : 54 , 'name' : 'Xe', 'amu' : 131.293 }, 55 : { 'Z' : 55 , 'name' : 'Cs', 'amu' : 132.9054519 }, 56 : { 'Z' : 56 , 'name' : 'Ba', 'amu' : 137.327 }, 57 : { 'Z' : 57 , 'name' : 'La', 'amu' : 138.905477 }, 58 : { 'Z' : 58 , 'name' : 'Ce', 'amu' : 140.116 }, 59 : { 'Z' : 59 , 'name' : 'Pr', 'amu' : 140.90765 }, 60 : { 'Z' : 60 , 'name' : 'Nd', 'amu' : 144.242 }, 62 : { 'Z' : 62 , 'name' : 'Sm', 'amu' : 150.36 }, 63 : { 'Z' : 63 , 'name' : 'Eu', 'amu' : 151.964 }, 64 : { 'Z' : 64 , 'name' : 'Gd', 'amu' : 157.25 }, 65 : { 'Z' : 65 , 'name' : 'Tb', 'amu' : 158.92535 }, 66 : { 'Z' : 66 , 'name' : 'Dy', 'amu' : 162.500 }, 67 : { 'Z' : 67 , 'name' : 'Ho', 'amu' : 164.93032 }, 68 : { 'Z' : 68 , 'name' : 'Er', 'amu' : 167.259 }, 69 : { 'Z' : 69 , 'name' : 'Tm', 'amu' : 168.93421 }, 70 : { 'Z' : 70 , 'name' : 'Yb', 'amu' : 173.054 }, 71 : { 'Z' : 71 , 'name' : 'Lu', 'amu' : 174.9668 }, 72 : { 'Z' : 72 , 'name' : 'Hf', 'amu' : 178.49 }, 73 : { 'Z' : 73 , 'name' : 'Ta', 'amu' : 180.94788 }, 74 : { 'Z' : 74 , 'name' : 'W' , 'amu' : 183.84 }, 75 : { 'Z' : 75 , 'name' : 'Re', 'amu' : 186.207 }, 76 : { 'Z' : 76 , 'name' : 'Os', 'amu' : 190.23 }, 77 : { 'Z' : 77 , 'name' : 'Ir', 'amu' : 192.217 }, 78 : { 'Z' : 78 , 'name' : 'Pt', 'amu' : 195.0782 }, 79 : { 'Z' : 79 , 'name' : 'Au', 'amu' : 196.966552 }, 80 : { 'Z' : 80 , 'name' : 'Hg', 'amu' : 200.59 }, 81 : { 'Z' : 81 , 'name' : 'Tl', 'amu' : 204.3833 }, 82 : { 'Z' : 82 , 'name' : 'Pb', 'amu' : 207.2 }, 83 : { 'Z' : 83 , 'name' : 'Bi', 'amu' : 208.98040 }, } # Apply the names to the dictionary so that lookups can be made from index or from names => # __atom['He'] == __atom[2] __atom.update(dict([__atom[k]['name'],v] for k,v in __atom.iteritems())) # 1001 : 2.016, # Deuterium # 2001 : 15.99943, # FO mix: (1-x) O + x F, x = 0.000 # 2002 : 16.186865825, # x = 0.063 # 2003 : 16.37430165, # 0.125 # 2004 : 16.7491733, # 0.250 # 2005 : 16.59922464, # 0.200 # 2006 : 16.89912196 # 0.300 # Perhaps this should be alterred into a class so users # can append other elements? def AtomMass(atom,unit='amu'): return __atom[atom]['amu'] * UnitConvert('amu',Unit(unit)) def AtomName(atom): return __atom[atom]['name'] def AtomZ(atom): return __atom[atom]['Z'] # Here we start the unit type conversion library class UnknownUnitTypeError(Exception): """ Error raised when unittype of a unit cannot be found. """ pass # We utilize the generic interface def UnitType(unit): """ Returns the type of unit that is associated with input unit. Parameters ---------- unit : str unit, e.g. kg, Ang, eV etc. 
returns the Examples -------- >>> import sids.helper.units as shu >>> shu.UnitType('kg') 'mass' >>> shu.UnitType('eV') 'energy' """ for k in _ConversionTable: try: if unit['unit'] in _ConversionTable[k]: return k except: try: if unit in _ConversionTable[k]: return k except: pass raise UnknownUnitTypeError('The unit "'+str(k)+'" could not be located in the table.') class UnknownUnitError(Exception): """ Error raised when a unit cannot be found. """ pass # We utilize the generic interface def UnitConvert(fr,to,opts={}): """ Returns the factor that takes 'fr' to the units of 'to'. Parameters ---------- fr : starting unit to : ending unit opts : controls whether the unit conversion is in powers or fractional units Examples ------- >>> import sids.helper.units as shu >>> shu.UnitConvert('kg','g') 1000 >>> shu.UnitConvert('eV','J') 1.60219e-19 """ # In the case that the conversion to is None, we should do nothing. if to is None: return 1. fr = Unit(fr) # ensure that it is a unit to = Unit(to) # ensure that it is a unit frU = None ; toU = None frV = None ; toV = None # Check that the unit types live in the same # space # TODO this currently does not handle if powers are taken into # consideration. for k in _ConversionTable: if fr.unit in _ConversionTable[k]: frU = k frV = _ConversionTable[k][fr.unit] if to.unit in _ConversionTable[k]: toU = k toV = _ConversionTable[k][to.unit] if frU != toU: raise Exception('The unit conversion is not from the same group: '+frU+' to '+toU) # Calculate conversion factor val = frV / toV for opt in ['^','power','p']: if opt in opts: val = val ** opts[opt] for opt in ['*','factor','fac']: if opt in opts: val = val * opts[opt] for opt in ['/','divide','div']: if opt in opts: val = val / opts[opt] return val # A single unit-object. # Contains functions to compare and convert a unit # to another unit. class Unit(object): """ Container for the unit and the conversion factors etc. This will make it easier to maintain the units, and eventually change the usage. """ def __new__(cls,*args,**kwargs): if isinstance(args[0],Unit): return args[0] #print('Creating new unit:',args) obj = object.__new__(cls) if len(args) == 1: # We are creating a unit without a variable name obj.variable = None obj.unit = args[0] else: obj.variable = args[0] # Typical case when passing a unit from another variable... if isinstance(args[1],Unit): obj.unit = args[1].unit else: obj.unit = args[1] # We need to handle some type of operator definitions # But how to handle them? for op in ['**','^','/','*']: pass return obj def type(self): """ Returns the type of unit this is, i.e. energy, length, time, etc. """ for k,v in _ConversionTable.iteritems(): if self.unit in v: return k def SI(self): """ Returns the SI conversion factor for the unit """ for k,v in _ConversionTable.iteritems(): if self.variable in v: return v[self.variable] def convert(self,to): """ Convert this unit to another and returns the conversion factor. """ u = Unit(to) # This will raise an exception if the units are not of same type... 
conv = UnitConvert(self.unit,u.unit) #print('Converting:',self.variable,self.unit,u.unit) self.unit = deepcopy(u.unit) return conv def copy(self): """Method for copying the unit """ return deepcopy(self) def __repr__(self): """ Return the unit in string format (XML type-like)""" return "<Unit variable='"+str(self.variable)+"' unit='"+str(self.unit)+"'/>" def __eq__(self,other): """ Returns true if the variable is the same as the other """ return self.variable == other.variable def __copy__(self): return Unit(copy(self.variable),copy(self.unit)) def __deepcopy__(self, memo): return Unit(deepcopy(self.variable),deepcopy(self.unit)) class Units(object): """ Container for many units. This will make it easier to maintain the units, and eventually change the usage. """ def __new__(cls,*args): # Convert the tuple to a list... obj = object.__new__(cls) # The args are a list of Unit-objects, or a list of pairs which should be converted to a list of units. units = [] i = 0 while i < len(args): if isinstance(args[i],Unit): units.append(deepcopy(args[i])) else: assert i < len(args)-1, 'Can not grap a unit for: ' + str(args[i]) units.append(deepcopy(Unit(args[i],args[i+1]))) i += 1 i += 1 obj._units = units return obj def append(self,unit): """ Append a unit object """ # We cannot have to similar units assigned... if isinstance(unit,Units): for au in unit: # Use the recursive routine (keep it simple) self.append(au) else: for u in self: if u == unit: raise Exception('Can not append a unit which already exists. Do not assign dublicate variables') self._units.append(deepcopy(unit)) def update(self,unit): """ Updates unit object, adds it if it does not exist """ if unit is None: return if isinstance(unit,Units): for u in unit: self.update(u) else: for u in self: if u.variable == unit.variable: u.unit = deepcopy(unit.unit) return self.append(unit) def unit(self,variable): """ Returns the unit object associated with the variable named variable""" # if it is none, return fast. if not variable: return None for i in self: if i.variable == variable: return i return None def copy(self): """ Copies this unit segment """ return deepcopy(self) ################# # General routines overwriting python models ################# def __len__(self): return len(self._units) def __contains__(self,item): if isinstance(item,Unit): u = Unit(item.variable,None) else: u = Unit(item,None) for unit in self: if u.variable == unit.variable: return True return False def __repr__(self): """ Return the unit in string format (XML type-like)""" tmp = '<Units>' for unit in self: tmp += '\n ' + str(unit) tmp += '\n</Units>' return tmp def __iter__(self): """ An iterator of the Units collection """ for unit in self._units: yield unit def __delitem__(self,variable): """ Remove the variable from the units list. """ for i in range(len(self)): if self._units[i].variable == variable: del self._units[i] return # We need to overwrite the copy mechanisms. # It really is a pain in the ass, but it works. # Luckily all copying need only be refered in the Unit-object. def __copy__(self): units = Units() for unit in self: units.append(copy(unit)) return units def __deepcopy__(self, memo): units = Units() for unit in self: units.append(deepcopy(unit)) return units # Do NOT implement a 'convert' method. It could potentially lead to unexpected behaviour as the # Unit-object needs to handle this.... # TODO consider the conversion of a list of Unit-objects via the Units-object. class UnitObject(object): """ Contains relevant information about units etc. 
""" def convert(self,*units): """ Convert all entries in the object to the desired units given by the input. """ # Go back in the units variable does not exist. if not '_units' in self.__dict__: return # If it is a Units object, we can simply loop and do the recursive conversion. if isinstance(units[0],Units): for unit in units[0]: self.convert(unit) return # First convert all variables associated with a type... ('length',etc.) # This well enable one to convert all of length but still have a unit conversion of a # single length variable to another. for unit in units: u = Unit(unit) if not u.variable: for self_u in self._units: if self_u.type() == u.type(): self.__dict__[self_u.variable] *= self_u.convert(u) # Now convert the specific requested units. for unit in units: u = Unit(unit) self_u = self.unit(u.variable) if self_u: self.__dict__[self_u.variable] *= self_u.convert(u) def unit(self,variable): """ Returns the unit that is associated with the variable """ return self._units.unit(variable) @property def units(self): """ Returns the units that is associated with the variable """ return self._units class Variable_ndarray(_np.ndarray): """ Numpy array with automatic unit conversion. When two arrays are multiplied we can automatically detect units and convert to the correct units. Creating a variable with Variable_ndarray we gain access to convert which can convert the unit of the variable. """ def convert(self,unit): """ Convert all entries in the object to the desired units given by the input. """ # Go back in the units variable does not exist. if not '_units' in self.__dict__: return # If it is a Units object, # we can simply loop and do the recursive conversion. if isinstance(unit,Units): for u in unit: self.convert(u) return # Ensure that unit is a Unit u = Unit(unit) # Loop over all variables in this object. # It only has one for i in self._units: if i.type() == u.type(): self[:] *= i.convert(u) def add_unit(self,var,unit): """ Adds a unit to a variable beloning to the object """ def unit(self,variable='self'): """ Returns the unit that is associated with the variable """ return self._units.unit(variable) @property def units(self): """ Returns the units that is associated with the variable """ return self._units @staticmethod def _N(array): return _np.array(array) def __array_finalize__(self,obj): """ Finalize the array with the object """ if obj is None: return # Create the default units, we need to copy them, to ensure # that we do not attach the same objects. if hasattr(obj,'_units'): self._units = deepcopy(obj._units) else: self._units = deepcopy(self._UNITS) if hasattr(self,'__variable_finalize__'): self.__variable_finalize__()
zerothi/siesta-es
sids/helper/units.py
Python
gpl-3.0
19,656
[ "SIESTA" ]
a08dda7d660cb6a8b885a3c2687923977965e69fa2f55c496c645c66be975bf0
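UnitConvert above derives its factor by dividing the SI values stored for the two units in _ConversionTable (after checking that both belong to the same unit type). A small standalone sketch of that lookup, with a few energy entries copied from the table so it runs without importing the module; the helper name convert_factor is made up for illustration:

# Energy entries copied from the module's _ConversionTable (SI values in joule).
ENERGY_SI = {
    'J': 1.0,
    'eV': 1.60219e-19,
    'Ry': 2.17991e-18,
}

def convert_factor(fr, to, table=ENERGY_SI):
    # Factor that takes a value expressed in unit `fr` into unit `to`.
    return table[fr] / table[to]

print(convert_factor('eV', 'J'))   # 1.60219e-19, matching the docstring example above
print(convert_factor('eV', 'Ry'))  # about 0.0735, i.e. 1 eV is roughly 0.0735 Ry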
######################################################################## # $HeadURL$ # File : BuildCloudinitScript.py # Author : Victor Mendez ######################################################################## """ This class construct a cloudinit script for a DIRAC image and IaaS endpoint """ import os import sys # DIRAC from DIRAC import gLogger, S_OK, S_ERROR, gConfig __RCSID__ = "$Id: $" class BuildCloudinitScript: def buildCloudinitScript( self, imageConfig, endpointConfig, runningPodRequirements, instanceID = None ): # logger self.log = gLogger.getSubLogger( self.__class__.__name__ ) contextMethod = imageConfig[ 'contextMethod' ] if contextMethod == 'cloudinit': cvmfs_http_proxy = endpointConfig.get( 'cvmfs_http_proxy' ) siteName = endpointConfig.get( 'siteName' ) cloudDriver = endpointConfig.get( 'cloudDriver' ) vmStopPolicy = endpointConfig.get( 'vmStopPolicy' ) imageName = imageConfig.get( 'DIRACImageName' ) contextConfig = imageConfig.get( 'contextConfig' ) vmKeyPath = contextConfig[ 'vmKeyPath' ] vmCertPath = contextConfig[ 'vmCertPath' ] vmContextualizeScriptPath = contextConfig[ 'vmContextualizeScriptPath' ] vmRunJobAgentURL = contextConfig[ 'vmRunJobAgentURL' ] vmRunVmMonitorAgentURL = contextConfig[ 'vmRunVmMonitorAgentURL' ] vmRunVmUpdaterAgentURL = contextConfig[ 'vmRunVmUpdaterAgentURL' ] vmRunLogAgentURL = contextConfig[ 'vmRunLogAgentURL' ] vmCvmfsContextURL = contextConfig[ 'vmCvmfsContextURL' ] vmDiracContextURL = contextConfig[ 'vmDiracContextURL' ] result = self.__buildCloudinitScript( DIRACImageName = imageName, siteName = siteName, cloudDriver = cloudDriver, cvmfs_http_proxy = cvmfs_http_proxy, vmStopPolicy = vmStopPolicy, contextMethod = contextMethod, vmCertPath = vmCertPath, vmKeyPath = vmKeyPath, vmContextualizeScriptPath = vmContextualizeScriptPath, vmRunJobAgentURL = vmRunJobAgentURL, vmRunVmMonitorAgentURL = vmRunVmMonitorAgentURL, vmRunVmUpdaterAgentURL = vmRunVmUpdaterAgentURL, vmRunLogAgentURL = vmRunLogAgentURL, vmCvmfsContextURL = vmCvmfsContextURL, vmDiracContextURL = vmDiracContextURL, runningPodRequirements = runningPodRequirements, instanceID = instanceID ) elif contextMethod == 'ssh': result = S_ERROR( 'ssh context method found instead of cloudinit method' ) elif contextMethod == 'adhoc': result = S_ERROR( 'adhoc context method found instead of cloudinit method' ) elif contextMethod == 'amiconfig': result = S_ERROR( 'amiconfig context method found instead of cloudinit method' ) else: result = S_ERROR( '%s is not a known NovaContext method' % contextMethod ) return result def __buildCloudinitScript( self, DIRACImageName, siteName, cloudDriver, cvmfs_http_proxy, vmStopPolicy, contextMethod, vmCertPath, vmKeyPath, vmContextualizeScriptPath, vmRunJobAgentURL, vmRunVmMonitorAgentURL, vmRunVmUpdaterAgentURL, vmRunLogAgentURL, vmCvmfsContextURL, vmDiracContextURL, runningPodRequirements, instanceID ): # The function return S_OK with the name of the created cloudinit script # If the cloudinit context script was previously created, then overwriten cloudinitPath = '/tmp/cloudinit_' + DIRACImageName + '_' + siteName + '_' + str(instanceID) + '.sh' file=open(cloudinitPath, 'w') #start writing the script file.write('#!/bin/bash\n') #buildin the necesary arguments putCertPath = "/root/vmservicecert.pem" file.write('putCertPath=%s\n' % (putCertPath)) putKeyPath = "/root/vmservicekey.pem" file.write('putKeyPath=%s\n' % (putKeyPath)) file.write('vmRunJobAgentURL=%s\n' % (vmRunJobAgentURL)) file.write('vmRunVmMonitorAgentURL=%s\n' % (vmRunVmMonitorAgentURL)) 
file.write('vmRunVmUpdaterAgentURL=%s\n' % (vmRunVmUpdaterAgentURL)) file.write('vmRunLogAgentURL=%s\n' % (vmRunLogAgentURL)) file.write('vmCvmfsContextURL=%s\n' % (vmCvmfsContextURL)) file.write('vmDiracContextURL=%s\n' % (vmDiracContextURL)) file.write('cvmfs_http_proxy=\"%s\"\n' % (cvmfs_http_proxy)) file.write('siteName=%s\n' % (siteName)) file.write('cloudDriver=%s\n' % (cloudDriver)) file.write('vmStopPolicy=%s\n' % (vmStopPolicy)) file.write('instanceID=%s\n' % (instanceID)) # dynamic runningPod requirements for LocalSite file.write("cat << 'EOF' > /root/LocalSiteRequirements\n") for key, value in runningPodRequirements.items(): if type(value) is list: file.write('%s=%s\n' % (key,','.join(value))) else: file.write('%s=%s\n' % (key,value)) file.write("EOF\n") # 0) Previous copy of necessary files using build in cloudinit script # 0.1) DIRAC service public key pubkeyPath = os.path.expanduser( '~/.ssh/id_rsa.pub' ) file.write("cat << 'EOF' > /root/.ssh/authorized_keys\n") try: with open(pubkeyPath) as fp: for line in fp: file.write(line) except Exception, errmsg: return S_ERROR( errmsg ) file.write("EOF\n") # VM DIRAC service cert file.write("cat << 'EOF' > %s\n" % (putCertPath)) try: with open(vmCertPath) as fp: for line in fp: file.write(line) except Exception, errmsg: return S_ERROR( errmsg ) file.write("EOF\n") # VM DIRAC service key file.write("cat << 'EOF' > %s\n" % (putKeyPath)) try: with open(vmKeyPath) as fp: for line in fp: file.write(line) except Exception, errmsg: return S_ERROR( errmsg ) file.write("EOF\n") #now the static part of the cloudinit try: with open(vmContextualizeScriptPath) as fp: for line in fp: file.write(line) except Exception, errmsg: return S_ERROR( errmsg ) file.close() return S_OK(cloudinitPath) #............................................................................... #EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
vmendez/VMDIRAC
WorkloadManagementSystem/Client/BuildCloudinitScript.py
Python
gpl-3.0
7,515
[ "DIRAC" ]
73ec951ebb2983b278711958ff80f2498cd5c9707ec8413bfe74f04c75653084
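The script builder above recreates local files (service key, certificate, the SSH public key) on the VM by writing them into quoted bash here-documents. A hedged sketch of that pattern on its own; the names local_pubkey.txt and cloudinit_example.sh are hypothetical placeholders, not paths used by VMDIRAC:

def embed_file(script, local_path, remote_path):
    # Quoted 'EOF' keeps the payload literal, so the VM rewrites the file verbatim at boot.
    script.write("cat << 'EOF' > %s\n" % remote_path)
    with open(local_path) as fp:
        for line in fp:
            script.write(line)
    script.write("EOF\n")

with open('cloudinit_example.sh', 'w') as script:
    script.write('#!/bin/bash\n')
    embed_file(script, 'local_pubkey.txt', '/root/.ssh/authorized_keys')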
from math import pi try: import openmm.unit as u except ImportError: # OpenMM < 7.6 import simtk.unit as u kB = u.BOLTZMANN_CONSTANT_kB * u.AVOGADRO_CONSTANT_NA # OpenMM constant for Coulomb interactions in OpenMM units # (openmm/platforms/reference/include/SimTKOpenMMRealType.h) # TODO: Replace this with an import from openmm.constants once available E_CHARGE = 1.602176634e-19 * u.coulomb EPSILON0 = 1e-6*8.8541878128e-12/(u.AVOGADRO_CONSTANT_NA*E_CHARGE**2) * u.farad/u.meter ONE_4PI_EPS0 = 1/(4*pi*EPSILON0) * EPSILON0.unit # we need it unitless # Standard-state volume for a single molecule in a box of size (1 L) / (avogadros number). LITER = 1000.0 * u.centimeters**3 STANDARD_STATE_VOLUME = LITER / (u.AVOGADRO_CONSTANT_NA*u.mole)
choderalab/openmmtools
openmmtools/constants.py
Python
mit
754
[ "OpenMM" ]
eee11d88e2fefde2780a6940e6f4ddc94273066fc7117e407e9d16ca6c423dcb
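In OpenMM's internal unit system (nanometres, kilojoules per mole, elementary charges) the unitless ONE_4PI_EPS0 above works out to roughly 138.935, so it can be used directly as the Coulomb prefactor. A short sketch of using the constants, assuming inputs in those internal units:

try:
    import openmm.unit as u
except ImportError:  # OpenMM < 7.6
    import simtk.unit as u

from openmmtools.constants import kB, ONE_4PI_EPS0

# Coulomb energy of a +1e / -1e pair separated by 0.3 nm:
# with charges in elementary charges and distance in nm, the result is in kJ/mol.
q1, q2, r_nm = 1.0, -1.0, 0.3
print(ONE_4PI_EPS0 * q1 * q2 / r_nm)  # about -463 kJ/mol

# kB already carries Avogadro's number, so kB * T is a molar thermal energy.
kT = kB * 300.0 * u.kelvin
print(kT.in_units_of(u.kilojoule_per_mole))  # about 2.49 kJ/mol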
# -*- coding: utf-8 -*- from __future__ import unicode_literals, division, absolute_import, print_function from builtins import * # pylint: disable=unused-import, redefined-builtin from past.builtins import basestring import logging import re import time from datetime import datetime, timedelta from dateutil.parser import parse as dateutil_parse from sqlalchemy import Table, Column, Integer, String, Unicode, Date, DateTime, Time, or_, and_ from sqlalchemy.orm import relation from sqlalchemy.schema import ForeignKey from flexget import db_schema from flexget import plugin from flexget.event import event from flexget.terminal import console from flexget.manager import Session from flexget.plugin import get_plugin_by_name from flexget.utils import requests from flexget.utils.database import with_session, json_synonym from flexget.utils.simple_persistence import SimplePersistence from flexget.utils.tools import TimedDict Base = db_schema.versioned_base('api_trakt', 6) AuthBase = db_schema.versioned_base('trakt_auth', 0) log = logging.getLogger('api_trakt') # Production Site CLIENT_ID = '57e188bcb9750c79ed452e1674925bc6848bd126e02bb15350211be74c6547af' CLIENT_SECRET = 'db4af7531e8df678b134dbc22445a2c04ebdbdd7213be7f5b6d17dfdfabfcdc2' API_URL = 'https://api-v2launch.trakt.tv/' PIN_URL = 'http://trakt.tv/pin/346' # Stores the last time we checked for updates for shows/movies updated = SimplePersistence('api_trakt') # Oauth account authentication class TraktUserAuth(AuthBase): __tablename__ = 'trakt_user_auth' account = Column(Unicode, primary_key=True) access_token = Column(Unicode) refresh_token = Column(Unicode) created = Column(DateTime) expires = Column(DateTime) def __init__(self, account, access_token, refresh_token, created, expires): self.account = account self.access_token = access_token self.refresh_token = refresh_token self.expires = token_expire_date(expires) self.created = token_created_date(created) def token_expire_date(expires): return datetime.now() + timedelta(seconds=expires) def token_created_date(created): return datetime.fromtimestamp(created) def device_auth(): data = {'client_id': CLIENT_ID} try: r = requests.post(get_api_url('oauth/device/code'), data=data).json() device_code = r['device_code'] user_code = r['user_code'] expires_in = r['expires_in'] interval = r['interval'] console('Please visit {0} and authorize Flexget. Your user code is {1}. Your code expires in ' '{2} minutes.'.format(r['verification_url'], user_code, expires_in / 60.0)) log.debug('Polling for user authorization.') data['code'] = device_code data['client_secret'] = CLIENT_SECRET end_time = time.time() + expires_in console('Waiting...', end='') # stop polling after expires_in seconds while time.time() < end_time: time.sleep(interval) polling_request = requests.post(get_api_url('oauth/device/token'), data=data, raise_status=False) if polling_request.status_code == 200: # success return polling_request.json() elif polling_request.status_code == 400: # pending -- waiting for user console('...', end='') elif polling_request.status_code == 404: # not found -- invalid device_code raise plugin.PluginError('Invalid device code. 
Open an issue on Github.') elif polling_request.status_code == 409: # already used -- user already approved raise plugin.PluginError('User code has already been approved.') elif polling_request.status_code == 410: # expired -- restart process break elif polling_request.status_code == 418: # denied -- user denied code raise plugin.PluginError('User code has been denied.') elif polling_request.status_code == 429: # polling too fast log.warning('Polling too quickly. Upping the interval. No action required.') interval += 1 raise plugin.PluginError('User code has expired. Please try again.') except requests.RequestException as e: raise plugin.PluginError('Device authorization with Trakt.tv failed: {0}'.format(e)) def token_auth(data): try: return requests.post(get_api_url('oauth/token'), data=data).json() except requests.RequestException as e: raise plugin.PluginError('Token exchange with trakt failed: {0}'.format(e)) def delete_account(account): with Session() as session: acc = session.query(TraktUserAuth).filter(TraktUserAuth.account == account).first() if not acc: raise plugin.PluginError('Account %s not found.' % account) session.delete(acc) def get_access_token(account, token=None, refresh=False, re_auth=False, called_from_cli=False): """ Gets authorization info from a pin or refresh token. :param account: Arbitrary account name to attach authorization to. :param unicode token: The pin or refresh token, as supplied by the trakt website. :param bool refresh: If True, refresh the access token using refresh_token from db. :param bool re_auth: If True, account is re-authorized even if it already exists in db. :raises RequestException: If there is a network error while authorizing. """ data = { 'client_id': CLIENT_ID, 'client_secret': CLIENT_SECRET } with Session() as session: acc = session.query(TraktUserAuth).filter(TraktUserAuth.account == account).first() if acc and datetime.now() < acc.expires and not refresh and not re_auth: return acc.access_token else: if acc and (refresh or datetime.now() >= acc.expires) and not re_auth: log.debug('Using refresh token to re-authorize account %s.', account) data['refresh_token'] = acc.refresh_token data['grant_type'] = 'refresh_token' token_dict = token_auth(data) elif token: # We are only in here if a pin was specified, so it's safe to use console instead of logging console('Warning: PIN authorization has been deprecated. Use Device Authorization instead.') data['code'] = token data['grant_type'] = 'authorization_code' data['redirect_uri'] = 'urn:ietf:wg:oauth:2.0:oob' token_dict = token_auth(data) elif called_from_cli: log.debug('No pin specified for an unknown account %s. Attempting to authorize device.', account) token_dict = device_auth() else: raise plugin.PluginError('Account %s has not been authorized. See `flexget trakt auth -h` on how to.' 
% account) try: access_token = token_dict['access_token'] refresh_token = token_dict['refresh_token'] created_at = token_dict.get('created_at', time.time()) expires_in = token_dict['expires_in'] if acc: acc.access_token = access_token acc.refresh_token = refresh_token acc.created = token_created_date(created_at) acc.expires = token_expire_date(expires_in) else: acc = TraktUserAuth(account, access_token, refresh_token, created_at, expires_in) session.add(acc) return access_token except requests.RequestException as e: raise plugin.PluginError('Token exchange with trakt failed: {0}'.format(e)) def set_image_attributes(obj, data): for image, images in data['images'].items(): for size, url in images.items(): setattr(obj, 'image_%s_%s' % (image, size), url) def make_list_slug(name): """Return the slug for use in url for given list name.""" slug = name.lower() # These characters are just stripped in the url for char in '!@#$%^*()[]{}/=?+\\|': slug = slug.replace(char, '') # These characters get replaced slug = slug.replace('&', 'and') slug = slug.replace(' ', '-') return slug def get_session(account=None, token=None): """ Creates a requests session ready to talk to trakt API with FlexGet's api key. Can also add user level authentication if `account` parameter is given. :param account: An account authorized via `flexget trakt auth` CLI command. If given, returned session will be authenticated for that account. """ # default to username if account name is not specified session = requests.Session() session.headers = { 'Content-Type': 'application/json', 'trakt-api-version': '2', 'trakt-api-key': CLIENT_ID, } if account: access_token = get_access_token(account, token) if account else None if access_token: session.headers.update({'Authorization': 'Bearer %s' % access_token}) return session def get_api_url(*endpoint): """ Get the address of a trakt API endpoint. :param endpoint: Can by a string endpoint (e.g. 'sync/watchlist') or an iterable (e.g. ('sync', 'watchlist') Multiple parameters can also be specified instead of a single iterable. :returns: The absolute url to the specified API endpoint. """ if len(endpoint) == 1 and not isinstance(endpoint[0], basestring): endpoint = endpoint[0] # Make sure integer portions are turned into strings first too url = API_URL + '/'.join(map(str, endpoint)) return url @db_schema.upgrade('api_trakt') def upgrade(ver, session): if ver is None or ver <= 5: raise db_schema.UpgradeImpossible return ver def get_entry_ids(entry): """Creates a trakt ids dict from id fields on an entry. 
Prefers already populated info over lazy lookups.""" ids = {} for lazy in [False, True]: if entry.get('trakt_movie_id', eval_lazy=lazy): ids['trakt'] = entry['trakt_movie_id'] elif entry.get('trakt_show_id', eval_lazy=lazy): ids['trakt'] = entry['trakt_show_id'] elif entry.get('trakt_episode_id', eval_lazy=lazy): ids['trakt'] = entry['trakt_episode_id'] if entry.get('tmdb_id', eval_lazy=lazy): ids['tmdb'] = entry['tmdb_id'] if entry.get('tvdb_id', eval_lazy=lazy): ids['tvdb'] = entry['tvdb_id'] if entry.get('imdb_id', eval_lazy=lazy): ids['imdb'] = entry['imdb_id'] if entry.get('tvrage_id', eval_lazy=lazy): ids['tvrage'] = entry['tvrage_id'] if ids: break return ids class TraktMovieTranslation(Base): __tablename__ = 'trakt_movie_translations' id = Column(Integer, primary_key=True, autoincrement=True) language = Column(Unicode) overview = Column(Unicode) tagline = Column(Unicode) title = Column(Unicode) movie_id = Column(Integer, ForeignKey('trakt_movies.id')) def __init__(self, translation, session): super(TraktMovieTranslation, self).__init__() self.update(translation, session) def update(self, translation, session): for col in translation.keys(): setattr(self, col, translation.get(col)) class TraktShowTranslation(Base): __tablename__ = 'trakt_show_translations' id = Column(Integer, primary_key=True, autoincrement=True) language = Column(Unicode) overview = Column(Unicode) title = Column(Unicode) show_id = Column(Integer, ForeignKey('trakt_shows.id')) def __init__(self, translation, session): super(TraktShowTranslation, self).__init__() self.update(translation, session) def update(self, translation, session): for col in translation.keys(): setattr(self, col, translation.get(col)) def get_translations(ident, style): url = get_api_url(style + 's', ident, 'translations') trakt_translation = TraktShowTranslation if style == 'show' else TraktMovieTranslation trakt_translation_id = getattr(trakt_translation, style + '_id') translations = [] req_session = get_session() try: results = req_session.get(url, params={'extended': 'full,images'}).json() with Session() as session: for result in results: translation = session.query(trakt_translation).filter(and_( trakt_translation.language == result.get('language'), trakt_translation_id == ident)).first() if not translation: translation = trakt_translation(result, session) translations.append(translation) return translations except requests.RequestException as e: log.debug('Error adding translations to trakt id %s: %s', ident, e) class TraktGenre(Base): __tablename__ = 'trakt_genres' name = Column(Unicode, primary_key=True) show_genres_table = Table('trakt_show_genres', Base.metadata, Column('show_id', Integer, ForeignKey('trakt_shows.id')), Column('genre_id', Unicode, ForeignKey('trakt_genres.name'))) Base.register_table(show_genres_table) movie_genres_table = Table('trakt_movie_genres', Base.metadata, Column('movie_id', Integer, ForeignKey('trakt_movies.id')), Column('genre_id', Unicode, ForeignKey('trakt_genres.name'))) Base.register_table(movie_genres_table) class TraktActor(Base): __tablename__ = 'trakt_actors' id = Column(Integer, primary_key=True, nullable=False) name = Column(Unicode) slug = Column(Unicode) tmdb = Column(Integer) imdb = Column(Unicode) biography = Column(Unicode) birthday = Column(Date) death = Column(Date) homepage = Column(Unicode) image_headshot_full = Column(Unicode) image_headshot_medium = Column(Unicode) image_headshot_thumb = Column(Unicode) image_fanart_full = Column(Unicode) image_fanart_medium = Column(Unicode) 
image_fanart_thumb = Column(Unicode) def __init__(self, actor, session): super(TraktActor, self).__init__() self.update(actor, session) @property def main_image(self): for size in ['medium', 'full', 'thumb']: for image_type in ['headshot', 'fanart']: if getattr(self, 'image_%s_%s' % (image_type, size)) is not None: return getattr(self, 'image_%s_%s' % (image_type, size)) def update(self, actor, session): if self.id and self.id != actor.get('ids').get('trakt'): raise Exception('Tried to update db actors with different actor data') elif not self.id: self.id = actor.get('ids').get('trakt') self.name = actor.get('name') ids = actor.get('ids') self.imdb = ids.get('imdb') self.slug = ids.get('slug') self.tmdb = ids.get('tmdb') self.biography = actor.get('biography') if actor.get('birthday'): self.birthday = dateutil_parse(actor.get('birthday')) if actor.get('death'): self.death = dateutil_parse(actor.get('death')) self.homepage = actor.get('homepage') if actor.get('images'): set_image_attributes(self, actor) def to_dict(self): return { 'name': self.name, 'trakt_id': self.id, 'imdb_id': self.imdb, 'tmdb_id': self.tmdb, 'images': { 'headshot': { 'full': self.image_headshot_full, 'medium': self.image_headshot_medium, 'thumb': self.image_headshot_thumb }, 'fanart': { 'full': self.image_fanart_full, 'medium': self.image_fanart_medium, 'thumb': self.image_fanart_thumb } }, "main_image": self.main_image } show_actors_table = Table('trakt_show_actors', Base.metadata, Column('show_id', Integer, ForeignKey('trakt_shows.id')), Column('actors_id', Integer, ForeignKey('trakt_actors.id'))) Base.register_table(show_actors_table) movie_actors_table = Table('trakt_movie_actors', Base.metadata, Column('movie_id', Integer, ForeignKey('trakt_movies.id')), Column('actors_id', Integer, ForeignKey('trakt_actors.id'))) Base.register_table(movie_actors_table) def get_db_actors(ident, style): actors = [] url = get_api_url(style + 's', ident, 'people') req_session = get_session() try: results = req_session.get(url, params={'extended': 'full,images'}).json() with Session() as session: for result in results.get('cast'): trakt_id = result.get('person').get('ids').get('trakt') actor = session.query(TraktActor).filter(TraktActor.id == trakt_id).first() if not actor: actor = TraktActor(result.get('person'), session) actors.append(actor) return actors except requests.RequestException as e: log.debug('Error searching for actors for trakt id %s', e) return def get_translations_dict(translate, style): res = {} for lang in translate: info = { 'overview': lang.overview, 'title': lang.title, } if style == 'movie': info['tagline'] = lang.tagline res[lang.language] = info return res def list_actors(actors): res = {} for actor in actors: info = { 'trakt_id': actor.id, 'name': actor.name, 'imdb_id': str(actor.imdb), 'trakt_slug': actor.slug, 'tmdb_id': str(actor.tmdb), 'birthday': actor.birthday.strftime("%Y/%m/%d") if actor.birthday else None, 'biography': actor.biography, 'homepage': actor.homepage, 'death': actor.death.strftime("%Y/%m/%d") if actor.death else None, 'headshot_full': 'image_headshot_full', 'headshot_medium': 'image_headshot_medium', 'headshot_thumb': 'image_headshot_thumb', 'fanart_full': 'image_fanart_full', 'fanart_medium': 'image_fanart_medium', 'fanart_thumb': 'image_fanart_thumb', } res[str(actor.id)] = info return res class TraktEpisode(Base): __tablename__ = 'trakt_episodes' id = Column(Integer, primary_key=True, autoincrement=False) tvdb_id = Column(Integer) imdb_id = Column(Unicode) tmdb_id = Column(Integer) 
tvrage_id = Column(Unicode) title = Column(Unicode) season = Column(Integer) number = Column(Integer) number_abs = Column(Integer) overview = Column(Unicode) first_aired = Column(DateTime) updated_at = Column(DateTime) cached_at = Column(DateTime) image_screenshot_full = Column(Unicode) image_screenshot_medium = Column(Unicode) image_screenshot_thumb = Column(Unicode) series_id = Column(Integer, ForeignKey('trakt_shows.id'), nullable=False) def __init__(self, trakt_episode, session): super(TraktEpisode, self).__init__() self.update(trakt_episode, session) def update(self, trakt_episode, session): """Updates this record from the trakt media object `trakt_movie` returned by the trakt api.""" if self.id and self.id != trakt_episode['ids']['trakt']: raise Exception('Tried to update db ep with different ep data') elif not self.id: self.id = trakt_episode['ids']['trakt'] self.imdb_id = trakt_episode['ids']['imdb'] self.tmdb_id = trakt_episode['ids']['tmdb'] self.tvrage_id = trakt_episode['ids']['tvrage'] if trakt_episode.get('images'): set_image_attributes(self, trakt_episode) self.tvdb_id = trakt_episode['ids']['tvdb'] self.first_aired = None if trakt_episode.get('first_aired'): self.first_aired = dateutil_parse(trakt_episode['first_aired'], ignoretz=True) self.updated_at = dateutil_parse(trakt_episode.get('updated_at'), ignoretz=True) self.cached_at = datetime.now() for col in ['title', 'season', 'number', 'number_abs', 'overview']: setattr(self, col, trakt_episode.get(col)) @property def expired(self): # TODO should episode have its own expiration function? return False class TraktShow(Base): __tablename__ = 'trakt_shows' id = Column(Integer, primary_key=True, autoincrement=False) title = Column(Unicode) year = Column(Integer) slug = Column(Unicode) tvdb_id = Column(Integer) imdb_id = Column(Unicode) tmdb_id = Column(Integer) tvrage_id = Column(Unicode) overview = Column(Unicode) first_aired = Column(DateTime) air_day = Column(Unicode) air_time = Column(Time) timezone = Column(Unicode) runtime = Column(Integer) certification = Column(Unicode) network = Column(Unicode) image_poster_full = Column(Unicode) image_poster_medium = Column(Unicode) image_poster_thumb = Column(Unicode) image_thumb_full = Column(Unicode) country = Column(Unicode) status = Column(String) rating = Column(Integer) votes = Column(Integer) language = Column(Unicode) homepage = Column(Unicode) trailer = Column(Unicode) aired_episodes = Column(Integer) _translations = relation(TraktShowTranslation) _translation_languages = Column('translation_languages', Unicode) translation_languages = json_synonym('_translation_languages') episodes = relation(TraktEpisode, backref='show', cascade='all, delete, delete-orphan', lazy='dynamic') genres = relation(TraktGenre, secondary=show_genres_table) _actors = relation(TraktActor, secondary=show_actors_table) updated_at = Column(DateTime) cached_at = Column(DateTime) @property def main_image(self): for size in ['medium', 'full', 'thumb']: if getattr(self, 'image_poster_%s' % size) is not None: return getattr(self, 'image_poster_%s' % size) def to_dict(self): return { "id": self.id, "title": self.title, "year": self.year, "slug": self.slug, "tvdb_id": self.tvdb_id, "imdb_id": self.imdb_id, "tmdb_id": self.tmdb_id, "tvrage_id": self.tvrage_id, "overview": self.overview, "first_aired": self.first_aired, "air_day": self.air_day, "air_time": self.air_time.strftime("%H:%M") if self.air_time else None, "timezone": self.timezone, "runtime": self.runtime, "certification": self.certification, 
"network": self.network, "country": self.country, "status": self.status, "rating": self.rating, "votes": self.votes, "language": self.language, "homepage": self.homepage, "number_of_aired_episodes": self.aired_episodes, "genres": [g.name for g in self.genres], "updated_at": self.updated_at, "cached_at": self.cached_at, "images": { 'poster': { 'full': self.image_poster_full, 'medium': self.image_poster_medium, 'thumb': self.image_poster_thumb }, 'thumb': { 'full': self.image_thumb_full } }, "main_image": self.main_image } def __init__(self, trakt_show, session): super(TraktShow, self).__init__() self.update(trakt_show, session) def update(self, trakt_show, session): """Updates this record from the trakt media object `trakt_show` returned by the trakt api.""" if self.id and self.id != trakt_show['ids']['trakt']: raise Exception('Tried to update db show with different show data') elif not self.id: self.id = trakt_show['ids']['trakt'] self.slug = trakt_show['ids']['slug'] self.imdb_id = trakt_show['ids']['imdb'] self.tmdb_id = trakt_show['ids']['tmdb'] self.tvrage_id = trakt_show['ids']['tvrage'] self.tvdb_id = trakt_show['ids']['tvdb'] if trakt_show.get('images'): set_image_attributes(self, trakt_show) if trakt_show.get('airs'): airs = trakt_show.get('airs') self.air_day = airs.get('day') self.timezone = airs.get('timezone') if airs.get('time'): self.air_time = datetime.strptime(airs.get('time'), '%H:%M').time() else: self.air_time = None if trakt_show.get('first_aired'): self.first_aired = dateutil_parse(trakt_show.get('first_aired'), ignoretz=True) else: self.first_aired = None self.updated_at = dateutil_parse(trakt_show.get('updated_at'), ignoretz=True) for col in ['overview', 'runtime', 'rating', 'votes', 'language', 'title', 'year', 'runtime', 'certification', 'network', 'country', 'status', 'aired_episodes', 'trailer', 'homepage']: setattr(self, col, trakt_show.get(col)) self.genres = [TraktGenre(name=g.replace(' ', '-')) for g in trakt_show.get('genres', [])] self.cached_at = datetime.now() self.translation_languages = trakt_show.get('available_translations', []) def get_episode(self, season, number, session, only_cached=False): # TODO: Does series data being expired mean all episode data should be refreshed? episode = self.episodes.filter(TraktEpisode.season == season).filter(TraktEpisode.number == number).first() if not episode or self.expired: url = get_api_url('shows', self.id, 'seasons', season, 'episodes', number, '?extended=full,images') if only_cached: raise LookupError('Episode %s %s not found in cache' % (season, number)) log.debug('Episode %s %s not found in cache, looking up from trakt.', season, number) try: ses = get_session() data = ses.get(url).json() except requests.RequestException: raise LookupError('Error Retrieving Trakt url: %s' % url) if not data: raise LookupError('No data in response from trakt %s' % url) episode = self.episodes.filter(TraktEpisode.id == data['ids']['trakt']).first() if episode: episode.update(data, session) else: episode = TraktEpisode(data, session) self.episodes.append(episode) return episode @property def expired(self): """ :return: True if show details are considered to be expired, ie. need of update """ # TODO stolen from imdb plugin, maybe there's a better way? 
if self.cached_at is None: log.debug('cached_at is None: %s', self) return True refresh_interval = 2 # if show has been cancelled or ended, then it is unlikely to be updated often if self.year and (self.status == 'ended' or self.status == 'canceled'): # Make sure age is not negative age = max((datetime.now().year - self.year), 0) refresh_interval += age * 5 log.debug('show `%s` age %i expires in %i days', self.title, age, refresh_interval) return self.cached_at < datetime.now() - timedelta(days=refresh_interval) @property def translations(self): if not self._translations: self._translations = get_translations(self.id, 'show') return self._translations @property def actors(self): if not self._actors: self._actors[:] = get_db_actors(self.id, 'show') return self._actors def __repr__(self): return '<name=%s, id=%s>' % (self.title, self.id) class TraktMovie(Base): __tablename__ = 'trakt_movies' id = Column(Integer, primary_key=True, autoincrement=False) title = Column(Unicode) year = Column(Integer) slug = Column(Unicode) imdb_id = Column(Unicode) tmdb_id = Column(Integer) tagline = Column(Unicode) overview = Column(Unicode) released = Column(Date) runtime = Column(Integer) rating = Column(Integer) votes = Column(Integer) trailer = Column(Unicode) homepage = Column(Unicode) language = Column(Unicode) updated_at = Column(DateTime) cached_at = Column(DateTime) _translations = relation(TraktMovieTranslation, backref='movie') _translation_languages = Column('translation_languages', Unicode) translation_languages = json_synonym('_translation_languages') image_fanart_full = Column(Unicode) image_fanart_medium = Column(Unicode) image_fanart_thumb = Column(Unicode) image_poster_full = Column(Unicode) image_poster_medium = Column(Unicode) image_poster_thumb = Column(Unicode) image_logo_full = Column(Unicode) image_clearart_full = Column(Unicode) image_banner_full = Column(Unicode) image_thumb_full = Column(Unicode) genres = relation(TraktGenre, secondary=movie_genres_table) _actors = relation(TraktActor, secondary=movie_actors_table) def __init__(self, trakt_movie, session): super(TraktMovie, self).__init__() self.update(trakt_movie, session) @property def main_image(self): for size in ['medium', 'full', 'thumb']: for image_type in ['poster', 'fanart']: if getattr(self, 'image_%s_%s' % (image_type, size)) is not None: return getattr(self, 'image_%s_%s' % (image_type, size)) def to_dict(self): return { "id": self.id, "title": self.title, "year": self.year, "slug": self.slug, "imdb_id": self.imdb_id, "tmdb_id": self.tmdb_id, "tagline": self.tagline, "overview": self.overview, "released": self.released, "runtime": self.runtime, "rating": self.rating, "votes": self.votes, "language": self.language, "homepage": self.homepage, "trailer": self.trailer, "genres": [g.name for g in self.genres], "updated_at": self.updated_at, "cached_at": self.cached_at, "main_image": self.main_image, "images": { 'fanart': { 'full': self.image_fanart_full, 'medium': self.image_fanart_medium, 'thumb': self.image_fanart_thumb }, 'poster': { 'full': self.image_poster_full, 'medium': self.image_poster_medium, 'thumb': self.image_poster_thumb }, 'logo': { 'full': self.image_logo_full }, 'clearart': { 'full': self.image_clearart_full }, 'banner': { 'full': self.image_banner_full }, 'thumb': { 'full': self.image_thumb_full } } } def update(self, trakt_movie, session): """Updates this record from the trakt media object `trakt_movie` returned by the trakt api.""" if self.id and self.id != trakt_movie['ids']['trakt']: raise 
Exception('Tried to update db movie with different movie data') elif not self.id: self.id = trakt_movie['ids']['trakt'] self.slug = trakt_movie['ids']['slug'] self.imdb_id = trakt_movie['ids']['imdb'] self.tmdb_id = trakt_movie['ids']['tmdb'] for col in ['title', 'overview', 'runtime', 'rating', 'votes', 'language', 'tagline', 'year', 'trailer', 'homepage']: setattr(self, col, trakt_movie.get(col)) if trakt_movie.get('released'): self.released = dateutil_parse(trakt_movie.get('released'), ignoretz=True) self.updated_at = dateutil_parse(trakt_movie.get('updated_at'), ignoretz=True) self.genres = [TraktGenre(name=g.replace(' ', '-')) for g in trakt_movie.get('genres', [])] self.cached_at = datetime.now() if trakt_movie.get('images'): set_image_attributes(self, trakt_movie) self.translation_languages = trakt_movie.get('available_translations', []) @property def expired(self): """ :return: True if movie details are considered to be expired, ie. need of update """ # TODO stolen from imdb plugin, maybe there's a better way? if self.updated_at is None: log.debug('updated_at is None: %s', self) return True refresh_interval = 2 if self.year: # Make sure age is not negative age = max((datetime.now().year - self.year), 0) refresh_interval += age * 5 log.debug('movie `%s` age %i expires in %i days', self.title, age, refresh_interval) return self.cached_at < datetime.now() - timedelta(days=refresh_interval) @property def translations(self): if not self._translations: self._translations = get_translations(self.id, 'movie') return self._translations @property def actors(self): if not self._actors: self._actors[:] = get_db_actors(self.id, 'movie') return self._actors class TraktShowSearchResult(Base): __tablename__ = 'trakt_show_search_results' id = Column(Integer, primary_key=True) search = Column(Unicode, unique=True, nullable=False) series_id = Column(Integer, ForeignKey('trakt_shows.id'), nullable=True) series = relation(TraktShow, backref='search_strings') def __init__(self, search, series_id=None, series=None): self.search = search.lower() if series_id: self.series_id = series_id if series: self.series = series class TraktMovieSearchResult(Base): __tablename__ = 'trakt_movie_search_results' id = Column(Integer, primary_key=True) search = Column(Unicode, unique=True, nullable=False) movie_id = Column(Integer, ForeignKey('trakt_movies.id'), nullable=True) movie = relation(TraktMovie, backref='search_strings') def __init__(self, search, movie_id=None, movie=None): self.search = search.lower() if movie_id: self.movie_id = movie_id if movie: self.movie = movie def split_title_year(title): """Splits title containing a year into a title, year pair.""" # We only recognize years from the 2nd and 3rd millennium, FlexGetters from the year 3000 be damned! match = re.search(r'[\s(]([12]\d{3})\)?$', title) if match: title = title[:match.start()].strip() year = int(match.group(1)) else: year = None return title, year @with_session def get_cached(style=None, title=None, year=None, trakt_id=None, trakt_slug=None, tmdb_id=None, imdb_id=None, tvdb_id=None, tvrage_id=None, session=None): """ Get the cached info for a given show/movie from the database. 
:param type: Either 'show' or 'movie' """ ids = { 'id': trakt_id, 'slug': trakt_slug, 'tmdb_id': tmdb_id, 'imdb_id': imdb_id, } if style == 'show': ids['tvdb_id'] = tvdb_id ids['tvrage_id'] = tvrage_id model = TraktShow else: model = TraktMovie result = None if any(ids.values()): result = session.query(model).filter( or_(getattr(model, col) == val for col, val in ids.items() if val)).first() elif title: title, y = split_title_year(title) year = year or y query = session.query(model).filter(model.title == title) if year: query = query.filter(model.year == year) result = query.first() return result def get_trakt(style=None, title=None, year=None, trakt_id=None, trakt_slug=None, tmdb_id=None, imdb_id=None, tvdb_id=None, tvrage_id=None): """Returns the matching media object from trakt api.""" # TODO: Better error messages # Trakt api accepts either id or slug (there is a rare possibility for conflict though, e.g. 24) trakt_id = trakt_id or trakt_slug if not any([title, trakt_id, tmdb_id, imdb_id, tvdb_id, tvrage_id]): raise LookupError('No lookup arguments provided.') req_session = get_session() last_search_query = None # used if no results are found last_search_type = None if not trakt_id: # Try finding trakt_id based on other ids ids = { 'imdb': imdb_id, 'tmdb': tmdb_id } if style == 'show': ids['tvdb'] = tvdb_id ids['tvrage'] = tvrage_id for id_type, identifier in ids.items(): if not identifier: continue try: last_search_query = identifier last_search_type = id_type log.debug('Searching with params: %s=%s', id_type, identifier) results = req_session.get(get_api_url('search'), params={'id_type': id_type, 'id': identifier}).json() except requests.RequestException as e: raise LookupError('Searching trakt for %s=%s failed with error: %s' % (id_type, identifier, e)) for result in results: if result['type'] != style: continue trakt_id = result[style]['ids']['trakt'] break if not trakt_id and title: last_search_query = title last_search_type = 'title' # Try finding trakt id based on title and year if style == 'show': parsed_title, y = split_title_year(title) y = year or y else: title_parser = get_plugin_by_name('parsing').instance.parse_movie(title) y = year or title_parser.year parsed_title = title_parser.name try: params = {'query': parsed_title, 'type': style, 'year': y} log.debug('Type of title: %s', type(parsed_title)) log.debug('Searching with params: %s', ', '.join('{}={}'.format(k, v) for (k, v) in params.items())) results = req_session.get(get_api_url('search'), params=params).json() except requests.RequestException as e: raise LookupError('Searching trakt for %s failed with error: %s' % (title, e)) for result in results: if year and result[style]['year'] != year: continue if parsed_title.lower() == result[style]['title'].lower(): trakt_id = result[style]['ids']['trakt'] break # grab the first result if there is no exact match if not trakt_id and results: trakt_id = results[0][style]['ids']['trakt'] if not trakt_id: raise LookupError('Unable to find %s="%s" on trakt.' 
% (last_search_type, last_search_query)) # Get actual data from trakt try: return req_session.get(get_api_url(style + 's', trakt_id), params={'extended': 'full,images'}).json() except requests.RequestException as e: raise LookupError('Error getting trakt data for id %s: %s' % (trakt_id, e)) def update_collection_cache(style_ident, username=None, account=None): if account and not username: username = 'me' url = get_api_url('users', username, 'collection', style_ident) session = get_session(account=account) try: data = session.get(url).json() if not data: log.warning('No collection data returned from trakt.') return cache = get_user_cache(username=username, account=account)['collection'][style_ident] log.verbose('Received %d records from trakt.tv %s\'s collection', len(data), username) if style_ident == 'movies': for movie in data: movie_id = movie['movie']['ids']['trakt'] cache[movie_id] = movie['movie'] cache[movie_id]['collected_at'] = dateutil_parse(movie['collected_at'], ignoretz=True) else: for series in data: series_id = series['show']['ids']['trakt'] cache[series_id] = series['show'] cache[series_id]['seasons'] = series['seasons'] cache[series_id]['collected_at'] = dateutil_parse(series['last_collected_at'], ignoretz=True) except requests.RequestException as e: raise plugin.PluginError('Unable to get data from trakt.tv: %s' % e) def update_watched_cache(style_ident, username=None, account=None): if account and not username: username = 'me' url = get_api_url('users', username, 'watched', style_ident) session = get_session(account=account) try: data = session.get(url).json() if not data: log.warning('No watched data returned from trakt.') return cache = get_user_cache(username=username, account=account)['watched'][style_ident] log.verbose('Received %d record(s) from trakt.tv %s\'s watched history', len(data), username) if style_ident == 'movies': for movie in data: movie_id = movie['movie']['ids']['trakt'] cache[movie_id] = movie['movie'] cache[movie_id]['watched_at'] = dateutil_parse(movie['last_watched_at'], ignoretz=True) cache[movie_id]['plays'] = movie['plays'] else: for series in data: series_id = series['show']['ids']['trakt'] cache[series_id] = series['show'] cache[series_id]['seasons'] = series['seasons'] cache[series_id]['watched_at'] = dateutil_parse(series['last_watched_at'], ignoretz=True) cache[series_id]['plays'] = series['plays'] except requests.RequestException as e: raise plugin.PluginError('Unable to get data from trakt.tv: %s' % e) def get_user_cache(username=None, account=None): identifier = '{}|{}'.format(account, username or 'me') ApiTrakt.user_cache.setdefault(identifier, {}).setdefault('watched', {}).setdefault('shows', {}) ApiTrakt.user_cache.setdefault(identifier, {}).setdefault('watched', {}).setdefault('movies', {}) ApiTrakt.user_cache.setdefault(identifier, {}).setdefault('collection', {}).setdefault('shows', {}) ApiTrakt.user_cache.setdefault(identifier, {}).setdefault('collection', {}).setdefault('movies', {}) return ApiTrakt.user_cache[identifier] class ApiTrakt(object): user_cache = TimedDict(cache_time='15 minutes') @staticmethod @with_session def lookup_series(session=None, only_cached=None, **lookup_params): series = get_cached('show', session=session, **lookup_params) title = lookup_params.get('title') or '' found = None if not series and title: found = session.query(TraktShowSearchResult).filter(TraktShowSearchResult.search == title.lower()).first() if found and found.series: log.debug('Found %s in previous search results as %s', title, 
found.series.title) series = found.series if only_cached: if series: return series raise LookupError('Series %s not found from cache' % lookup_params) if series and not series.expired: return series try: trakt_show = get_trakt('show', **lookup_params) except LookupError as e: if series: log.debug('Error refreshing show data from trakt, using cached. %s', e) return series raise series = session.merge(TraktShow(trakt_show, session)) if series and title.lower() == series.title.lower(): return series elif series and title and not found: if not session.query(TraktShowSearchResult).filter(TraktShowSearchResult.search == title.lower()).first(): log.debug('Adding search result to db') session.merge(TraktShowSearchResult(search=title, series=series)) elif series and found: log.debug('Updating search result in db') found.series = series return series @staticmethod @with_session def lookup_movie(session=None, only_cached=None, **lookup_params): movie = get_cached('movie', session=session, **lookup_params) title = lookup_params.get('title') or '' found = None if not movie and title: found = session.query(TraktMovieSearchResult).filter(TraktMovieSearchResult.search == title.lower()).first() if found and found.movie: log.debug('Found %s in previous search results as %s', title, found.movie.title) movie = found.movie if only_cached: if movie: return movie raise LookupError('Movie %s not found from cache' % lookup_params) if movie and not movie.expired: return movie try: trakt_movie = get_trakt('movie', **lookup_params) except LookupError as e: if movie: log.debug('Error refreshing movie data from trakt, using cached. %s', e) return movie raise movie = session.merge(TraktMovie(trakt_movie, session)) if movie and title.lower() == movie.title.lower(): return movie if movie and title and not found: if not session.query(TraktMovieSearchResult).filter(TraktMovieSearchResult.search == title.lower()).first(): log.debug('Adding search result to db') session.merge(TraktMovieSearchResult(search=title, movie=movie)) elif movie and found: log.debug('Updating search result in db') found.movie = movie return movie @staticmethod def collected(style, trakt_data, title, username=None, account=None): style_ident = 'movies' if style == 'movie' else 'shows' cache = get_user_cache(username=username, account=account) if not cache['collection'][style_ident]: log.debug('No collection found in cache.') update_collection_cache(style_ident, username=username, account=account) if not cache['collection'][style_ident]: log.warning('No collection data returned from trakt.') return in_collection = False cache = cache['collection'][style_ident] if style == 'show': if trakt_data.id in cache: series = cache[trakt_data.id] # specials are not included number_of_collected_episodes = sum(len(s['episodes']) for s in series['seasons'] if s['number'] > 0) in_collection = number_of_collected_episodes >= trakt_data.aired_episodes elif style == 'episode': if trakt_data.show.id in cache: series = cache[trakt_data.show.id] for s in series['seasons']: if s['number'] == trakt_data.season: # extract all episode numbers currently in collection for the season number episodes = [ep['number'] for ep in s['episodes']] in_collection = trakt_data.number in episodes break else: if trakt_data.id in cache: in_collection = True log.debug('The result for entry "%s" is: %s', title, 'Owned' if in_collection else 'Not owned') return in_collection @staticmethod def watched(style, trakt_data, title, username=None, account=None): style_ident = 'movies' if style == 'movie' 
else 'shows' cache = get_user_cache(username=username, account=account) if not cache['watched'][style_ident]: log.debug('No watched history found in cache.') update_watched_cache(style_ident, username=username, account=account) if not cache['watched'][style_ident]: log.warning('No watched data returned from trakt.') return watched = False cache = cache['watched'][style_ident] if style == 'show': if trakt_data.id in cache: series = cache[trakt_data.id] # specials are not included number_of_watched_episodes = sum(len(s['episodes']) for s in series['seasons'] if s['number'] > 0) watched = number_of_watched_episodes == trakt_data.aired_episodes elif style == 'episode': if trakt_data.show.id in cache: series = cache[trakt_data.show.id] for s in series['seasons']: if s['number'] == trakt_data.season: # extract all episode numbers currently in collection for the season number episodes = [ep['number'] for ep in s['episodes']] watched = trakt_data.number in episodes break else: if trakt_data.id in cache: watched = True log.debug('The result for entry "%s" is: %s', title, 'Watched' if watched else 'Not watched') return watched @event('plugin.register') def register_plugin(): plugin.register(ApiTrakt, 'api_trakt', api_ver=2)
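# --- Illustrative usage sketch (not part of the original plugin source) ---
# A minimal, hedged example of how the cached lookup helpers defined above
# might be driven from other FlexGet code. The title, year and username are
# made up; ApiTrakt.lookup_series() obtains its own database session through
# the @with_session decorator, and collected()/watched() consult the TimedDict
# user cache before querying trakt.tv.
#
#     series = ApiTrakt.lookup_series(title='Firefly', year=2002)
#     if series is not None:
#         owned = ApiTrakt.collected('show', series, series.title, username='me')
#         seen = ApiTrakt.watched('show', series, series.title, username='me')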
dsemi/Flexget
flexget/plugins/internal/api_trakt.py
Python
mit
50,434
[ "VisIt" ]
53980165927900afc6b6266d9ea404cab9bab8c6e9d991d26de5df2ce646fd19
import os import shutil import bcbio.bam as bam import toolz as tz from bcbio.utils import (file_exists, safe_makedir, append_stem) from bcbio.pipeline import config_utils from bcbio.bam import is_paired from bcbio.provenance import do from bcbio.distributed.transaction import file_transaction import bcbio.pipeline.datadict as dd try: import pandas as pd except ImportError: pd = None def count(data): """ count reads mapping to genes using featureCounts http://subread.sourceforge.net """ in_bam = dd.get_work_bam(data) or dd.get_align_bam(data) out_dir = os.path.join(dd.get_work_dir(data), "align", dd.get_sample_name(data)) if dd.get_aligner(data) == "star": out_dir = os.path.join(out_dir, "%s_%s" % (dd.get_sample_name(data), dd.get_aligner(data))) sorted_bam = bam.sort(in_bam, dd.get_config(data), order="queryname", out_dir=safe_makedir(out_dir)) gtf_file = dd.get_transcriptome_gtf(data, default=dd.get_gtf_file(data)) work_dir = dd.get_work_dir(data) out_dir = os.path.join(work_dir, "htseq-count") safe_makedir(out_dir) count_file = os.path.join(out_dir, dd.get_sample_name(data)) + ".counts" summary_file = os.path.join(out_dir, dd.get_sample_name(data)) + ".counts.summary" if file_exists(count_file) and _is_fixed_count_file(count_file): return count_file featureCounts = config_utils.get_program("featureCounts", dd.get_config(data)) paired_flag = _paired_flag(in_bam) strand_flag = _strand_flag(data) filtered_bam = bam.filter_primary(sorted_bam, data) cmd = ("{featureCounts} -a {gtf_file} -o {tx_count_file} -s {strand_flag} " "{paired_flag} {filtered_bam}") resources = config_utils.get_resources("featureCounts", data["config"]) if resources: options = resources.get("options") if options: cmd += " %s" % " ".join([str(x) for x in options]) message = ("Count reads in {tx_count_file} mapping to {gtf_file} using " "featureCounts") with file_transaction(data, [count_file, summary_file]) as tx_files: tx_count_file, tx_summary_file = tx_files do.run(cmd.format(**locals()), message.format(**locals())) fixed_count_file = _format_count_file(count_file, data) fixed_summary_file = _change_sample_name( summary_file, dd.get_sample_name(data), data=data) shutil.move(fixed_count_file, count_file) shutil.move(fixed_summary_file, summary_file) return count_file def chipseq_count(data): """ count reads mapping to ChIP/ATAC consensus peaks with featureCounts """ method = dd.get_chip_method(data) if method == "chip": in_bam = dd.get_work_bam(data) elif method == "atac": if bam.is_paired(dd.get_work_bam(data)): in_bam = tz.get_in(("atac", "align", "NF"), data) else: in_bam = tz.get_in(("atac", "align", "full"), data) out_dir = os.path.join(dd.get_work_dir(data), "align", dd.get_sample_name(data)) sorted_bam = bam.sort(in_bam, dd.get_config(data), order="queryname", out_dir=safe_makedir(out_dir)) consensus_file = tz.get_in(("peaks_files", "consensus", "main"), data) if not consensus_file: return [[data]] saf_file = os.path.splitext(consensus_file)[0] + ".saf" work_dir = dd.get_work_dir(data) out_dir = os.path.join(work_dir, "consensus") safe_makedir(out_dir) count_file = os.path.join(out_dir, dd.get_sample_name(data)) + ".counts" summary_file = os.path.join(out_dir, dd.get_sample_name(data)) + ".counts.summary" if file_exists(count_file) and _is_fixed_count_file(count_file): if method == "atac": if bam.is_paired(dd.get_work_bam(data)): data = tz.assoc_in(data, ("peak_counts", "NF"), count_file) else: data = tz.assoc_in(data, ("peak_counts", "full"), count_file) elif method == "chip": data = tz.assoc_in(data, 
("peak_counts"), count_file) return [[data]] featureCounts = config_utils.get_program("featureCounts", dd.get_config(data)) paired_flag = _paired_flag(in_bam) strand_flag = _strand_flag(data) cmd = ("{featureCounts} -F SAF -a {saf_file} -o {tx_count_file} -s {strand_flag} " "{paired_flag} {sorted_bam}") message = ("Count reads in {sorted_bam} overlapping {saf_file} using " "featureCounts.") with file_transaction(data, [count_file, summary_file]) as tx_files: tx_count_file, tx_summary_file = tx_files do.run(cmd.format(**locals()), message.format(**locals())) fixed_count_file = _format_count_file(count_file, data) fixed_summary_file = _change_sample_name( summary_file, dd.get_sample_name(data), data=data) shutil.move(fixed_count_file, count_file) shutil.move(fixed_summary_file, summary_file) if method == "atac": if bam.is_paired(dd.get_work_bam(data)): data = tz.assoc_in(data, ("peak_counts", "NF"), count_file) else: data = tz.assoc_in(data, ("peak_counts", "full"), count_file) elif method == "chip": data = tz.assoc_in(data, ("peak_counts"), count_file) return [[data]] def _change_sample_name(in_file, sample_name, data=None): """Fix name in feature counts log file to get the same name in multiqc report. """ out_file = append_stem(in_file, "_fixed") with file_transaction(data, out_file) as tx_out: with open(tx_out, "w") as out_handle: with open(in_file) as in_handle: for line in in_handle: if line.startswith("Status"): line = "Status\t%s.bam" % sample_name out_handle.write("%s\n" % line.strip()) return out_file def _is_fixed_count_file(count_file): if os.path.exists(count_file): with open(count_file) as in_handle: line = in_handle.readline().split("\t") return len(line) == 2 def _format_count_file(count_file, data): """ this cuts the count file produced from featureCounts down to a two column file of gene ids and number of reads mapping to each gene """ COUNT_COLUMN = 5 out_file = os.path.splitext(count_file)[0] + ".fixed.counts" if file_exists(out_file) and _is_fixed_count_file(out_file): return out_file df = pd.io.parsers.read_csv(count_file, sep="\t", index_col=0, header=1) df_sub = df.iloc[:, COUNT_COLUMN] with file_transaction(data, out_file) as tx_out_file: df_sub.to_csv(tx_out_file, sep="\t", index_label="id", header=False) return out_file def _strand_flag(data): """ 0: unstranded 1: stranded 2: reverse stranded """ strand_flag = {"unstranded": "0", "firststrand": "2", "secondstrand": "1", "auto": "0"} stranded = dd.get_strandedness(data) assert stranded in strand_flag, ("%s is not a valid strandedness value. " "Valid values are 'firststrand', 'secondstrand', " "and 'unstranded") return strand_flag[stranded] def _paired_flag(bam_file): """ sets flags to handle paired-end BAM files """ if is_paired(bam_file): return "-p -B -C" else: return ""
lbeltrame/bcbio-nextgen
bcbio/rnaseq/featureCounts.py
Python
mit
7,393
[ "HTSeq" ]
14f04d6723163a47bfc88f9d2b10c19eb531a2cf6e769f84fcb3a0a75dfe28dd
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. """ This module provides objects describing the basic parameters of the pseudopotentials used in Abinit, and a parser to instantiate pseudopotential objects.. """ from __future__ import unicode_literals, division, print_function import sys import os import abc import collections import json import six #import pprint import numpy as np from warnings import warn from collections import OrderedDict, defaultdict, namedtuple from monty.string import list_strings, is_string from monty.itertools import iterator_from_slice from monty.io import FileLock from monty.collections import AttrDict, Namespace from monty.functools import lazy_property from monty.os.path import find_exts from monty.dev import deprecated from monty.json import MSONable, MontyDecoder from pymatgen.util.plotting_utils import add_fig_kwargs, get_ax_fig_plt from pymatgen.core.periodic_table import PeriodicTable, Element from pymatgen.serializers.json_coders import pmg_serialize from .eos import EOS import logging logger = logging.getLogger(__name__) __all__ = [ "Pseudo", "PseudoTable", ] __author__ = "Matteo Giantomassi" __version__ = "0.1" __maintainer__ = "Matteo Giantomassi" _PTABLE = PeriodicTable() # Tools and helper functions. def straceback(): """Returns a string with the traceback.""" import sys import traceback return "\n".join((traceback.format_exc(), str(sys.exc_info()[0]))) def _read_nlines(filename, nlines): """ Read at most nlines lines from file filename. If nlines is < 0, the entire file is read. """ if nlines < 0: with open(filename, 'r') as fh: return fh.readlines() lines = [] with open(filename, 'r') as fh: for (lineno, line) in enumerate(fh): if lineno == nlines: break lines.append(line) return lines _l2str = { 0: "s", 1: "p", 2: "d", 3: "f", 4: "g", 5: "h", 6: "i", } _str2l = {v: k for k, v in _l2str.items()} def l2str(l): """Convert the angular momentum l (int) to string.""" try: return _l2str[l] except KeyError: return "Unknown angular momentum, received l = %s" % l def str2l(s): """Convert a string to the angular momentum l (int)""" return _str2l[s] class Pseudo(six.with_metaclass(abc.ABCMeta, MSONable, object)): """ Abstract base class defining the methods that must be implemented by the concrete pseudopotential classes. """ @classmethod def as_pseudo(cls, obj): """ Convert obj into a pseudo. Accepts: * Pseudo object. * string defining a valid path. """ return obj if isinstance(obj, cls) else cls.from_file(obj) @staticmethod def from_file(filename): """ Return a :class:`Pseudo` object from filename. 
Note: the parser knows the concrete class that should be instanciated """ return PseudoParser().parse(filename) def __eq__(self, other): if other is None: return False # TODO # For the time being we check the filepath # A more robust algorithm would use md5 #return self.filepath == other.filepath return (self.md5 == other.md5 and self.__class__ == other.__class__ and self.Z == other.Z and self.Z_val == other.Z_val and self.l_max == other.l_max ) def __ne__(self, other): return not self.__eq__(other) def __repr__(self): return "<%s at %s>" % (self.__class__.__name__, self.filepath) def __str__(self): """String representation.""" lines = [] app = lines.append app("<%s: %s>" % (self.__class__.__name__, self.basename)) app(" summary: " + self.summary.strip()) app(" number of valence electrons: %s" % self.Z_val) #FIXME: rewrite the treatment of xc, use XML specs as starting point #app(" XC correlation (ixc): %s" % self._pspxc) #FIXME app(" maximum angular momentum: %s" % l2str(self.l_max)) app(" angular momentum for local part: %s" % l2str(self.l_local)) if self.isnc: app(" radius for non-linear core correction: %s" % self.nlcc_radius) app("") if self.has_hints: hint_normal = self.hint_for_accuracy() if hint_normal is not None: app(" hint for normal accuracy: %s" % str(hint_normal)) else: app(" hints on cutoff-energy are not available") return "\n".join(lines) @abc.abstractproperty def summary(self): """String summarizing the most important properties.""" @property def filepath(self): return os.path.abspath(self.path) @property def basename(self): """File basename.""" return os.path.basename(self.filepath) @abc.abstractproperty def Z(self): """The atomic number of the atom.""" @abc.abstractproperty def Z_val(self): """Valence charge""" @property def type(self): return self.__class__.__name__ @property def element(self): """Pymatgen :class:`Element`.""" try: return _PTABLE[self.Z] except (KeyError, IndexError): return _PTABLE[int(self.Z)] @property def symbol(self): """Element symbol.""" return self.element.symbol @abc.abstractproperty def l_max(self): """Maximum angular momentum.""" @abc.abstractproperty def l_local(self): """Angular momentum used for the local part.""" @property def isnc(self): """True if norm-conserving pseudopotential.""" return isinstance(self, NcPseudo) @property def ispaw(self): """True if PAW pseudopotential.""" return isinstance(self, PawPseudo) @lazy_property def md5(self): """MD5 hash value.""" if self.has_dojo_report: if "md5" in self.dojo_report: return self.dojo_report["md5"] else: warn("Dojo report without md5 entry") return self.compute_md5() def compute_md5(self): """Compute MD5 hash value.""" import hashlib if self.path.endswith(".xml"): # TODO: XML + DOJO_REPORT #raise NotImplementedError("md5 for XML files!") with open(self.path, "rt") as fh: text = fh.read() else: # If we have a pseudo with a dojo_report at the end. # we compute the hash from the data before DOJO_REPORT. # else all the lines are taken. 
with open(self.path, "rt") as fh: lines = fh.readlines() try: start = lines.index("<DOJO_REPORT>\n") except ValueError: start = len(lines) text = "".join(lines[:start]) m = hashlib.md5(text.encode("utf-8")) return m.hexdigest() def check_and_fix_dojo_md5(self): report = self.read_dojo_report() if "md5" in report: if report["md5"] != self.md5: raise ValueError("md5 found in dojo_report does not agree\n" "with the computed value\nFound %s\nComputed %s" % (report["md5"], hash)) else: report["md5"] = self.compute_md5() self.write_dojo_report(report=report) #@abc.abstractproperty #def xc_type(self): # """XC family e.g LDA, GGA, MGGA.""" #@abc.abstractproperty #def xc_flavor(self): # """XC flavor e.g PW, PW91, PBE.""" #@property #def xc_functional(self): # """XC identifier e.g LDA-PW91, GGA-PBE, GGA-revPBE.""" # return "-".join([self.xc_type, self.xc_flavor]) #@abc.abstractproperty #def has_soc(self): # """True if pseudo contains spin-orbit coupling.""" #@abc.abstractmethod #def num_of_projectors(self, l='s'): # """Number of projectors for the angular channel l""" #@abc.abstractmethod #def generation_mode # """scalar scalar-relativistic, relativistic.""" @pmg_serialize def as_dict(self, **kwargs): return dict( basename=self.basename, type=self.type, symbol=self.symbol, Z=self.Z, Z_val=self.Z_val, l_max=self.l_max, md5=self.md5, filepath=self.filepath ) @classmethod def from_dict(cls, d): new = cls.from_file(d['filepath']) # Consistency test based on md5 if "md5" in d and d["md5"] != new.md5: raise ValueError("The md5 found in file does not agree with the one in dict\n" "Received %s\nComputed %s" % (d["md5"], new.md5)) return new def as_tmpfile(self): """ Copy the pseudopotential to a temporary a file and returns a new pseudopotential object. """ import tempfile, shutil _, dst = tempfile.mkstemp(suffix=self.basename, text=True) shutil.copy(self.path, dst) return self.__class__.from_file(dst) @property def has_dojo_report(self): """True if self contains the `DOJO_REPORT` section.""" return bool(self.dojo_report) def delta_factor(self, accuracy="normal"): """ Returns the deltafactor [meV/natom] computed with the given accuracy. None if the `Pseudo` does not have info on the deltafactor. """ if not self.has_dojo_report: return None try: return self.dojo_report["delta_factor"][accuracy]["dfact"] except KeyError: return None def read_dojo_report(self): """ Read the `DOJO_REPORT` section and set the `dojo_report` attribute. returns {} if section is not present. """ self.dojo_report = DojoReport.from_file(self.path) return self.dojo_report def write_dojo_report(self, report=None): """Write a new `DOJO_REPORT` section to the pseudopotential file.""" if report is None: report = self.dojo_report report["symbol"] = self.symbol if "md5" not in report: report["md5"] = self.md5 if report["md5"] != self.md5: raise ValueError("md5 found in dojo_report does not agree\n" "with the computed value\nreport: %s\npseudo %s" % (report["md5"], self.md5)) # Create JSON string from report. jstring = json.dumps(report, indent=4, sort_keys=True) + "\n" # Read lines from file and insert jstring between the tags. with open(self.path, "r") as fh: lines = fh.readlines() try: start = lines.index("<DOJO_REPORT>\n") except ValueError: start = -1 if start == -1: # DOJO_REPORT was not present. lines += ["<DOJO_REPORT>\n", jstring , "</DOJO_REPORT>\n",] else: stop = lines.index("</DOJO_REPORT>\n") lines.insert(stop, jstring) del lines[start+1:stop] # Write new file. 
with FileLock(self.path): with open(self.path, "w") as fh: fh.writelines(lines) def remove_dojo_report(self): """Remove the `DOJO_REPORT` section from the pseudopotential file.""" # Read lines from file and insert jstring between the tags. with open(self.path, "r") as fh: lines = fh.readlines() try: start = lines.index("<DOJO_REPORT>\n") except ValueError: start = -1 if start == -1: return stop = lines.index("</DOJO_REPORT>\n") if stop == -1: return del lines[start+1:stop] # Write new file. with FileLock(self.path): with open(self.path, "w") as fh: fh.writelines(lines) def hint_for_accuracy(self, accuracy="normal"): """ Returns an hint object with parameters such as ecut [Ha] and aug_ratio for given accuracy. Returns None if no hint is available. Args: accuracy: ["low", "normal", "high"] """ if self.has_dojo_report: try: return Hint.from_dict(self.dojo_report["hints"][accuracy]) except KeyError: return None else: return None @property def has_hints(self): """True if self provides hints on the cutoff energy.""" for acc in ["low", "normal", "high"]: try: if self.hint_for_accuracy(acc) is None: return False except KeyError: return False return True def open_pspsfile(self, ecut=20, pawecutdg=None): """ Calls Abinit to compute the internal tables for the application of the pseudopotential part. Returns :class:`PspsFile` object providing methods to plot and analyze the data or None if file is not found or it's not readable. Args: ecut: Cutoff energy in Hartree. pawecutdg: Cutoff energy for the PAW double grid. """ from pymatgen.io.abinit.tasks import AbinitTask from abipy.core.structure import Structure from abipy.abio.factories import gs_input from abipy.electrons.psps import PspsFile # Build fake structure. lattice = 10 * np.eye(3) structure = Structure(lattice, [self.element], coords=[[0, 0, 0]]) if self.ispaw and pawecutdg is None: pawecudg = ecut * 4 inp = gs_input(structure, pseudos=[self], ecut=ecut, pawecutdg=pawecutdg, spin_mode="unpolarized", kppa=1) # Add prtpsps = -1 to make Abinit print the PSPS.nc file and stop. inp["prtpsps"] = -1 # Build temporary task and run it. task = AbinitTask.temp_shell_task(inp) retcode = task.start_and_wait() filepath = task.outdir.has_abiext("_PSPS.nc") if not filepath: logger.critical("Cannot find PSPS.nc file in %s" % task.outdir) return None # Open the PSPS.nc file. try: return PspsFile(filepath) except Exception as exc: logger.critical("Exception while reading PSPS file at %s:\n%s" % (filepath, str(exc))) return None class NcPseudo(six.with_metaclass(abc.ABCMeta, object)): """ Abstract class defining the methods that must be implemented by the concrete classes representing norm-conserving pseudopotentials. """ @abc.abstractproperty def nlcc_radius(self): """ Radius at which the core charge vanish (i.e. cut-off in a.u.). Returns 0.0 if nlcc is not used. """ @property def has_nlcc(self): """True if the pseudo is generated with non-linear core correction.""" return self.nlcc_radius > 0.0 @property def rcore(self): """Radius of the pseudization sphere in a.u.""" try: return self._core except AttributeError: return None class PawPseudo(six.with_metaclass(abc.ABCMeta, object)): """ Abstract class that defines the methods that must be implemented by the concrete classes representing PAW pseudopotentials. """ #def nlcc_radius(self): # """ # Radius at which the core charge vanish (i.e. cut-off in a.u.). # Returns 0.0 if nlcc is not used. 
# """ # return 0.0 # #@property #def has_nlcc(self): # """True if the pseudo is generated with non-linear core correction.""" # return True @abc.abstractproperty def paw_radius(self): """Radius of the PAW sphere in a.u.""" @property def rcore(self): """Alias of paw_radius.""" return self.paw_radius class AbinitPseudo(Pseudo): """ An AbinitPseudo is a pseudopotential whose file contains an abinit header. """ def __init__(self, path, header): """ Args: path: Filename. header: :class:`AbinitHeader` instance. """ self.path = path self._summary = header.summary if hasattr(header, "dojo_report"): self.dojo_report = header.dojo_report else: self.dojo_report = {} #self.pspcod = header.pspcod for attr_name, desc in header.items(): value = header.get(attr_name, None) # Hide these attributes since one should always use the public interface. setattr(self, "_" + attr_name, value) @property def summary(self): """Summary line reported in the ABINIT header.""" return self._summary.strip() @property def Z(self): return self._zatom @property def Z_val(self): return self._zion @property def l_max(self): return self._lmax @property def l_local(self): return self._lloc class NcAbinitPseudo(NcPseudo, AbinitPseudo): """Norm-conserving pseudopotential in the Abinit format.""" @property def summary(self): return self._summary.strip() @property def Z(self): return self._zatom @property def Z_val(self): """Number of valence electrons.""" return self._zion @property def l_max(self): return self._lmax @property def l_local(self): return self._lloc @property def nlcc_radius(self): return self._rchrg class PawAbinitPseudo(PawPseudo, AbinitPseudo): """Paw pseudopotential in the Abinit format.""" @property def paw_radius(self): return self._r_cut #def orbitals(self): #class Hint(namedtuple("Hint", "ecut aug_ratio")): class Hint(object): """ Suggested value for the cutoff energy [Hartree units] and the cutoff energy for the dense grid (only for PAW pseudos) """ def __init__(self, ecut, pawecutdg=None): self.ecut = ecut self.pawecutdg = ecut if pawecutdg is None else pawecutdg @pmg_serialize def as_dict(self): return dict(ecut=self.ecut, pawecutdg=self.pawecutdg) @classmethod def from_dict(cls, d): return cls(**{k: v for k,v in d.items() if not k.startswith("@")}) def _dict_from_lines(lines, key_nums, sep=None): """ Helper function to parse formatted text structured like: value1 value2 ... sep key1, key2 ... key_nums is a list giving the number of keys for each line. 0 if line should be skipped. sep is a string denoting the character that separates the keys from the value (None if no separator is present). Returns: dict{key1 : value1, key2 : value2, ...} Raises: ValueError if parsing fails. 
""" if is_string(lines): lines = [lines] if not isinstance(key_nums, collections.Iterable): key_nums = list(key_nums) if len(lines) != len(key_nums): err_msg = "lines = %s\n key_num = %s" % (str(lines), str(key_nums)) raise ValueError(err_msg) kwargs = Namespace() for (i, nk) in enumerate(key_nums): if nk == 0: continue line = lines[i] tokens = [t.strip() for t in line.split()] values, keys = tokens[:nk], "".join(tokens[nk:]) # Sanitize keys: In some case we might string in for foo[,bar] keys.replace("[", "").replace("]", "") keys = keys.split(",") if sep is not None: check = keys[0][0] if check != sep: raise ValueError("Expecting separator %s, got %s" % (sep, check)) keys[0] = keys[0][1:] if len(values) != len(keys): msg = "line: %s\n len(keys) != len(value)\nkeys: %s\n values: %s" % (line, keys, values) raise ValueError(msg) kwargs.update(zip(keys, values)) return kwargs class AbinitHeader(dict): """Dictionary whose keys can be also accessed as attributes.""" def __getattr__(self, name): try: # Default behaviour return super(AbinitHeader, self).__getattribute__(name) except AttributeError: try: # Try in the dictionary. return self[name] except KeyError as exc: raise AttributeError(str(exc)) def _int_from_str(string): """ Convert string into integer Raise: TypeError if string is not a valid integer """ float_num = float(string) int_num = int(float_num) if float_num == int_num: return int_num else: # Needed to handle pseudos with fractional charge int_num = np.rint(float_num) warn("Converting float %s to int %s" % (float_num, int_num)) return int_num #raise TypeError("Cannot convert string %s to int" % string) class NcAbinitHeader(AbinitHeader): """The abinit header found in the NC pseudopotential files.""" _attr_desc = namedtuple("att", "default astype") _VARS = { # Mandatory "zatom" : _attr_desc(None, _int_from_str), "zion" : _attr_desc(None, float), "pspdat" : _attr_desc(None, float), "pspcod" : _attr_desc(None, int), "pspxc" : _attr_desc(None, int), "lmax" : _attr_desc(None, int), "lloc" : _attr_desc(None, int), "r2well" : _attr_desc(None, float), "mmax" : _attr_desc(None, float), # Optional variables for non linear-core correction. HGH does not have it. "rchrg" : _attr_desc(0.0, float), # radius at which the core charge vanish (i.e. cut-off in a.u.) "fchrg" : _attr_desc(0.0, float), "qchrg" : _attr_desc(0.0, float), } del _attr_desc def __init__(self, summary, **kwargs): super(NcAbinitHeader, self).__init__() # APE uses llocal instead of lloc. 
if "llocal" in kwargs: kwargs["lloc"] = kwargs.pop("llocal") self.summary = summary.strip() for (key, desc) in NcAbinitHeader._VARS.items(): default, astype = desc.default, desc.astype value = kwargs.pop(key, None) if value is None: value = default if default is None: raise RuntimeError("Attribute %s must be specified" % key) else: try: value = astype(value) except: raise RuntimeError("Conversion Error for key %s, value %s" % (key, value)) self[key] = value # Add dojo_report self["dojo_report"] = kwargs.pop("dojo_report", {}) #if kwargs: # raise RuntimeError("kwargs should be empty but got %s" % str(kwargs)) @staticmethod def fhi_header(filename, ppdesc): """Parse the FHI abinit header.""" # Example: # Troullier-Martins psp for element Sc Thu Oct 27 17:33:22 EDT 1994 # 21.00000 3.00000 940714 zatom, zion, pspdat # 1 1 2 0 2001 .00000 pspcod,pspxc,lmax,lloc,mmax,r2well # 1.80626423934776 .22824404341771 1.17378968127746 rchrg,fchrg,qchrg lines = _read_nlines(filename, -1) try: header = _dict_from_lines(lines[:4], [0, 3, 6, 3]) except ValueError: # The last record with rchrg ... seems to be optional. header = _dict_from_lines(lines[:3], [0, 3, 6]) summary = lines[0] header["dojo_report"] = DojoReport.from_file(filename) return NcAbinitHeader(summary, **header) @staticmethod def hgh_header(filename, ppdesc): """Parse the HGH abinit header.""" # Example: #Hartwigsen-Goedecker-Hutter psp for Ne, from PRB58, 3641 (1998) # 10 8 010605 zatom,zion,pspdat # 3 1 1 0 2001 0 pspcod,pspxc,lmax,lloc,mmax,r2well lines = _read_nlines(filename, -1) header = _dict_from_lines(lines[:3], [0, 3, 6]) summary = lines[0] header["dojo_report"] = DojoReport.from_file(filename) return NcAbinitHeader(summary, **header) @staticmethod def gth_header(filename, ppdesc): """Parse the GTH abinit header.""" # Example: #Goedecker-Teter-Hutter Wed May 8 14:27:44 EDT 1996 #1 1 960508 zatom,zion,pspdat #2 1 0 0 2001 0. 
pspcod,pspxc,lmax,lloc,mmax,r2well #0.2000000 -4.0663326 0.6778322 0 0 rloc, c1, c2, c3, c4 #0 0 0 rs, h1s, h2s #0 0 rp, h1p # 1.36 .2 0.6 rcutoff, rloc lines = _read_nlines(filename, -1) header = _dict_from_lines(lines[:3], [0, 3, 6]) summary = lines[0] header["dojo_report"] = DojoReport.from_file(filename) return NcAbinitHeader(summary, **header) @staticmethod def oncvpsp_header(filename, ppdesc): """Parse the ONCVPSP abinit header.""" # Example #Li ONCVPSP r_core= 2.01 3.02 # 3.0000 3.0000 140504 zatom,zion,pspd # 8 2 1 4 600 0 pspcod,pspxc,lmax,lloc,mmax,r2well # 5.99000000 0.00000000 0.00000000 rchrg fchrg qchrg # 2 2 0 0 0 nproj # 0 extension_switch # 0 -2.5000025868368D+00 -1.2006906995331D+00 # 1 0.0000000000000D+00 0.0000000000000D+00 0.0000000000000D+00 # 2 1.0000000000000D-02 4.4140499497377D-02 1.9909081701712D-02 lines = _read_nlines(filename, -1) header = _dict_from_lines(lines[:3], [0, 3, 6]) summary = lines[0] header.update({'pspdat': header['pspd']}) header.pop('pspd') try: header["dojo_report"] = DojoReport.from_file(filename) except DojoReport.Error: logger.warning('failed to read the dojo report for %s' % filename) header["dojo_report"] = None return NcAbinitHeader(summary, **header) @staticmethod def tm_header(filename, ppdesc): """Parse the TM abinit header.""" # Example: #Troullier-Martins psp for element Fm Thu Oct 27 17:28:39 EDT 1994 #100.00000 14.00000 940714 zatom, zion, pspdat # 1 1 3 0 2001 .00000 pspcod,pspxc,lmax,lloc,mmax,r2well # 0 4.085 6.246 0 2.8786493 l,e99.0,e99.9,nproj,rcpsp # .00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm # 1 3.116 4.632 1 3.4291849 l,e99.0,e99.9,nproj,rcpsp # .00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm # 2 4.557 6.308 1 2.1865358 l,e99.0,e99.9,nproj,rcpsp # .00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm # 3 23.251 29.387 1 2.4776730 l,e99.0,e99.9,nproj,rcpsp # .00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm # 3.62474762267880 .07409391739104 3.07937699839200 rchrg,fchrg,qchrg lines = _read_nlines(filename, -1) header = [] for (lineno, line) in enumerate(lines): header.append(line) if lineno == 2: # Read lmax. tokens = line.split() pspcod, pspxc, lmax, lloc = map(int, tokens[:4]) mmax, r2well = map(float, tokens[4:6]) #if tokens[-1].strip() != "pspcod,pspxc,lmax,lloc,mmax,r2well": # raise RuntimeError("%s: Invalid line\n %s" % (filename, line)) lines = lines[3:] break # TODO # Parse the section with the projectors. #0 4.085 6.246 0 2.8786493 l,e99.0,e99.9,nproj,rcpsp #.00000000 .0000000000 .0000000000 .00000000 rms,ekb1,ekb2,epsatm projectors = OrderedDict() for idx in range(2*(lmax+1)): line = lines[idx] if idx % 2 == 0: proj_info = [line,] if idx % 2 == 1: proj_info.append(line) d = _dict_from_lines(proj_info, [5,4]) projectors[int(d["l"])] = d # Add the last line with info on nlcc. 
header.append(lines[idx+1]) summary = header[0] header = _dict_from_lines(header, [0,3,6,3]) header["dojo_report"] = DojoReport.from_file(filename) return NcAbinitHeader(summary, **header) class PawAbinitHeader(AbinitHeader): """The abinit header found in the PAW pseudopotential files.""" _attr_desc = namedtuple("att", "default astype") _VARS = { "zatom" : _attr_desc(None, _int_from_str), "zion" : _attr_desc(None, float), "pspdat" : _attr_desc(None, float), "pspcod" : _attr_desc(None, int), "pspxc" : _attr_desc(None, int), "lmax" : _attr_desc(None, int), "lloc" : _attr_desc(None, int), "mmax" : _attr_desc(None, int), "r2well" : _attr_desc(None, float), "pspfmt" : _attr_desc(None, str), "creatorID" : _attr_desc(None, int), "basis_size" : _attr_desc(None, int), "lmn_size" : _attr_desc(None, int), "orbitals" : _attr_desc(None, list), "number_of_meshes": _attr_desc(None, int), "r_cut" : _attr_desc(None, float), # r_cut(PAW) in the header "shape_type" : _attr_desc(None, int), "rshape" : _attr_desc(None, float), } del _attr_desc def __init__(self, summary, **kwargs): super(PawAbinitHeader, self).__init__() self.summary = summary.strip() for (key, desc) in self._VARS.items(): default, astype = desc.default, desc.astype value = kwargs.pop(key, None) if value is None: value = default if default is None: raise RuntimeError("Attribute %s must be specified" % key) else: try: value = astype(value) except: raise RuntimeError("Conversion Error for key %s, with value %s" % (key, value)) self[key] = value if kwargs: raise RuntimeError("kwargs should be empty but got %s" % str(kwargs)) @staticmethod def paw_header(filename, ppdesc): """Parse the PAW abinit header.""" #Paw atomic data for element Ni - Generated by AtomPAW (N. Holzwarth) + AtomPAW2Abinit v3.0.5 # 28.000 18.000 20061204 : zatom,zion,pspdat # 7 7 2 0 350 0. : pspcod,pspxc,lmax,lloc,mmax,r2well # paw3 1305 : pspfmt,creatorID # 5 13 : basis_size,lmn_size # 0 0 1 1 2 : orbitals # 3 : number_of_meshes # 1 3 350 1.1803778368E-05 3.5000000000E-02 : mesh 1, type,size,rad_step[,log_step] # 2 1 921 2.500000000000E-03 : mesh 2, type,size,rad_step[,log_step] # 3 3 391 1.1803778368E-05 3.5000000000E-02 : mesh 3, type,size,rad_step[,log_step] # 2.3000000000 : r_cut(SPH) # 2 0. # Example #C (US d-loc) - PAW data extracted from US-psp (D.Vanderbilt) - generated by USpp2Abinit v2.3.0 # 6.000 4.000 20090106 : zatom,zion,pspdat # 7 11 1 0 560 0. : pspcod,pspxc,lmax,lloc,mmax,r2well # paw4 2230 : pspfmt,creatorID # 4 8 : basis_size,lmn_size # 0 0 1 1 : orbitals # 5 : number_of_meshes # 1 2 560 1.5198032759E-04 1.6666666667E-02 : mesh 1, type,size,rad_step[,log_step] # 2 2 556 1.5198032759E-04 1.6666666667E-02 : mesh 2, type,size,rad_step[,log_step] # 3 2 576 1.5198032759E-04 1.6666666667E-02 : mesh 3, type,size,rad_step[,log_step] # 4 2 666 1.5198032759E-04 1.6666666667E-02 : mesh 4, type,size,rad_step[,log_step] # 5 2 673 1.5198032759E-04 1.6666666667E-02 : mesh 5, type,size,rad_step[,log_step] # 1.5550009124 : r_cut(PAW) # 3 0. : shape_type,rshape #Paw atomic data for element Si - Generated by atompaw v3.0.1.3 & AtomPAW2Abinit v3.3.1 # 14.000 4.000 20120814 : zatom,zion,pspdat # 7 11 1 0 663 0. 
: pspcod,pspxc,lmax,lloc,mmax,r2well # paw5 1331 : pspfmt,creatorID # 4 8 : basis_size,lmn_size # 0 0 1 1 : orbitals # 5 : number_of_meshes # 1 2 663 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 1, type,size,rad_step[,log_step] # 2 2 658 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 2, type,size,rad_step[,log_step] # 3 2 740 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 3, type,size,rad_step[,log_step] # 4 2 819 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 4, type,size,rad_step[,log_step] # 5 2 870 8.2129718540404674E-04 1.1498160595656655E-02 : mesh 5, type,size,rad_step[,log_step] # 1.5669671236 : r_cut(PAW) # 2 0. : shape_type,rshape supported_formats = ["paw3", "paw4", "paw5"] if ppdesc.format not in supported_formats: raise NotImplementedError("format %s not in %s" % (ppdesc.format, supported_formats)) lines = _read_nlines(filename, -1) summary = lines[0] header = _dict_from_lines(lines[:5], [0, 3, 6, 2, 2], sep=":") lines = lines[5:] # TODO # Parse orbitals and number of meshes. header["orbitals"] = [int(t) for t in lines[0].split(":")[0].split()] header["number_of_meshes"] = num_meshes = int(lines[1].split(":")[0]) #print filename, header # Skip meshes = lines = lines[2+num_meshes:] #for midx in range(num_meshes): # l = midx + 1 #print lines[0] header["r_cut"] = float(lines[0].split(":")[0]) #print lines[1] header.update(_dict_from_lines(lines[1], [2], sep=":")) report = DojoReport.from_file(filename) if report: header["dojo_report"] = report #print("PAW header\n", header) return PawAbinitHeader(summary, **header) class PseudoParserError(Exception): """Base Error class for the exceptions raised by :class:`PseudoParser`""" class PseudoParser(object): """ Responsible for parsing pseudopotential files and returning pseudopotential objects. Usage:: pseudo = PseudoParser().parse("filename") """ Error = PseudoParserError # Supported values of pspcod ppdesc = namedtuple("ppdesc", "pspcod name psp_type format") # TODO Recheck _PSPCODES = OrderedDict( { 1: ppdesc(1, "TM", "NC", None), 2: ppdesc(2, "GTH", "NC", None), 3: ppdesc(3, "HGH", "NC", None), #4: ppdesc(4, "NC", , None), #5: ppdesc(5, "NC", , None), 6: ppdesc(6, "FHI", "NC", None), 7: ppdesc(6, "PAW_abinit_text", "PAW", None), 8: ppdesc(8, "ONCVPSP", "NC", None), 10: ppdesc(10, "HGHK", "NC", None), }) del ppdesc # renumber functionals from oncvpsp todo confrim that 3 is 2 _FUNCTIONALS = {1: {'n': 4, 'name': 'Wigner'}, 2: {'n': 5, 'name': 'HL'}, 3: {'n': 2, 'name': 'PWCA'}, 4: {'n': 11, 'name': 'PBE'}} def __init__(self): # List of files that have been parsed succesfully. self._parsed_paths = [] # List of files that could not been parsed. self._wrong_paths = [] def scan_directory(self, dirname, exclude_exts=(), exclude_fnames=()): """ Analyze the files contained in directory dirname. Args: dirname: directory path exclude_exts: list of file extensions that should be skipped. exclude_fnames: list of file names that should be skipped. Returns: List of pseudopotential objects. """ for (i, ext) in enumerate(exclude_exts): if not ext.strip().startswith("."): exclude_exts[i] = "." + ext.strip() # Exclude files depending on the extension. paths = [] for fname in os.listdir(dirname): root, ext = os.path.splitext(fname) path = os.path.join(dirname, fname) if (ext in exclude_exts or fname in exclude_fnames or fname.startswith(".") or not os.path.isfile(path)): continue paths.append(path) pseudos = [] for path in paths: # Parse the file and generate the pseudo. 
try: pseudo = self.parse(path) except: pseudo = None if pseudo is not None: pseudos.append(pseudo) self._parsed_paths.extend(path) else: self._wrong_paths.extend(path) return pseudos def read_ppdesc(self, filename): """ Read the pseudopotential descriptor from file filename. Returns: Pseudopotential descriptor. None if filename is not a valid pseudopotential file. Raises: `PseudoParserError` if fileformat is not supported. """ if filename.endswith(".xml"): raise self.Error("XML pseudo not supported yet") else: # Assume file with the abinit header. lines = _read_nlines(filename, 80) for (lineno, line) in enumerate(lines): if lineno == 2: try: tokens = line.split() pspcod, pspxc = map(int, tokens[:2]) except: msg = "%s: Cannot parse pspcod, pspxc in line\n %s" % (filename, line) sys.stderr.write(msg) return None #if tokens[-1].strip().replace(" ","") not in ["pspcod,pspxc,lmax,lloc,mmax,r2well", # "pspcod,pspxc,lmax,llocal,mmax,r2well"]: # raise self.Error("%s: Invalid line\n %s" % (filename, line)) # return None if pspcod not in self._PSPCODES: raise self.Error("%s: Don't know how to handle pspcod %s\n" % (filename, pspcod)) ppdesc = self._PSPCODES[pspcod] if pspcod == 7: # PAW -> need to know the format pspfmt tokens = lines[lineno+1].split() pspfmt, creatorID = tokens[:2] #if tokens[-1].strip() != "pspfmt,creatorID": # raise self.Error("%s: Invalid line\n %s" % (filename, line)) # return None ppdesc = ppdesc._replace(format = pspfmt) return ppdesc return None def parse(self, filename): """ Read and parse a pseudopotential file. Main entry point for client code. Returns: pseudopotential object or None if filename is not a valid pseudopotential file. """ path = os.path.abspath(filename) # Only PAW supports XML at present. if filename.endswith(".xml"): return PawXmlSetup(path) ppdesc = self.read_ppdesc(path) if ppdesc is None: return None psp_type = ppdesc.psp_type parsers = { "FHI" : NcAbinitHeader.fhi_header, "GTH" : NcAbinitHeader.gth_header, "TM" : NcAbinitHeader.tm_header, "HGH" : NcAbinitHeader.hgh_header, "HGHK" : NcAbinitHeader.hgh_header, "ONCVPSP" : NcAbinitHeader.oncvpsp_header, "PAW_abinit_text": PawAbinitHeader.paw_header, } try: header = parsers[ppdesc.name](path, ppdesc) except Exception as exc: raise self.Error(path + ":\n" + straceback()) root, ext = os.path.splitext(path) if psp_type == "NC": pseudo = NcAbinitPseudo(path, header) elif psp_type == "PAW": pseudo = PawAbinitPseudo(path, header) else: raise NotImplementedError("psp_type not in [NC, PAW]") return pseudo #TODO use RadialFunction from pseudo_dojo. class RadialFunction(namedtuple("RadialFunction", "mesh values")): pass class PawXmlSetup(Pseudo, PawPseudo): def __init__(self, filepath): # FIXME self.dojo_report = {} self.path = os.path.abspath(filepath) # Get the XML root (this trick is used to that the object is pickleable). root = self.root # Get the version of the XML format self.paw_setup_version = root.get("version") # Info on the atom. atom_attrib = root.find("atom").attrib #self._symbol = atom_attrib["symbol"] self._zatom = int(float(atom_attrib["Z"])) self.core, self.valence = map(float, [atom_attrib["core"], atom_attrib["valence"]]) #xc_info = root.find("atom").attrib #self.xc_type, self.xc_name = xc_info["type"], xc_info["name"] #self.ae_energy = {k: float(v) for k,v in root.find("ae_energy").attrib.items()} # Old XML files do not define this field! # In this case we set the PAW radius to None. 
#self._paw_radius = float(root.find("PAW_radius").attrib["rpaw"]) pawr_element = root.find("PAW_radius") self._paw_radius = None if pawr_element is not None: self._paw_radius = float(pawr_element.attrib["rpaw"]) #<valence_states> # <state n="2" l="0" f="2" rc="1.10" e="-0.6766" id="N-2s"/> # <state n="2" l="1" f="3" rc="1.10" e="-0.2660" id="N-2p"/> # <state l="0" rc="1.10" e=" 0.3234" id="N-s1"/> # <state l="1" rc="1.10" e=" 0.7340" id="N-p1"/> # <state l="2" rc="1.10" e=" 0.0000" id="N-d1"/> #</valence_states> # # The valence_states element contains several state elements. # For this setup, the first two lines describe bound eigenstates # with occupation numbers and principal quantum numbers. # Notice, that the three additional unbound states should have no f and n attributes. # In this way, we know that only the first two bound states (with f and n attributes) # should be used for constructing an initial guess for the wave functions. self.valence_states = {} for node in root.find("valence_states"): attrib = AttrDict(node.attrib) assert attrib.id not in self.valence_states self.valence_states[attrib.id] = attrib #print(self.valence_states) # Parse the radial grids self.rad_grids = {} for node in root.findall("radial_grid"): grid_params = node.attrib gid = grid_params["id"] assert gid not in self.rad_grids self.rad_grids[id] = self._eval_grid(grid_params) def __getstate__(self): """ Return state is pickled as the contents for the instance. In this case we just remove the XML root element process since Element object cannot be pickled. """ return {k: v for k, v in self.__dict__.items() if k not in ["_root"]} @property def root(self): try: return self._root except AttributeError: from xml.etree import cElementTree as Et tree = Et.parse(self.filepath) self._root = tree.getroot() return self._root @property def Z(self): return self._zatom @property def Z_val(self): """Number of valence electrons.""" return self.valence # FIXME @property def l_max(self): """Maximum angular momentum.""" return None @property def l_local(self): """Angular momentum used for the local part.""" return None @property def summary(self): """String summarizing the most important properties.""" return "" @property def paw_radius(self): return self._paw_radius @staticmethod def _eval_grid(grid_params): """ This function receives a dictionary with the parameters defining the radial mesh and returns a `ndarray` with the mesh """ eq = grid_params.get("eq").replace(" ", "") istart, iend = int(grid_params.get("istart")), int(grid_params.get("iend")) indices = list(range(istart, iend+1)) if eq == 'r=a*exp(d*i)': a, d = float(grid_params['a']), float(grid_params['d']) mesh = [a * np.exp(d * i) for i in indices] elif eq == 'r=a*i/(n-i)': a, n = float(grid_params['a']), float(grid_params['n']) mesh = [a * i / (n - i) for i in indices] elif eq == 'r=a*(exp(d*i)-1)': a, d = float(grid_params['a']), float(grid_params['d']) mesh = [a * (np.exp(d * i) - 1.0) for i in indices] elif eq == 'r=d*i': d = float(grid_params['d']) mesh = [d * i for i in indices] elif eq == 'r=(i/n+a)^5/a-a^4': a, n = float(grid_params['a']), float(grid_params['n']) mesh = [(i / n + a)**5 / a - a**4 for i in indices] else: raise ValueError('Unknown grid type: %s' % eq) return np.array(mesh) def _parse_radfunc(self, func_name): """Parse the first occurence of func_name in the XML file.""" node = self.root.find(func_name) grid = node.attrib["grid"] values = np.array([float(s) for s in node.text.split()]) return self.rad_grids[grid], values, node.attrib def 
_parse_all_radfuncs(self, func_name): """Parse all the nodes with tag func_name in the XML file.""" for node in self.root.findall(func_name): grid = node.attrib["grid"] values = np.array([float(s) for s in node.text.split()]) yield self.rad_grids[grid], values, node.attrib @property def ae_core_density(self): """The all-electron radial density.""" try: return self._ae_core_density except AttributeError: mesh, values, attrib = self._parse_radfunc("ae_core_density") self._ae_core_density = RadialFunction(mesh, values) return self._ae_core_density @property def pseudo_core_density(self): """The pseudized radial density.""" try: return self._pseudo_core_density except AttributeError: mesh, values, attrib = self._parse_radfunc("pseudo_core_density") self._pseudo_core_density = RadialFunction(mesh, values) return self._pseudo_core_density @property def ae_partial_waves(self): """Dictionary with the AE partial waves indexed by state.""" try: return self._ae_partial_waves except AttributeError: self._ae_partial_waves = {} for (mesh, values, attrib) in self._parse_all_radfuncs("ae_partial_wave"): state = attrib["state"] val_state = self.valence_states[state] self._ae_partial_waves[state] = RadialFunction(mesh, values) #print("val_state", val_state) return self._ae_partial_waves @property def pseudo_partial_waves(self): """Dictionary with the pseudo partial waves indexed by state.""" try: return self._pseudo_partial_waves except AttributeError: self._pseudo_partial_waves = {} for (mesh, values, attrib) in self._parse_all_radfuncs("pseudo_partial_wave"): state = attrib["state"] val_state = self.valence_states[state] self._pseudo_partial_waves[state] = RadialFunction(mesh, values) return self._pseudo_partial_waves @property def projector_functions(self): """Dictionary with the PAW projectors indexed by state.""" try: return self._projector_functions except AttributeError: self._projector_functions = {} for (mesh, values, attrib) in self._parse_all_radfuncs("projector_function"): state = attrib["state"] val_state = self.valence_states[state] self._projector_functions[state] = RadialFunction(mesh, values) return self._projector_functions @add_fig_kwargs def plot_densities(self, ax=None, **kwargs): """ Plot the PAW densities. Args: ax: matplotlib :class:`Axes` or None if a new figure should be created. Returns: `matplotlib` figure """ ax, fig, plt = get_ax_fig_plt(ax) ax.grid(True) ax.set_xlabel('r [Bohr]') #ax.set_ylabel('density') for i, den_name in enumerate(["ae_core_density", "pseudo_core_density"]): rden = getattr(self, den_name) label = "$n_c$" if i == 1 else "$\\tilde{n}_c$" ax.plot(rden.mesh, rden.mesh * rden.values, label=label, lw=2) ax.legend(loc="best") return fig @add_fig_kwargs def plot_waves(self, ax=None, **kwargs): """ Plot the AE and the pseudo partial waves. Args: ax: matplotlib :class:`Axes` or None if a new figure should be created. 
Returns: `matplotlib` figure """ ax, fig, plt = get_ax_fig_plt(ax) ax.grid(True) ax.set_xlabel("r [Bohr]") ax.set_ylabel("$r\phi,\\, r\\tilde\phi\, [Bohr]^{-\\frac{1}{2}}$") ax.axvline(x=self.paw_radius, linewidth=2, color='k', linestyle="--") #ax.annotate("$r_c$", xy=(self.paw_radius + 0.1, 0.1)) for state, rfunc in self.pseudo_partial_waves.items(): ax.plot(rfunc.mesh, rfunc.mesh * rfunc.values, lw=2, label="PS-WAVE: " + state) for state, rfunc in self.ae_partial_waves.items(): ax.plot(rfunc.mesh, rfunc.mesh * rfunc.values, lw=2, label="AE-WAVE: " + state) ax.legend(loc="best") return fig @add_fig_kwargs def plot_projectors(self, ax=None, **kwargs): """ Plot the PAW projectors. Args: ax: matplotlib :class:`Axes` or None if a new figure should be created. Returns: `matplotlib` figure """ ax, fig, plt = get_ax_fig_plt(ax) title = kwargs.pop("title", "Projectors") ax.grid(True) ax.set_xlabel('r [Bohr]') ax.set_ylabel("$r\\tilde p\, [Bohr]^{-\\frac{1}{2}}$") ax.axvline(x=self.paw_radius, linewidth=2, color='k', linestyle="--") #ax.annotate("$r_c$", xy=(self.paw_radius + 0.1, 0.1)) for state, rfunc in self.projector_functions.items(): ax.plot(rfunc.mesh, rfunc.mesh * rfunc.values, label="TPROJ: " + state) ax.legend(loc="best") return fig #@add_fig_kwargs #def plot_potentials(self, **kwargs): # """ # ================ ============================================================== # kwargs Meaning # ================ ============================================================== # title Title of the plot (Default: None). # show True to show the figure (Default). # savefig 'abc.png' or 'abc.eps' to save the figure to a file. # ================ ============================================================== # Returns: # `matplotlib` figure # """ # title = kwargs.pop("title", "Potentials") # show = kwargs.pop("show", True) # savefig = kwargs.pop("savefig", None) # import matplotlib.pyplot as plt # fig = plt.figure() # ax = fig.add_subplot(1,1,1) # ax.grid(True) # ax.set_xlabel('r [Bohr]') # ax.set_ylabel('density') # ax.axvline(x=self.paw_radius, linewidth=2, color='k', linestyle="--") # ax.annotate("$r_c$", xy=(self.paw_radius + 0.1, 0.1)) # for state, rfunc in self.potentials.items(): # ax.plot(rfunc.mesh, rfunc.values, label="TPROJ: " + state) # ax.legend(loc="best") # if title is not None: fig.suptitle(title) # if show: plt.show() # if savefig: fig.savefig(savefig) # return fig class PseudoTable(six.with_metaclass(abc.ABCMeta, collections.Sequence, MSONable, object)): """ Define the pseudopotentials from the element table. Individidual elements are accessed by name, symbol or atomic number. For example, the following all retrieve iron: print elements[26] Fe print elements.Fe Fe print elements.symbol('Fe') Fe print elements.name('iron') Fe print elements.isotope('Fe') Fe """ @classmethod def as_table(cls, items): """ Return an instance of :class:`PseudoTable` from the iterable items. """ if isinstance(items, cls): return items return cls(items) @classmethod def from_dir(cls, top, exts=None, exclude_dirs="_*"): """ Find all pseudos in the directory tree starting from top. Args: top: Top of the directory tree exts: List of files extensions. if exts == "all_files" we try to open all files in top exclude_dirs: Wildcard used to exclude directories. return: :class:`PseudoTable` sorted by atomic number Z. 
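        Example (illustrative call with a hypothetical directory)::

            pseudos = PseudoTable.from_dir("~/pseudos/ONCVPSP", exts=("psp8",))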
""" pseudos = [] if exts == "all_files": for f in [os.path.join(top, fn) for fn in os.listdir(top)]: if os.path.isfile(f): try: p = Pseudo.from_file(f) if p: pseudos.append(p) else: logger.info('Skipping file %s' % f) except: logger.info('Skipping file %s' % f) if not pseudos: logger.warning('No pseudopotentials parsed from folder %s' % top) return None logger.info('Creating PseudoTable with %i pseudopotentials' % len(pseudos)) else: if exts is None: exts=("psp8",) for p in find_exts(top, exts, exclude_dirs=exclude_dirs): try: pseudos.append(Pseudo.from_file(p)) except Exception as exc: logger.critical("Error in %s:\n%s" % (p, exc)) return cls(pseudos).sort_by_z() def __init__(self, pseudos): """ Args: pseudos: List of pseudopotentials or filepaths """ # Store pseudos in a default dictionary with z as key. # Note that we can have more than one pseudo for given z. # hence the values are lists of pseudos. if not isinstance(pseudos, collections.Iterable): pseudos = [pseudos] if len(pseudos) and is_string(pseudos[0]): pseudos = list_strings(pseudos) self._pseudos_with_z = defaultdict(list) for pseudo in pseudos: p = pseudo if not isinstance(pseudo, Pseudo): p = Pseudo.from_file(pseudo) self._pseudos_with_z[p.Z].append(p) for z in self.zlist: pseudo_list = self._pseudos_with_z[z] symbols = [p.symbol for p in pseudo_list] symbol = symbols[0] if any(symb != symbol for symb in symbols): raise ValueError("All symbols must be equal while they are: %s" % str(symbols)) setattr(self, symbol, pseudo_list) def __getitem__(self, Z): """ Retrieve pseudos for the atomic number z. Accepts both int and slice objects. """ if isinstance(Z, slice): assert Z.stop is not None pseudos = [] for znum in iterator_from_slice(Z): pseudos.extend(self._pseudos_with_z[znum]) return self.__class__(pseudos) else: return self.__class__(self._pseudos_with_z[Z]) def __len__(self): return len(list(self.__iter__())) def __iter__(self): """Process the elements in Z order.""" for z in self.zlist: for pseudo in self._pseudos_with_z[z]: yield pseudo def __repr__(self): return "<%s at %s>" % (self.__class__.__name__, id(self)) def __str__(self): lines = [] app = lines.append app("<%s, len=%d>" % (self.__class__.__name__, len(self))) for pseudo in self: app(str(pseudo)) return "\n".join(lines) @property def allnc(self): """True if all pseudos are norm-conserving.""" return all(p.isnc for p in self) @property def allpaw(self): """True if all pseudos are PAW.""" return all(p.ispaw for p in self) @property def zlist(self): """Ordered list with the atomic numbers available in the table.""" return sorted(list(self._pseudos_with_z.keys())) def as_dict(self, **kwargs): d = {} for p in self: k, count = p.element, 1 # Handle multiple-pseudos with the same name! while k in d: k += k.split("#")[0] + "#" + str(count) count += 1 d.update({k: p.as_dict()}) d['@module'] = self.__class__.__module__ d['@class'] = self.__class__.__name__ return d @classmethod def from_dict(cls, d): pseudos = [] dec = MontyDecoder() for k, v in d.items(): if not k.startswith('@'): pseudos.append(dec.process_decoded(v)) return cls(pseudos) def is_complete(self, zmax=118): """ True if table is complete i.e. all elements with Z < zmax have at least on pseudopotential """ for z in range(1, zmax): if not self[z]: return False return True def all_combinations_for_elements(self, element_symbols): """ Return a list with all the the possible combination of pseudos for the given list of element_symbols. Each item is a list of pseudopotential objects. 
Example:: table.all_combinations_for_elements(["Li", "F"]) """ d = OrderedDict() for symbol in element_symbols: d[symbol] = self.select_symbols(symbol, ret_list=True) from itertools import product all = product(*d.values()) return list(all) def pseudo_with_symbol(self, symbol, allow_multi=False): """ Return the pseudo with the given chemical symbol. Args: symbols: String with the chemical symbol of the element allow_multi: By default, the method raises ValueError if multiple occurrences are found. Use allow_multi to prevent this. Raises: ValueError if symbol is not found or multiple occurences are present and not allow_multi """ pseudos = self.select_symbols(symbol, ret_list=True) if not pseudos or (len(pseudos) > 1 and not allow_multi): raise ValueError("Found %d occurrences of symbol %s" % (len(pseudos), symbol)) if not allow_multi: return pseudos[0] else: return pseudos def pseudos_with_symbols(self, symbols): """ Return the pseudos with the given chemical symbols. Raises: ValueError if one of the symbols is not found or multiple occurences are present. """ pseudos = self.select_symbols(symbols, ret_list=True) found_symbols = [p.symbol for p in pseudos] duplicated_elements = [s for s, o in collections.Counter(found_symbols).items() if o > 1] if duplicated_elements: raise ValueError("Found multiple occurrences of symbol(s) %s" % ', '.join(duplicated_elements)) missing_symbols = [s for s in symbols if s not in found_symbols] if missing_symbols: raise ValueError("Missing data for symbol(s) %s" % ', '.join(missing_symbols)) return pseudos def select_symbols(self, symbols, ret_list=False): """ Return a :class:`PseudoTable` with the pseudopotentials with the given list of chemical symbols. Args: symbols: str or list of symbols Prepend the symbol string with "-", to exclude pseudos. ret_list: if True a list of pseudos is returned instead of a :class:`PseudoTable` """ symbols = list_strings(symbols) exclude = symbols[0].startswith("-") if exclude: if not all(s.startswith("-") for s in symbols): raise ValueError("When excluding symbols, all strings must start with `-`") symbols = [s[1:] for s in symbols] #print(symbols) symbols = set(symbols) pseudos = [] for p in self: if exclude: if p.symbol in symbols: continue else: if p.symbol not in symbols: continue pseudos.append(p) if ret_list: return pseudos else: return self.__class__(pseudos) def get_pseudos_for_structure(self, structure): """ Return the list of :class:`Pseudo` objects to be used for this :class:`Structure`. Args: structure: pymatgen :class:`Structure`. Raises: `ValueError` if one of the chemical symbols is not found or multiple occurences are present in the table. """ symbols = structure.symbol_set return self.pseudos_with_symbols(symbols) #def list_properties(self, *props, **kw): # """ # Print a list of elements with the given set of properties. # Args: # *prop1*, *prop2*, ... : string # Name of the properties to print # *format*: string # Template for displaying the element properties, with one # % for each property. # For example, print a table of mass and density. # from periodictable import elements # elements.list_properties('symbol','mass','density', format="%-2s: %6.2f u %5.2f g/cm^3") # H : 1.01 u 0.07 g/cm^3 # He: 4.00 u 0.12 g/cm^3 # Li: 6.94 u 0.53 g/cm^3 # ... 
# Bk: 247.00 u 14.00 g/cm^3 # """ # format = kw.pop('format', None) # assert len(kw) == 0 # for pseudo in self: # try: # values = tuple(getattr(pseudo, p) for p in props) # except AttributeError: # # Skip elements which don't define all the attributes # continue # # Skip elements with a value of None # if any(v is None for v in values): # continue # if format is None: # print(" ".join(str(p) for p in values)) # else: # try: # print(format % values) # except: # print("format",format,"args",values) # raise #def print_table(self, stream=sys.stdout, filter_function=None): # """ # A pretty ASCII printer for the periodic table, based on some filter_function. # Args: # filter_function: # A filtering function that take a Pseudo as input and returns a boolean. # For example, setting filter_function = lambda el: el.Z_val > 2 will print # a periodic table containing only pseudos with Z_val > 2. # """ # for row in range(1, 10): # rowstr = [] # for group in range(1, 19): # el = Element.from_row_and_group(row, group) # if el and ((not filter_function) or filter_function(el)): # rowstr.append("{:3s}".format(el.symbol)) # else: # rowstr.append(" ") # print(" ".join(rowstr)) def sorted(self, attrname, reverse=False): """ Sort the table according to the value of attribute attrname. Return: New class:`PseudoTable` object """ attrs = [] for i, pseudo in self: try: a = getattr(pseudo, attrname) except AttributeError: a = np.inf attrs.append((i, a)) # Sort attrs, and build new table with sorted pseudos. return self.__class__([self[a[0]] for a in sorted(attrs, key=lambda t: t[1], reverse=reverse)]) def sort_by_z(self): """Return a new :class:`PseudoTable` with pseudos sorted by Z""" return self.__class__(sorted(self, key=lambda p: p.Z)) def select(self, condition): """ Select only those pseudopotentials for which condition is True. Return new class:`PseudoTable` object. Args: condition: Function that accepts a :class:`Pseudo` object and returns True or False. """ return self.__class__([p for p in self if condition(p)]) def with_dojo_report(self): """Select pseudos containing the DOJO_REPORT section. Return new class:`PseudoTable` object.""" return self.select(condition=lambda p: p.has_dojo_report) def get_dojo_dataframe(self, **kwargs): """ Buid a pandas :class:`DataFrame` with the most important parameters extracted from the `DOJO_REPORT` section of each pseudo in the table. Returns: frame, errors where frame is the pandas :class:`DataFrame` and errors is a list of errors encountered while trying to read the `DOJO_REPORT` from the pseudopotential file. 
""" accuracies = ["low", "normal", "high"] trial2keys = { "deltafactor": ["dfact_meV", "dfactprime_meV"] + ["v0", "b0_GPa", "b1"], "gbrv_bcc": ["a0_rel_err"], "gbrv_fcc": ["a0_rel_err"], "phonon": "all", #"phwoa": "all" } rows, names, errors = [], [], [] for p in self: report = p.dojo_report #assert "version" in report if "version" not in report: print("ignoring old report in ", p.basename) continue d = {"symbol": p.symbol, "Z": p.Z} names.append(p.basename) #read hints for acc in accuracies: try: d.update({acc + "_ecut_hint": report['hints'][acc]['ecut']}) except KeyError: d.update({acc + "_ecut_hint": -1.0 }) # FIXME try: ecut_acc = dict( low=report.ecuts[2], normal=report.ecuts[int(len(report.ecuts)/2)], high=report.ecuts[-2], ) except IndexError: ecut_acc = dict( low=report.ecuts[0], normal=report.ecuts[-1], high=report.ecuts[-1], ) for acc in accuracies: d[acc + "_ecut"] = ecut_acc[acc] try: for trial, keys in trial2keys.items(): data = report.get(trial, None) if data is None: continue # if the current trial has an entry for this ecut notting changes, else we take the ecut closes ecut_acc = dict( low=sorted(data.keys())[0], normal=sorted(data.keys())[int(len(data.keys())/2)], high=sorted(data.keys())[-1], ) for acc in accuracies: ecut = ecut_acc[acc] if keys is 'all': ecuts = data d.update({acc + "_" + trial: data[ecut]}) else: if trial.startswith("gbrv"): d.update({acc + "_" + trial + "_" + k: float(data[ecut][k]) for k in keys}) else: d.update({acc + "_" + k: float(data[ecut][k]) for k in keys}) except Exception as exc: logger.warning("%s raised %s" % (p.basename, exc)) errors.append((p.basename, str(exc))) #print(d) rows.append(d) # Build sub-class of pandas.DataFrame return DojoDataFrame(rows, index=names), errors def select_rows(self, rows): """ Return new class:`PseudoTable` object with pseudos in the given rows of the periodic table. rows can be either a int or a list of integers. 
""" if not isinstance(rows, (list, tuple)): rows = [rows] return self.__class__([p for p in self if p.element.row in rows]) def select_family(self, family): # e.g element.is_alkaline return self.__class__([p for p in self if getattr(p.element, "is_" + family)]) def dojo_compare(self, what="all", **kwargs): """Compare ecut convergence and Deltafactor, GBRV results""" import matplotlib.pyplot as plt show = kwargs.pop("show", True) what = list_strings(what) figs = [] if all(p.dojo_report.has_trial("deltafactor") for p in self) and \ any(k in what for k in ("all", "ecut")): fig_etotal, ax_list = plt.subplots(nrows=len(self), ncols=1, sharex=True, squeeze=True) #ax_list, fig, plt = get_axarray_fig_plt(ax_list, nrows=len(self), ncols=1, sharex=True, squeeze=True) figs.append(fig_etotal) for ax, pseudo in zip(ax_list, self): pseudo.dojo_report.plot_etotal_vs_ecut(ax=ax, show=False, label=pseudo.basename) if show: plt.show() if all(p.dojo_report.has_trial("deltafactor") for p in self) and \ any(k in what for k in ("all", "df", "deltafactor")): fig_deltafactor, ax_grid = plt.subplots(nrows=5, ncols=len(self), sharex=True, sharey="row", squeeze=False) #ax_list, fig, plt = get_axarray_fig_plt(ax_list, nrows=5, ncols=len(self), sharex=True, sharey="row", squeeze=False)) figs.append(fig_deltafactor) for ax_list, pseudo in zip(ax_grid.T, self): pseudo.dojo_report.plot_deltafactor_convergence(ax_list=ax_list, show=False) fig_deltafactor.suptitle(" vs ".join(p.basename for p in self)) if show: plt.show() # Compare GBRV results if all(p.dojo_report.has_trial("gbrv_bcc") for p in self) and \ any(k in what for k in ("all", "gbrv")): fig_gbrv, ax_grid = plt.subplots(nrows=2, ncols=len(self), sharex=True, sharey="row", squeeze=False) figs.append(fig_gbrv) #ax_list, fig, plt = get_axarray_fig_plt(ax_list, ncols=len(self), sharex=True, sharey="row", squeeze=False)) for ax_list, pseudo in zip(ax_grid.T, self): pseudo.dojo_report.plot_gbrv_convergence(ax_list=ax_list, show=False) fig_gbrv.suptitle(" vs ".join(p.basename for p in self)) if show: plt.show() return figs @classmethod @deprecated(replacement=from_dir) def from_directory(cls, path): pseudos = [] for f in [os.path.join(path, fn) for fn in os.listdir(path)]: if os.path.isfile(f): try: p = Pseudo.from_file(f) if p: pseudos.append(p) else: logger.info('Skipping file %s' % f) except: logger.info('Skipping file %s' % f) if not pseudos: logger.warning('No pseudopotentials parsed from folder %s' % path) return None logger.info('Creating PseudoTable with %i pseudopotentials' % len(pseudos)) return cls(pseudos) try: from pandas import DataFrame except ImportError: DataFrame = object class DojoDataFrame(DataFrame): """Extends pandas DataFrame adding helper functions.""" ALL_ACCURACIES = ("low", "normal", "high") ALL_TRIALS = ( "ecut", "deltafactor", "gbrv_bcc", "gbrv_fcc", "phonon", #"phwoa" ) _TRIALS2KEY = { "ecut": "ecut", "deltafactor": "dfact_meV", "gbrv_bcc": "gbrv_bcc_a0_rel_err", "gbrv_fcc": "gbrv_fcc_a0_rel_err", "phonon": "all", #"phwoa": "all" } _TRIALS2YLABEL = { "ecut": "Ecut [Ha]", "deltafactor": "$\Delta$-factor [meV]", "gbrv_bcc": "BCC $\Delta a_0$ (%)", "gbrv_fcc": "FCC $\Delta a_0$ (%)", "phonon": "Phonons with ASR", #"phwoa": "Phonons without ASR" } ACC2PLTOPTS = dict( low=dict(color="red"), normal=dict(color="blue"), high=dict(color="green"), ) for v in ACC2PLTOPTS.values(): v.update(linewidth=2, linestyle='dashed', marker='o', markersize=8) def tabulate(self, columns=None, stream=sys.stdout): from tabulate import tabulate if columns is None: 
accuracies = self.ALL_ACCURACIES columns = [acc + "_dfact_meV" for acc in accuracies] columns += [acc + "_ecut" for acc in accuracies] columns += [acc + "_gbrv_fcc_a0_rel_err" for acc in accuracies] columns += [acc + "_gbrv_bcc_a0_rel_err" for acc in accuracies] #return self[columns].to_html() tablefmt = "grid" floatfmt=".2f" stream.write(tabulate(self[columns], headers="keys", tablefmt=tablefmt, floatfmt=floatfmt)) def get_accuracy(self, accuracy): columns = [c for c in self if c.startswith(accuracy)] return self.__class__(data=self[columns]) def get_trials(self, accuracies="all"): accuracies = self.ALL_ACCURACIES if accuracies == "all" else list_strings(accuracies) columns = [acc + "_dfact_meV" for acc in accuracies] columns += [acc + "_ecut" for acc in accuracies] columns += [acc + "_gbrv_fcc_a0_rel_err" for acc in accuracies] columns += [acc + "_gbrv_bcc_a0_rel_err" for acc in accuracies] return self.__class__(data=self[columns]) def select_rows(self, rows): if not isinstance(rows, (list, tuple)): rows = [rows] data = [] for index, entry in self.iterrows(): element = _PTABLE[entry.Z] if element.row in rows: data.append(entry) return self.__class__(data=data) def select_family(self, family): data = [] for index, entry in self.iterrows(): element = _PTABLE[entry.Z] # e.g element.is_alkaline if getattr(element, "is_" + family): data.append(entry) return self.__class__(data=data) @add_fig_kwargs def plot_hist(self, what="dfact_meV", bins=400, **kwargs): import matplotlib.pyplot as plt fig, ax_list = plt.subplots(nrows=len(self.ALL_ACCURACIES), ncols=1, sharex=True, sharey=False, squeeze=True) #ax_list, fig, plt = get_axarray_fig_plt(ax_list, nrows=len(self.ALL_ACCURACIES), ncols=1, sharex=True, sharey=False, squeeze=True) for acc, ax in zip(self.ALL_ACCURACIES, ax_list): col = acc + "_" + what #print(col) #self[col].hist(ax=ax, bins=bins, label=col) self[col].plot(ax=ax, kind="bar", label=col) return fig @add_fig_kwargs def plot_trials(self, trials="all", accuracies="all", **kwargs): import matplotlib.pyplot as plt trials = self.ALL_TRIALS if trials == "all" else list_strings(trials) accuracies = self.ALL_ACCURACIES if accuracies == "all" else list_strings(accuracies) fig, ax_list = plt.subplots(nrows=len(trials), ncols=1, sharex=True, sharey=False, squeeze=True) #ax_list, fig, plt = get_axarray_fig_plt(ax_list, nrows=len(trials), ncols=1, sharex=True, sharey=False, squeeze=True) # See also http://matplotlib.org/examples/pylab_examples/barchart_demo.html for i, (trial, ax) in enumerate(zip(trials, ax_list)): what = self._TRIALS2KEY[trial] ax.set_ylabel(self._TRIALS2YLABEL[trial]) minval, maxval = np.inf, -np.inf for acc in accuracies: col = acc + "_" + what legend = i == 0 data = self[col] minval, maxval = min(minval, data.min()), max(maxval, data.max()) data.plot(ax=ax, legend=legend, use_index=True, label=acc, **self.ACC2PLTOPTS[acc]) #data.plot(ax=ax, kind="bar") if i == 0: ax.legend(loc='best', shadow=True, frameon=True) #fancybox=True) ax.set_xticks(range(len(data.index))) ax.set_xticklabels(data.index) #ax.set_xticklabels([root for root, ext in map(os.path.splitext, data.index)]) # Set ylimits #stepsize = None #if "gbrv" in trial: # ax.hlines(0.0, 0, len(data.index)) # #start, end = -0.6, +0.6 # start, end = max(-0.6, minval), min(+0.6, maxval) # if end - start < 0.05: end = start + 0.1 # ax.set_ylim(start, end) # ax.yaxis.set_ticks(np.arange(start, end, 0.05)) if trial == "deltafactor": #start, end = 0.0, 15 start, end = 0.0, min(15, maxval) ax.set_ylim(start, end) 
#ax.yaxis.set_ticks(np.arange(start, end, 0.1)) #if stepsize is not None: # start, end = ax.get_ylim() # ax.yaxis.set_ticks(np.arange(start, end, stepsize)) plt.setp(ax.xaxis.get_majorticklabels(), rotation=25) return fig class DojoReportError(Exception): """Exception raised by DoJoReport.""" class DojoReport(dict): """Dict-like object with the dojo report.""" _TRIALS2KEY = { "deltafactor": "dfact_meV", "gbrv_bcc": "a0_rel_err", "gbrv_fcc": "a0_rel_err", #"phwoa": "all", "phonon": "all" } ALL_ACCURACIES = ("low", "normal", "high") # List of dojo_trials # Remember to update the list if you add a new test to the DOJO_REPORT ALL_TRIALS = ( "deltafactor", "gbrv_bcc", "gbrv_fcc", "phonon", #"phwoa" ) # Tolerances on the deltafactor prime (in eV) used for the hints. #ATOLS = (1.0, 0.2, 0.04) ATOLS = (0.5, 0.1, 0.02) Error = DojoReportError @classmethod def from_file(cls, filepath): """Read the DojoReport from file.""" with open(filepath, "rt") as fh: lines = fh.readlines() try: start = lines.index("<DOJO_REPORT>\n") except ValueError: return {} stop = lines.index("</DOJO_REPORT>\n") #print("start, stop" ,start, stop) #print("".join(lines[start+1:stop])) d = json.loads("".join(lines[start+1:stop])) return cls(**d) @classmethod def from_hints(cls, ppgen_ecut, symbol): """Initialize the DojoReport from an initial value of ecut in Hartree.""" dense_right = np.arange(ppgen_ecut, ppgen_ecut + 6*2, step=2) dense_left = np.arange(max(ppgen_ecut-6, 2), ppgen_ecut, step=2) coarse_high = np.arange(ppgen_ecut + 15, ppgen_ecut + 35, step=5) ecut_list = list(dense_left) + list(dense_right) + list(coarse_high) return cls(ecut_list=ecut_list, symbol=symbol) #, **{k: {}: for k in self.ALL_TRIALS}) def __init__(self, *args, **kwargs): super(DojoReport, self).__init__(*args, **kwargs) try: for trial in self.ALL_TRIALS: # Convert ecut to float and build an OrderedDict (results are indexed by ecut in ascending order) try: d = self[trial] except KeyError: continue ecuts_keys = sorted([(float(k), k) for k in d], key=lambda t:t[0]) ord = OrderedDict([(t[0], d[t[1]]) for t in ecuts_keys]) self[trial] = ord except ValueError: raise self.Error('Error while initializing the dojo report') #def __str__(self): # stream = six.moves.StringIO() # pprint.pprint(self, stream=stream, indent=2, width=80) # return stream.getvalue() @property def symbol(self): """Chemical symbol.""" return self["symbol"] @property def element(self): """Element object.""" return Element(self.symbol) @property def has_hints(self): """True if hints on cutoff energy are present.""" return "hints" in self @property def ecuts(self): """Numpy array with the list of ecuts that should be present in the dojo_trial sub-dicts""" return self["ecuts"] @property def trials(self): """Set of strings with the trials present in the report.""" return set(list(self.keys())).intersection(self.ALL_TRIALS) def has_trial(self, dojo_trial, ecut=None): """ True if the dojo_report contains dojo_trial with the given ecut. If ecut is not, we test if dojo_trial is present. """ if dojo_trial not in self.ALL_TRIALS: raise self.Error("dojo_trial `%s` is not a registered DOJO TRIAL" % dojo_trial) if ecut is None: return dojo_trial in self else: #key = self._ecut2key(ecut) key = ecut try: self[dojo_trial][key] return True except KeyError: return False def add_ecuts(self, new_ecuts): """Add a list of new ecut values.""" # Be careful with the format here! it should be %.1f # Select the list of ecuts reported in the DOJO section. 
prev_ecuts = self["ecuts"] for i in range(len(prev_ecuts)-1): if prev_ecuts[i] >= prev_ecuts[i+1]: raise self.Error("Ecut list is not ordered:\n %s" % prev_ecuts) from monty.bisect import find_le for e in new_ecuts: # Find rightmost value less than or equal to x. if e < prev_ecuts[0]: i = 0 elif e > prev_ecuts[-1]: i = len(prev_ecuts) else: i = find_le(prev_ecuts, e) assert prev_ecuts[i] != e i += 1 prev_ecuts.insert(i, e) def add_hints(self, hints): hints_dict = { "low": {'ecut': hints[0]}, "normal" : {'ecut': hints[1]}, "high" : {'ecut': hints[2]} } self["hints"] = hints_dict #def validate(self, hints): # Add md5 hash value # self["validated"] = True @staticmethod def _ecut2key(ecut): """Convert ecut to a valid key. ecut can be either a string or a float.""" if is_string(ecut): # Validate string i = ecut.index(".") if len(ecut[i+1:]) != 1: raise ValueError("string %s must have one digit") return ecut else: # Assume float return "%.1f" % ecut def add_entry(self, dojo_trial, ecut, d, overwrite=False): """ Add an entry computed with the given ecut to the sub-dictionary associated to dojo_trial. Args: dojo_trial: ecut: d: overwrite: """ if dojo_trial not in self.ALL_TRIALS: raise ValueError("%s is not a registered trial") section = self.get(dojo_trial, {}) key = self._ecut2key(ecut) if key in section and not overwrite: raise self.Error("Cannot overwrite key %s in dojo_trial %s" % (key, dojo_trial)) section[key] = d def find_missing_entries(self): """ check the DojoReport. This function tests if each trial contains an ecut entry. Return a dictionary {trial_name: [list_of_missing_ecuts]} mapping the name of the Dojo trials to the list of ecut values that are missing """ d = {} for trial in self.ALL_TRIALS: data = self.get(trial, None) if data is None: # Gbrv results do not contain noble gases so ignore the error if "gbrv" in trial and self.element.is_noble_gas: assert data is None continue d[trial] = self.ecuts else: computed_ecuts = self[trial].keys() for e in self.ecuts: if e not in computed_ecuts: if trial not in d: d[trial] = [] d[trial].append(e) if not d: assert len(computed_ecuts) == len(self.ecuts) return d def print_table(self, stream=sys.stdout): from monty.pprint import pprint_table pprint_table(self.get_dataframe(), out=stream) @add_fig_kwargs def plot_etotal_vs_ecut(self, ax=None, inv_ecut=False, **kwargs): """ plot the convergence of the total energy as function of the energy cutoff ecut Args: ax: matplotlib Axes, if ax is None a new figure is created. Returns: `matplotlib` figure. """ # Extract the total energy of the AE relaxed structure (4). 
d = OrderedDict([(ecut, data["etotals"][4]) for ecut, data in self["deltafactor"].items()]) # Ecut mesh in Ha ecuts = np.array(list(d.keys())) ecut_min, ecut_max = np.min(ecuts), np.max(ecuts) # Energies per atom in meV and difference wrt 'converged' value num_sites = [v["num_sites"] for v in self["deltafactor"].values()][0] etotals_mev = np.array([d[e] for e in ecuts]) * 1000 / num_sites ediffs = etotals_mev - etotals_mev[-1] ax, fig, plt = get_ax_fig_plt(ax) #ax.yaxis.set_view_interval(-5, 5) lines, legends = [], [] xs = 1/ecuts if inv_ecut else ecuts ys = etotals_mev if inv_ecut else ediffs line, = ax.plot(xs, ys, "-o", color="blue") #, linewidth=3.0, markersize=15) lines.append(line) label = kwargs.pop("label", None) if label is not None: ax.legend(lines, [label], loc='best', shadow=True) high_hint = self["ppgen_hints"]["high"]["ecut"] #ax.vlines(high_hint, min(ediffs), max(ediffs)) #ax.vlines(high_hint, 0.5, 1.5) #ax.scatter([high_hint], [1.0], s=20) #, c='b', marker='o', cmap=None, norm=None) #ax.arrow(high_hint, 1, 0, 0.2, head_width=0.05, head_length=0.1, fc='k', ec='k',head_starts_at_zero=False) #ax.hlines(5, ecut_min, ecut_max, label="5.0") #ax.hlines(1, ecut_min, ecut_max, label="1.0") #ax.hlines(0.5, ecut_min, ecut_max, label="0.2") # Set xticks and labels. ax.grid(True) ax.set_xlabel("Ecut [Ha]") ax.set_xticks(xs) ax.set_ylabel("Delta Etotal/natom [meV]") #ax.set_xlim(0, max(xs)) # Use logscale if possible. if all(ediffs[:-1] > 0): ax.set_yscale("log") ax.set_xlim(xs[0]-1, xs[-2]+1) return fig @add_fig_kwargs def plot_deltafactor_eos(self, ax=None, **kwargs): """ plot the EOS computed with the deltafactor setup. Args: ax: matplotlib :class:`Axes` or None if a new figure should be created. ================ ============================================================== kwargs Meaning ================ ============================================================== cmap Color map. default `jet` ================ ============================================================== Returns: `matplotlib` figure. """ ax, fig, plt = get_ax_fig_plt(ax) trial = "deltafactor" ecuts = self[trial].keys() num_ecuts = len(ecuts) cmap = kwargs.pop("cmap", None) if cmap is None: cmap = plt.get_cmap("jet") for i, ecut in enumerate(ecuts): d = self[trial][ecut] num_sites, volumes, etotals = d["num_sites"], np.array(d["volumes"]), np.array(d["etotals"]) # Use same fit as the one employed for the deltafactor. eos_fit = EOS.DeltaFactor().fit(volumes/num_sites, etotals/num_sites) label = "ecut %.1f" % ecut if i % 2 == 0 else "" label = "ecut %.1f" % ecut eos_fit.plot(ax=ax, text=False, label=label, color=cmap(i/num_ecuts, alpha=1), show=False) return fig def get_ecut_dfactprime(self): data = self["deltafactor"] ecuts, values= data.keys(), [] values = np.array([data[e]["dfactprime_meV"] for e in ecuts]) return np.array(ecuts), values def compute_hints(self): ecuts, dfacts = self.get_ecut_dfactprime() abs_diffs = np.abs((dfacts - dfacts[-1])) #print(list(zip(ecuts, dfacts))) #print(abs_diffs) hints = 3 * [None] for ecut, adiff in zip(ecuts, abs_diffs): for i in range(3): if adiff <= self.ATOLS[i] and hints[i] is None: hints[i] = ecut if adiff > self.ATOLS[i]: hints[i] = None return hints def check(self): """ Check the dojo report for inconsistencies. Return a string with the errors found in the DOJO_REPORT. 
""" errors = [] app = errors.append if "version" not in self: app("version is missing") if "ppgen_hints" not in self: app("version is missing") if "md5" not in self: app("md5 checksum is missing!") # Check if we have computed each trial for the full set of ecuts in global_ecuts global_ecuts = self.ecuts missing = defaultdict(list) for trial in self.ALL_TRIALS: for ecut in global_ecuts: if not self.has_trial(trial, ecut=ecut): missing[trial].append(ecut) if missing: app("The following list of ecut energies is missing:") for trial, ecuts in missing.items(): app("%s: %s" % (trial, ecuts)) return "\n".join(errors) @add_fig_kwargs def plot_deltafactor_convergence(self, code="WIEN2k", what=None, ax_list=None, **kwargs): """ plot the convergence of the deltafactor parameters wrt ecut. Args: code: Reference code ax_list: List of matplotlib Axes, if ax_list is None a new figure is created Returns: `matplotlib` figure. """ all = ["dfact_meV", "dfactprime_meV", "v0", "b0_GPa", "b1"] if what is None: keys = all else: what = list_strings(what) if what[0].startswith("-"): # Exclude keys #print([type(w) for w in what]) what = [w[1:] for w in what] keys = [k for k in all if k not in what] else: keys = what # get reference entry from pseudo_dojo.refdata.deltafactor import df_database reference = df_database().get_entry(symbol=self.symbol, code=code) d = self["deltafactor"] ecuts = list(d.keys()) import matplotlib.pyplot as plt if ax_list is None: fig, ax_list = plt.subplots(nrows=len(keys), ncols=1, sharex=True, squeeze=False) ax_list = ax_list.ravel() else: fig = plt.gcf() #ax_list, fig, plt = get_axarray_fig_plt(ax_list, nrows=len(keys), ncols=1, sharex=True, squeeze=False) if len(keys) != len(ax_list): raise ValueError("len(keys)=%s != len(ax_list)=%s" % (len(keys), len(ax_list))) for i, (ax, key) in enumerate(zip(ax_list, keys)): values = np.array([float(d[ecut][key]) for ecut in ecuts]) #try: refval = getattr(reference, key) #except AttributeError: # refval = 0.0 # Plot difference pseudo - ref. ax.plot(ecuts, values - refval, "o-") ax.grid(True) ax.set_ylabel("$\Delta$" + key) if i == len(keys) - 1: ax.set_xlabel("Ecut [Ha]") if key == "dfactprime_meV": # Add horizontal lines (used to find hints for ecut). last = values[-1] xmin, xmax = min(ecuts), max(ecuts) for pad, color in zip(self.ATOLS, ("blue", "red", "violet")): ax.hlines(y=last + pad, xmin=xmin, xmax=xmax, colors=color, linewidth=1, linestyles='dashed') ax.hlines(y=last - pad, xmin=xmin, xmax=xmax, colors=color, linewidth=1, linestyles='dashed') # Set proper limits so that we focus on the relevant region. ax.set_ylim(last - 1.1*self.ATOLS[0], last + 1.1*self.ATOLS[0]) return fig @add_fig_kwargs def plot_gbrv_eos(self, struct_type, ax=None, **kwargs): """ Uses Matplotlib to plot the EOS computed with the GBRV setup Args: ax: matplotlib :class:`Axes` or None if a new figure should be created. ================ ============================================================== kwargs Meaning ================ ============================================================== cmap Color map. default `jet` ================ ============================================================== Returns: `matplotlib` figure or None if the GBRV test is not present """ ax, fig, plt = get_ax_fig_plt(ax) trial = "gbrv_" + struct_type # Handle missing entries: noble gases, Hg ... 
if trial not in self: return None ecuts = self[trial].keys() num_ecuts = len(ecuts) cmap = kwargs.pop("cmap", None) if cmap is None: cmap = plt.get_cmap("jet") for i, ecut in enumerate(ecuts): d = self[trial][ecut] volumes, etotals = np.array(d["volumes"]), np.array(d["etotals"]) eos_fit = EOS.Quadratic().fit(volumes, etotals) label = "ecut %.1f" % ecut if i % 2 == 0 else "" label = "ecut %.1f" % ecut eos_fit.plot(ax=ax, text=False, label=label, color=cmap(i/num_ecuts, alpha=1), show=False) return fig @add_fig_kwargs def plot_gbrv_convergence(self, ax_list=None, **kwargs): """ Uses Matplotlib to plot the convergence of the GBRV parameters wrt ecut. Args: ax_list: List of matplotlib Axes, if ax_list is None a new figure is created Returns: `matplotlib` figure. """ import matplotlib.pyplot as plt stypes = ("fcc", "bcc") if ax_list is None: fig, ax_list = plt.subplots(nrows=len(stypes), ncols=1, sharex=True, squeeze=False) ax_list = ax_list.ravel() else: fig = plt.gcf() #ax_list, fig, plt = get_axarray_fig_plt(ax_list, nrows=len(stypes), ncols=1, sharex=True, squeeze=False) if len(stypes) != len(ax_list): raise ValueError("len(stypes)=%s != len(ax_list)=%s" % (len(stypes), len(ax_list))) for i, (ax, stype) in enumerate(zip(ax_list, stypes)): trial = "gbrv_" + stype d = self[trial] ecuts = list(d.keys()) values = np.array([float(d[ecut]["a0_rel_err"]) for ecut in ecuts]) ax.grid(True) ax.set_ylabel("$\Delta$" + trial + "a0_rel_err") # Plot difference pseudo - ref. ax.plot(ecuts, values, "bo-") #ax.hlines(y=0.0, xmin=min(ecuts), xmax=max(ecuts), color="red") if i == len(ax_list) - 1: ax.set_xlabel("Ecut [Ha]") return fig @add_fig_kwargs def plot_phonon_convergence(self, ax_list=None, **kwargs): """ Plot the convergence of the phonon modes wrt ecut. Args: ax_list: List of matplotlib Axes, if ax_list is None a new figure is created Returns: `matplotlib` figure. """ d = self["phonon"] ecuts = list(d.keys()) l = [(ecut, float(ecut)) for ecut in ecuts] s = sorted(l, key=lambda t: t[1]) max_ecut = s[-1][0] s_ecuts = [ecut[0] for ecut in s] import matplotlib.pyplot as plt fig, ax = plt.subplots(nrows=2, sharex=True) #ax_list, fig, plt = get_axarray_fig_plt(ax_list, nrows=len(keys), ncols=1, sharex=True, squeeze=False) fmin, fmax = np.inf, -np.inf for i, v in enumerate(d[ecuts[0]]): values1 = np.array([float(d[ecut][i]) for ecut in s_ecuts]) fmin = min(fmin, values1.min()) fmax = max(fmax, values1.max()) ax[0].plot(s_ecuts, values1, "o-") ax[0].grid(True) ax[0].set_ylabel("phonon modes [meV] (asr==2)") ax[0].set_xlabel("Ecut [Ha]") values2 = np.array([float(d[ecut][i]) - float(d[max_ecut][i]) for ecut in s_ecuts]) ax[1].plot(s_ecuts, values2, "o-") ax[1].grid(True) ax[1].set_ylabel("w - w(ecut_max) [meV]") ax[1].set_xlabel("Ecut [Ha]") # Adjust limits. fmin -= 10 fmax += 10 ax[0].set_ylim(fmin, fmax) return fig
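A minimal standalone sketch of the hint-selection logic that DojoReport.compute_hints above applies: for each tolerance in ATOLS, the lowest ecut from which the delta-factor-prime stays within that tolerance of the most converged value becomes the low/normal/high hint. The ecut and dfactprime arrays below are invented toy data, not results for any real pseudopotential.

import numpy as np

ATOLS = (0.5, 0.1, 0.02)   # dfactprime tolerances (meV) for low/normal/high hints

ecuts = np.array([12.0, 16.0, 20.0, 24.0, 28.0, 32.0, 36.0])    # Ha (hypothetical)
dfactprime = np.array([6.4, 2.1, 0.8, 0.45, 0.41, 0.40, 0.40])  # meV (hypothetical)

def compute_hints(ecuts, values, atols=ATOLS):
    """Lowest ecut at which dfactprime stays within each tolerance of the last value."""
    abs_diffs = np.abs(values - values[-1])
    hints = [None] * len(atols)
    for ecut, adiff in zip(ecuts, abs_diffs):
        for i, atol in enumerate(atols):
            if adiff <= atol and hints[i] is None:
                hints[i] = float(ecut)
            if adiff > atol:
                hints[i] = None   # convergence must hold from this ecut onwards
    return hints

print(compute_hints(ecuts, dfactprime))   # [20.0, 24.0, 28.0] for this toy data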
migueldiascosta/pymatgen
pymatgen/io/abinit/pseudos.py
Python
mit
100,402
[ "ABINIT", "WIEN2k", "pymatgen" ]
83751716ea05c5e89c513436e6306c4a0c6fb689c74ccffdd156862e3129a2b3
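A hedged usage sketch for the PseudoTable API defined in the pseudos.py row above. The directory path, file extension, and element symbols are placeholders; the import path simply mirrors the file location given in the metadata and may differ between pymatgen versions, and the snippet only runs against a real directory of pseudopotential files.

import os
from pymatgen.io.abinit.pseudos import PseudoTable

# "~/pseudos" is a placeholder directory of psp8 files.
table = PseudoTable.from_dir(os.path.expanduser("~/pseudos"), exts=("psp8",))

print(len(table), "pseudos, all norm-conserving:", table.allnc)

si_fe = table.select_symbols(["Si", "Fe"])       # sub-table with Si and Fe only
no_rare = table.select_symbols(["-La", "-Ce"])   # "-" prefix excludes symbols
si = table.pseudo_with_symbol("Si")              # exactly one pseudo, or ValueError
light = table.select_rows([2, 3])                # elements in periodic-table rows 2-3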
# Copyright (C) 2018 Fulvio Paleari, Henrique Pereira Coutada Miranda # All rights reserved. # # This file is part of yambopy # from __future__ import print_function from builtins import zip from builtins import map from builtins import range from yamboparser import * from os import * import argparse def merge_qp(output,files,verbose=False): #read all the files and display main info in each of them print("=========input=========") filenames = [ f.name for f in files] datasets = [ Dataset(filename) for filename in filenames] QP_table, QP_kpts, QP_E_E0_Z = [], [], [] for d,filename in zip(datasets,filenames): _, nkpoints, nqps, _, nstrings = list(map(int,d['PARS'][:])) print("filename: ", filename) if verbose: print("description:") for i in range(1,nstrings+1): print(''.join(d['DESC_strings_%05d'%i][0])) else: print("description:", ''.join(d['DESC_strings_%05d'%(nstrings)][0])) print() QP_table.append( d['QP_table'][:].T ) QP_kpts.append( d['QP_kpts'][:].T ) QP_E_E0_Z.append( d['QP_E_Eo_Z'][:] ) # create the QP_table QP_table_save = np.vstack(QP_table) # create the kpoints table #create a list with the bigger size of QP_table nkpoints = int(max(QP_table_save[:,2])) QP_kpts_save = np.zeros([nkpoints,3]) #iterate over the QP's and store the corresponding kpoint for qp_file,kpts in zip(QP_table,QP_kpts): #iterate over the kpoints and save the coordinates on the list for qp in qp_file: n1,n2,nk = list(map(int,qp)) QP_kpts_save[nk-1] = kpts[nk-1] # create the QPs energies table QP_E_E0_Z_save = np.concatenate(QP_E_E0_Z,axis=1) #create reference file from one of the files fin = datasets[0] fout = Dataset(output,'w') variables_update = ['QP_table', 'QP_kpts', 'QP_E_Eo_Z'] variables_save = [QP_table_save.T, QP_kpts_save.T, QP_E_E0_Z_save] variables_dict = dict(list(zip(variables_update,variables_save))) PARS_save = fin['PARS'][:] PARS_save[1:3] = nkpoints,len(QP_table_save) #create the description string kmin,kmax = np.amin(QP_table_save[:,2]),np.amax(QP_table_save[:,2]) bmin,bmax = np.amin(QP_table_save[:,1]),np.amax(QP_table_save[:,1]) description = "QP @ K %03d - %03d : b %03d - %03d"%(kmin,kmax,bmin,bmax) description_save = np.array([i for i in " %s"%description]) #output data print("========output=========") print("filename: ", output) print("description: ", description) #copy dimensions for dname, the_dim in list(fin.dimensions.items()): fout.createDimension(dname, len(the_dim) if not the_dim.isunlimited() else None) #get dimensions def dimensions(array): return tuple([ 'D_%010d'%d for d in array.shape ]) #create missing dimensions for v in variables_save: for dname,d in zip( dimensions(v),v.shape ): if dname not in list(fout.dimensions.keys()): fout.createDimension(dname, d) #copy variables for v_name, varin in list(fin.variables.items()): if v_name in variables_update: #get the variable merged = variables_dict[v_name] # create the variable outVar = fout.createVariable(v_name, varin.datatype, dimensions(merged)) # Copy variable attributes outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()}) #save outvar outVar[:] = merged else: # create the variable outVar = fout.createVariable(v_name, varin.datatype, varin.dimensions) # Copy variable attributes outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()}) if v_name=='PARS': outVar[:] = PARS_save[:] elif v_name=='DESC_strings_%05d'%(nstrings): outVar[:] = varin[:] outVar[:,:len(description_save)] = description_save.T else: outVar[:] = varin[:] fout.close() if __name__ == "__main__": parser = 
argparse.ArgumentParser(description='Join different NetCDF quasi-particle databases') parser.add_argument('files', nargs='+', type=argparse.FileType('r')) parser.add_argument('-o','--output', help='Output filename', default='ndb_out.QP') parser.add_argument('-v','--verbose', action="store_true", help='Verbose mode') args = parser.parse_args() if args.files is None: parser.print_help() exit() output = args.output files = args.files verbose = args.verbose merge_qp(output,files,verbose)
alexmoratalla/yambopy
scripts/merge_qp.py
Python
bsd-3-clause
4,776
[ "NetCDF" ]
5d147e78a6559c5e45d57974d1c017b5e5ea4d87c4ff9b4064b84a052fe9fdae
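The merge_qp.py script above relies on a common netCDF4 pattern: recreate each dimension and variable of a source Dataset in an output Dataset, copy the variable attributes, then overwrite selected variables with merged arrays. Below is a minimal self-contained sketch of that pattern with made-up file and variable names (not the Yambo ndb.QP layout); it requires the netCDF4 package.

import numpy as np
from netCDF4 import Dataset

# Build a tiny source file (stand-in for one quasi-particle database).
with Dataset("src.nc", "w") as src:
    src.createDimension("nqp", 4)
    v = src.createVariable("QP_energy", "f8", ("nqp",))
    v.units = "eV"            # attribute that must survive the copy
    v[:] = np.arange(4.0)

merged = np.arange(8.0)       # pretend this came from concatenating two files

with Dataset("src.nc") as fin, Dataset("out.nc", "w") as fout:
    # Copy dimensions, resizing the one that grew after the merge.
    for name, dim in fin.dimensions.items():
        size = len(merged) if name == "nqp" else len(dim)
        fout.createDimension(name, None if dim.isunlimited() else size)
    # Copy variables and their attributes, substituting merged data.
    for name, varin in fin.variables.items():
        varout = fout.createVariable(name, varin.datatype, varin.dimensions)
        varout.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
        varout[:] = merged if name == "QP_energy" else varin[:]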
# (C) British Crown Copyright 2010 - 2018, Met Office # # This file is part of Iris. # # Iris is free software: you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the # Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Iris is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Iris. If not, see <http://www.gnu.org/licenses/>. """ Provides access to Iris-specific configuration values. The default configuration values can be overridden by creating the file ``iris/etc/site.cfg``. If it exists, this file must conform to the format defined by :mod:`ConfigParser`. ---------- .. py:data:: iris.config.TEST_DATA_DIR Local directory where test data exists. Defaults to "test_data" sub-directory of the Iris package install directory. The test data directory supports the subset of Iris unit tests that require data. Directory contents accessed via :func:`iris.tests.get_data_path`. .. py:data:: iris.config.PALETTE_PATH The full path to the Iris palette configuration directory .. py:data:: iris.config.IMPORT_LOGGER The [optional] name of the logger to notify when first imported. ---------- """ from __future__ import (absolute_import, division, print_function) from six.moves import (filter, input, map, range, zip) # noqa import six from six.moves import configparser import contextlib import os.path import sys import warnings # Returns simple string options def get_option(section, option, default=None): """ Returns the option value for the given section, or the default value if the section/option is not present. """ value = default if config.has_option(section, option): value = config.get(section, option) return value # Returns directory path options def get_dir_option(section, option, default=None): """ Returns the directory path from the given option and section, or returns the given default value if the section/option is not present or does not represent a valid directory. """ path = default if config.has_option(section, option): c_path = config.get(section, option) if os.path.isdir(c_path): path = c_path else: msg = 'Ignoring config item {!r}:{!r} (section:option) as {!r}' \ ' is not a valid directory path.' warnings.warn(msg.format(section, option, c_path)) return path # Figure out the full path to the "iris" package. ROOT_PATH = os.path.abspath(os.path.dirname(__file__)) # The full path to the configuration directory of the active Iris instance. CONFIG_PATH = os.path.join(ROOT_PATH, 'etc') # Load the optional "site.cfg" file if it exists. if sys.version_info >= (3, 2): config = configparser.ConfigParser() else: config = configparser.SafeConfigParser() config.read([os.path.join(CONFIG_PATH, 'site.cfg')]) ################## # Resource options _RESOURCE_SECTION = 'Resources' TEST_DATA_DIR = get_dir_option(_RESOURCE_SECTION, 'test_data_dir', default=os.path.join(os.path.dirname(__file__), 'test_data')) # Override the data repository if the appropriate environment variable # has been set. This is used in setup.py in the TestRunner command to # enable us to simulate the absence of external data. 
override = os.environ.get("OVERRIDE_TEST_DATA_REPOSITORY") if override: TEST_DATA_DIR = None if os.path.isdir(os.path.expanduser(override)): TEST_DATA_DIR = os.path.abspath(override) PALETTE_PATH = get_dir_option(_RESOURCE_SECTION, 'palette_path', os.path.join(CONFIG_PATH, 'palette')) # Runtime options class NetCDF(object): """Control Iris NetCDF options.""" def __init__(self, conventions_override=None): """ Set up NetCDF processing options for Iris. Currently accepted kwargs: * conventions_override (bool): Define whether the CF Conventions version (e.g. `CF-1.6`) set when saving a cube to a NetCDF file should be defined by Iris (the default) or the cube being saved. If `False` (the default), specifies that Iris should set the CF Conventions version when saving cubes as NetCDF files. If `True`, specifies that the cubes being saved to NetCDF should set the CF Conventions version for the saved NetCDF files. Example usages: * Specify, for the lifetime of the session, that we want all cubes written to NetCDF to define their own CF Conventions versions:: iris.config.netcdf.conventions_override = True iris.save('my_cube', 'my_dataset.nc') iris.save('my_second_cube', 'my_second_dataset.nc') * Specify, with a context manager, that we want a cube written to NetCDF to define its own CF Conventions version:: with iris.config.netcdf.context(conventions_override=True): iris.save('my_cube', 'my_dataset.nc') """ # Define allowed `__dict__` keys first. self.__dict__['conventions_override'] = None # Now set specific values. setattr(self, 'conventions_override', conventions_override) def __repr__(self): msg = 'NetCDF options: {}.' # Automatically populate with all currently accepted kwargs. options = ['{}={}'.format(k, v) for k, v in six.iteritems(self.__dict__)] joined = ', '.join(options) return msg.format(joined) def __setattr__(self, name, value): if name not in self.__dict__: # Can't add new names. msg = 'Cannot set option {!r} for {} configuration.' raise AttributeError(msg.format(name, self.__class__.__name__)) if value is None: # Set an unset value to the name's default. value = self._defaults_dict[name]['default'] if self._defaults_dict[name]['options'] is not None: # Replace a bad value with a good one if there is a defined set of # specified good values. If there isn't, we can assume that # anything goes. if value not in self._defaults_dict[name]['options']: good_value = self._defaults_dict[name]['default'] wmsg = ('Attempting to set invalid value {!r} for ' 'attribute {!r}. Defaulting to {!r}.') warnings.warn(wmsg.format(value, name, good_value)) value = good_value self.__dict__[name] = value @property def _defaults_dict(self): # Set this as a property so that it isn't added to `self.__dict__`. return {'conventions_override': {'default': False, 'options': [True, False]}, } @contextlib.contextmanager def context(self, **kwargs): """ Allow temporary modification of the options via a context manager. Accepted kwargs are the same as can be supplied to the Option. """ # Snapshot the starting state for restoration at the end of the # contextmanager block. starting_state = self.__dict__.copy() # Update the state to reflect the requested changes. for name, value in six.iteritems(kwargs): setattr(self, name, value) try: yield finally: # Return the state to the starting state. self.__dict__.clear() self.__dict__.update(starting_state) netcdf = NetCDF()
dkillick/iris
lib/iris/config.py
Python
lgpl-3.0
7,984
[ "NetCDF" ]
b4216fcdaeec409c01cc4d954d41ccf82552f23bebde27d2bf4eed24f509b199
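The NetCDF options object in the Iris config module above combines three ideas: a fixed set of allowed option names, validation against a defaults table in __setattr__, and a context manager that snapshots and restores __dict__. A stripped-down, library-independent sketch of the same pattern (class and option names here are illustrative, not Iris API):

import contextlib
import warnings

class Options(object):
    """Options object with a fixed key set and temporary overrides."""

    _DEFAULTS = {"conventions_override": {"default": False,
                                          "options": [True, False]}}

    def __init__(self, **kwargs):
        for name, spec in self._DEFAULTS.items():
            self.__dict__[name] = spec["default"]
        for name, value in kwargs.items():
            setattr(self, name, value)

    def __setattr__(self, name, value):
        if name not in self._DEFAULTS:
            raise AttributeError("unknown option %r" % name)
        spec = self._DEFAULTS[name]
        if value is None:
            value = spec["default"]
        elif spec["options"] is not None and value not in spec["options"]:
            warnings.warn("invalid value %r for %r; using default" % (value, name))
            value = spec["default"]
        self.__dict__[name] = value

    @contextlib.contextmanager
    def context(self, **kwargs):
        saved = self.__dict__.copy()       # snapshot current state
        for name, value in kwargs.items():
            setattr(self, name, value)
        try:
            yield self
        finally:
            self.__dict__.clear()          # restore state on exit
            self.__dict__.update(saved)

opts = Options()
with opts.context(conventions_override=True):
    assert opts.conventions_override is True
assert opts.conventions_override is False   # restored after the block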
import sys from flask import Flask, request import telepot from telepot.loop import OrderedWebhook """ $ python2.7 flask_deeplinking.py <bot_username> <token> <listening_port> https://<domain>/webhook Webhook path is '/webhook'. Initial webpage is '/link'. 1. Open browser, visit: `https://<domain>/link` 2. Click on the link 3. On Telegram conversation, click on the `START` button 4. Bot should receive a message `/start ghijk`, where `ghijk` is the payload embedded in the link. You may use this payload to identify the user, then his Telegram `chat_id`. """ key_id_map = { 'ghijk' : 123 } def handle(msg): content_type, chat_type, chat_id = telepot.glance(msg) print 'Chat Message:', content_type, chat_type, chat_id if content_type == 'text': text = msg['text'] print 'Text:', text if text.startswith('/start'): try: command, payload = text.split(' ') print 'Payload:', payload print 'User ID:', key_id_map[payload] print 'chat_id:', chat_id except ValueError: print 'No payload, or more than one chunk of payload' except KeyError: print 'Invalid key, no corresponding User ID' BOT_USERNAME = sys.argv[1] TOKEN = sys.argv[2] PORT = int(sys.argv[3]) URL = sys.argv[4] app = Flask(__name__) bot = telepot.Bot(TOKEN) webhook = OrderedWebhook(bot, handle) @app.route('/link', methods=['GET', 'POST']) def display_link(): first_key_in_database = key_id_map.items()[0][0] return '<a href="https://telegram.me/%s?start=%s">Open conversation with bot</a>' % (BOT_USERNAME, first_key_in_database) @app.route('/webhook', methods=['GET', 'POST']) def pass_update(): webhook.feed(request.data) return 'OK' if __name__ == '__main__': try: bot.setWebhook(URL) # Sometimes it would raise this error, but webhook still set successfully. except telepot.exception.TooManyRequestsError: pass webhook.run_as_thread() app.run(port=PORT, debug=True)
nickoala/telepot
examples/deeplinking/flask_deeplinking.py
Python
mit
2,073
[ "VisIt" ]
f8858af2274928a93875779823371949c3300bee1e5d11ec0b6b0739e71d9c9e
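The deep-linking flow in flask_deeplinking.py above hinges on one small step: when a user opens https://telegram.me/&lt;bot&gt;?start=&lt;payload&gt; and taps START, Telegram delivers the bot a "/start <payload>" message, and the handler maps that payload back to a known user. A tiny dependency-free sketch of that parsing step; the key-to-ID mapping is invented, mirroring key_id_map in the script.

# Stand-in for the key -> internal user id mapping used by the bot.
KEY_ID_MAP = {'ghijk': 123}

def resolve_start_payload(text, key_id_map=KEY_ID_MAP):
    """Return the internal user id for a '/start <payload>' message, or None."""
    parts = text.split(' ')
    if len(parts) != 2 or parts[0] != '/start':
        return None                      # no payload, or malformed command
    return key_id_map.get(parts[1])      # None if the key is unknown

assert resolve_start_payload('/start ghijk') == 123
assert resolve_start_payload('/start') is None
assert resolve_start_payload('/start bogus') is None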
# coding: utf-8 """ Acceptance tests for Studio's Setting pages """ import os from textwrap import dedent from bok_choy.promise import EmptyPromise from mock import patch from common.test.acceptance.fixtures.course import XBlockFixtureDesc from common.test.acceptance.pages.common.utils import add_enrollment_course_modes from common.test.acceptance.pages.studio.overview import CourseOutlinePage from common.test.acceptance.pages.studio.settings import SettingsPage from common.test.acceptance.pages.studio.settings_group_configurations import GroupConfigurationsPage from common.test.acceptance.pages.studio.utils import get_input_value from common.test.acceptance.tests.helpers import create_user_partition_json, element_has_text from common.test.acceptance.tests.studio.base_studio_test import StudioCourseTest from openedx.core.lib.tests import attr from xmodule.partitions.partitions import Group @attr(shard=19) class ContentGroupConfigurationTest(StudioCourseTest): """ Tests for content groups in the Group Configurations Page. There are tests for the experiment groups in test_studio_split_test. """ def setUp(self): super(ContentGroupConfigurationTest, self).setUp() self.group_configurations_page = GroupConfigurationsPage( self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run'] ) self.outline_page = CourseOutlinePage( self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run'] ) def populate_course_fixture(self, course_fixture): """ Populates test course with chapter, sequential, and 1 problems. The problem is visible only to Group "alpha". """ course_fixture.add_children( XBlockFixtureDesc('chapter', 'Test Section').add_children( XBlockFixtureDesc('sequential', 'Test Subsection').add_children( XBlockFixtureDesc('vertical', 'Test Unit') ) ) ) def create_and_verify_content_group(self, name, existing_groups): """ Creates a new content group and verifies that it was properly created. """ self.assertEqual(existing_groups, len(self.group_configurations_page.content_groups)) if existing_groups == 0: self.group_configurations_page.create_first_content_group() else: self.group_configurations_page.add_content_group() config = self.group_configurations_page.content_groups[existing_groups] config.name = name # Save the content group self.assertEqual(config.get_text('.action-primary'), "Create") self.assertFalse(config.delete_button_is_present) config.save() self.assertIn(name, config.name) return config def test_no_content_groups_by_default(self): """ Scenario: Ensure that message telling me to create a new content group is shown when no content groups exist. Given I have a course without content groups When I go to the Group Configuration page in Studio Then I see "You have not created any content groups yet." message """ self.group_configurations_page.visit() self.assertTrue(self.group_configurations_page.no_content_groups_message_is_present) self.assertIn( "You have not created any content groups yet.", self.group_configurations_page.no_content_groups_message_text ) def test_can_create_and_edit_content_groups(self): """ Scenario: Ensure that the content groups can be created and edited correctly. 
Given I have a course without content groups When I click button 'Add your first Content Group' And I set new the name and click the button 'Create' Then I see the new content is added and has correct data And I click 'New Content Group' button And I set the name and click the button 'Create' Then I see the second content group is added and has correct data When I edit the second content group And I change the name and click the button 'Save' Then I see the second content group is saved successfully and has the new name """ self.group_configurations_page.visit() self.create_and_verify_content_group("New Content Group", 0) second_config = self.create_and_verify_content_group("Second Content Group", 1) # Edit the second content group second_config.edit() second_config.name = "Updated Second Content Group" self.assertEqual(second_config.get_text('.action-primary'), "Save") second_config.save() self.assertIn("Updated Second Content Group", second_config.name) def test_cannot_delete_used_content_group(self): """ Scenario: Ensure that the user cannot delete used content group. Given I have a course with 1 Content Group And I go to the Group Configuration page When I try to delete the Content Group with name "New Content Group" Then I see the delete button is disabled. """ self.course_fixture._update_xblock(self.course_fixture._course_location, { "metadata": { u"user_partitions": [ create_user_partition_json( 0, 'Configuration alpha,', 'Content Group Partition', [Group("0", 'alpha')], scheme="cohort" ) ], }, }) problem_data = dedent(""" <problem markdown="Simple Problem" max_attempts="" weight=""> <p>Choose Yes.</p> <choiceresponse> <checkboxgroup> <choice correct="true">Yes</choice> </checkboxgroup> </choiceresponse> </problem> """) vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0] self.course_fixture.create_xblock( vertical.locator, XBlockFixtureDesc('problem', "VISIBLE TO ALPHA", data=problem_data, metadata={"group_access": {0: [0]}}), ) self.group_configurations_page.visit() config = self.group_configurations_page.content_groups[0] self.assertTrue(config.delete_button_is_disabled) def test_can_delete_unused_content_group(self): """ Scenario: Ensure that the user can delete unused content group. Given I have a course with 1 Content Group And I go to the Group Configuration page When I delete the Content Group with name "New Content Group" Then I see that there is no Content Group When I refresh the page Then I see that the content group has been deleted """ self.group_configurations_page.visit() config = self.create_and_verify_content_group("New Content Group", 0) self.assertTrue(config.delete_button_is_present) self.assertEqual(len(self.group_configurations_page.content_groups), 1) # Delete content group config.delete() self.assertEqual(len(self.group_configurations_page.content_groups), 0) self.group_configurations_page.visit() self.assertEqual(len(self.group_configurations_page.content_groups), 0) def test_must_supply_name(self): """ Scenario: Ensure that validation of the content group works correctly. Given I have a course without content groups And I create new content group without specifying a name click the button 'Create' Then I see error message "Content Group name is required." 
When I set a name and click the button 'Create' Then I see the content group is saved successfully """ self.group_configurations_page.visit() self.group_configurations_page.create_first_content_group() config = self.group_configurations_page.content_groups[0] config.save() self.assertEqual(config.mode, 'edit') self.assertEqual("Group name is required", config.validation_message) config.name = "Content Group Name" config.save() self.assertIn("Content Group Name", config.name) def test_can_cancel_creation_of_content_group(self): """ Scenario: Ensure that creation of a content group can be canceled correctly. Given I have a course without content groups When I click button 'Add your first Content Group' And I set new the name and click the button 'Cancel' Then I see that there is no content groups in the course """ self.group_configurations_page.visit() self.group_configurations_page.create_first_content_group() config = self.group_configurations_page.content_groups[0] config.name = "Content Group" config.cancel() self.assertEqual(0, len(self.group_configurations_page.content_groups)) def test_content_group_empty_usage(self): """ Scenario: When content group is not used, ensure that the link to outline page works correctly. Given I have a course without content group And I create new content group Then I see a link to the outline page When I click on the outline link Then I see the outline page """ self.group_configurations_page.visit() config = self.create_and_verify_content_group("New Content Group", 0) config.toggle() config.click_outline_anchor() # Waiting for the page load and verify that we've landed on course outline page self.outline_page.wait_for_page() @attr('a11y') class StudioSettingsA11yTest(StudioCourseTest): """ Class to test Studio pages accessibility. """ def setUp(self): # pylint: disable=arguments-differ super(StudioSettingsA11yTest, self).setUp() self.settings_page = SettingsPage(self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']) def test_studio_settings_page_a11y(self): """ Check accessibility of SettingsPage. """ self.settings_page.visit() self.settings_page.wait_for_page() self.settings_page.a11y_audit.config.set_rules({ "ignore": [ 'link-href', # TODO: AC-590 'aria-allowed-role', # TODO: AC-936 'landmark-complementary-is-top-level', # TODO: AC-939 'radiogroup', # TODO: AC-941 'region', # TODO: AC-932 ], }) self.settings_page.a11y_audit.check_for_accessibility_errors() @attr('a11y') class StudioSubsectionSettingsA11yTest(StudioCourseTest): """ Class to test accessibility on the subsection settings modals. """ def setUp(self): # pylint: disable=arguments-differ browser = os.environ.get('SELENIUM_BROWSER', 'firefox') # This test will fail if run using phantomjs < 2.0, due to an issue with bind() # See https://github.com/ariya/phantomjs/issues/10522 for details. # The course_outline uses this function, and as such will not fully load when run # under phantomjs 1.9.8. So, to prevent this test from timing out at course_outline.visit(), # force the use of firefox vs the standard a11y test usage of phantomjs 1.9.8. # TODO: remove this block once https://openedx.atlassian.net/browse/TE-1047 is resolved. 
if browser == 'phantomjs': browser = 'firefox' with patch.dict(os.environ, {'SELENIUM_BROWSER': browser}): super(StudioSubsectionSettingsA11yTest, self).setUp(is_staff=True) self.course_outline = CourseOutlinePage( self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run'] ) def populate_course_fixture(self, course_fixture): course_fixture.add_advanced_settings({ "enable_proctored_exams": {"value": "true"} }) course_fixture.add_children( XBlockFixtureDesc('chapter', 'Test Section 1').add_children( XBlockFixtureDesc('sequential', 'Test Subsection 1').add_children( XBlockFixtureDesc('problem', 'Test Problem 1') ) ) ) def test_special_exams_menu_a11y(self): """ Given that I am a staff member And I am editing settings on the special exams menu Then that menu is accessible """ self.course_outline.visit() self.course_outline.open_subsection_settings_dialog() self.course_outline.select_advanced_tab() self.course_outline.a11y_audit.config.set_rules({ "ignore": [ 'section', # TODO: AC-491 ], }) # limit the scope of the audit to the special exams tab on the modal dialog self.course_outline.a11y_audit.config.set_scope( include=['section.edit-settings-timed-examination'] ) self.course_outline.a11y_audit.check_for_accessibility_errors() @attr(shard=15) class StudioSettingsImageUploadTest(StudioCourseTest): """ Class to test course settings image uploads. """ def setUp(self): # pylint: disable=arguments-differ super(StudioSettingsImageUploadTest, self).setUp() self.settings_page = SettingsPage(self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']) self.settings_page.visit() # Ensure jquery is loaded before running a jQuery self.settings_page.wait_for_ajax() # This text appears towards the end of the work that jQuery is performing on the page self.settings_page.wait_for_jquery_value('input#course-name:text', 'test_run') def test_upload_course_card_image(self): # upload image file_to_upload = 'image.jpg' self.settings_page.upload_image('#upload-course-image', file_to_upload) self.assertIn(file_to_upload, self.settings_page.get_uploaded_image_path('#course-image')) def test_upload_course_banner_image(self): # upload image file_to_upload = 'image.jpg' self.settings_page.upload_image('#upload-banner-image', file_to_upload) self.assertIn(file_to_upload, self.settings_page.get_uploaded_image_path('#banner-image')) def test_upload_course_video_thumbnail_image(self): # upload image file_to_upload = 'image.jpg' self.settings_page.upload_image('#upload-video-thumbnail-image', file_to_upload) self.assertIn(file_to_upload, self.settings_page.get_uploaded_image_path('#video-thumbnail-image')) @attr(shard=16) class CourseSettingsTest(StudioCourseTest): """ Class to test course settings. 
""" COURSE_START_DATE_CSS = "#course-start-date" COURSE_END_DATE_CSS = "#course-end-date" ENROLLMENT_START_DATE_CSS = "#course-enrollment-start-date" ENROLLMENT_END_DATE_CSS = "#course-enrollment-end-date" COURSE_START_TIME_CSS = "#course-start-time" COURSE_END_TIME_CSS = "#course-end-time" ENROLLMENT_START_TIME_CSS = "#course-enrollment-start-time" ENROLLMENT_END_TIME_CSS = "#course-enrollment-end-time" course_start_date = '12/20/2013' course_end_date = '12/26/2013' enrollment_start_date = '12/01/2013' enrollment_end_date = '12/10/2013' dummy_time = "15:30" def setUp(self, is_staff=False, test_xss=True): super(CourseSettingsTest, self).setUp() self.settings_page = SettingsPage( self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run'] ) # Before every test, make sure to visit the page first self.settings_page.visit() self.ensure_input_fields_are_loaded() def set_course_dates(self): """ Set dates for the course. """ dates_dictionary = { self.COURSE_START_DATE_CSS: self.course_start_date, self.COURSE_END_DATE_CSS: self.course_end_date, self.ENROLLMENT_START_DATE_CSS: self.enrollment_start_date, self.ENROLLMENT_END_DATE_CSS: self.enrollment_end_date } self.settings_page.set_element_values(dates_dictionary) def ensure_input_fields_are_loaded(self): """ Ensures values in input fields are loaded. """ EmptyPromise( lambda: self.settings_page.q(css='#course-organization').attrs('value')[0], "Waiting for input fields to be loaded" ).fulfill() def test_user_can_set_course_date(self): """ Scenario: User can set course dates Given I have opened a new course in Studio When I select Schedule and Details And I set course dates And I press the "Save" notification button And I reload the page Then I see the set dates """ # Set dates self.set_course_dates() # Set times time_dictionary = { self.COURSE_START_TIME_CSS: self.dummy_time, self.ENROLLMENT_END_TIME_CSS: self.dummy_time } self.settings_page.set_element_values(time_dictionary) # Save changes self.settings_page.save_changes() self.settings_page.refresh_and_wait_for_load() self.ensure_input_fields_are_loaded() css_selectors = [self.COURSE_START_DATE_CSS, self.COURSE_END_DATE_CSS, self.ENROLLMENT_START_DATE_CSS, self.ENROLLMENT_END_DATE_CSS, self.COURSE_START_TIME_CSS, self.ENROLLMENT_END_TIME_CSS] expected_values = [self.course_start_date, self.course_end_date, self.enrollment_start_date, self.enrollment_end_date, self.dummy_time, self.dummy_time] # Assert changes have been persistent. self.assertEqual( [get_input_value(self.settings_page, css_selector) for css_selector in css_selectors], expected_values ) def test_clear_previously_set_course_dates(self): """ Scenario: User can clear previously set course dates (except start date) Given I have set course dates And I clear all the dates except start And I press the "Save" notification button And I reload the page Then I see cleared dates """ # Set dates self.set_course_dates() # Clear all dates except start date values_to_set = { self.COURSE_END_DATE_CSS: '', self.ENROLLMENT_START_DATE_CSS: '', self.ENROLLMENT_END_DATE_CSS: '' } self.settings_page.set_element_values(values_to_set) # Save changes and refresh the page self.settings_page.save_changes() self.settings_page.refresh_and_wait_for_load() self.ensure_input_fields_are_loaded() css_selectors = [self.COURSE_START_DATE_CSS, self.COURSE_END_DATE_CSS, self.ENROLLMENT_START_DATE_CSS, self.ENROLLMENT_END_DATE_CSS] expected_values = [self.course_start_date, '', '', ''] # Assert changes have been persistent. 
self.assertEqual( [get_input_value(self.settings_page, css_selector) for css_selector in css_selectors], expected_values ) def test_cannot_clear_the_course_start_date(self): """ Scenario: User cannot clear the course start date Given I have set course dates And I press the "Save" notification button And I clear the course start date Then I receive a warning about course start date And I reload the page And the previously set start date is shown """ # Set dates self.set_course_dates() # Save changes self.settings_page.save_changes() # Get default start date default_start_date = get_input_value(self.settings_page, self.COURSE_START_DATE_CSS) # Set course start date to empty self.settings_page.set_element_values({self.COURSE_START_DATE_CSS: ''}) # Make sure error message is show with appropriate message error_message_css = '.message-error' self.settings_page.wait_for_element_presence(error_message_css, 'Error message is present') self.assertEqual(element_has_text(self.settings_page, error_message_css, "The course must have an assigned start date."), True) # Refresh the page and assert start date has not changed. self.settings_page.refresh_and_wait_for_load() self.ensure_input_fields_are_loaded() self.assertEqual( get_input_value(self.settings_page, self.COURSE_START_DATE_CSS), default_start_date ) def test_user_can_correct_course_start_date_warning(self): """ Scenario: User can correct the course start date warning Given I have tried to clear the course start And I have entered a new course start date And I press the "Save" notification button Then The warning about course start date goes away And I reload the page Then my new course start date is shown """ # Set course start date to empty self.settings_page.set_element_values({self.COURSE_START_DATE_CSS: ''}) # Make sure we get error message error_message_css = '.message-error' self.settings_page.wait_for_element_presence(error_message_css, 'Error message is present') self.assertEqual(element_has_text(self.settings_page, error_message_css, "The course must have an assigned start date."), True) # Set new course start value self.settings_page.set_element_values({self.COURSE_START_DATE_CSS: self.course_start_date}) self.settings_page.un_focus_input_field() # Error message disappears self.settings_page.wait_for_element_absence(error_message_css, 'Error message is not present') # Save the changes and refresh the page. self.settings_page.save_changes() self.settings_page.refresh_and_wait_for_load() self.ensure_input_fields_are_loaded() # Assert changes are persistent. self.assertEqual( get_input_value(self.settings_page, self.COURSE_START_DATE_CSS), self.course_start_date ) def test_settings_are_only_persisted_when_saved(self): """ Scenario: Settings are only persisted when saved Given I have set course dates And I press the "Save" notification button When I change fields And I reload the page Then I do not see the changes """ # Set course dates. self.set_course_dates() # Save changes. self.settings_page.save_changes() default_value_enrollment_start_date = get_input_value(self.settings_page, self.ENROLLMENT_START_TIME_CSS) # Set the value of enrollment start time and # reload the page without saving. 
self.settings_page.set_element_values({self.ENROLLMENT_START_TIME_CSS: self.dummy_time}) self.settings_page.refresh_and_wait_for_load() self.ensure_input_fields_are_loaded() css_selectors = [self.COURSE_START_DATE_CSS, self.COURSE_END_DATE_CSS, self.ENROLLMENT_START_DATE_CSS, self.ENROLLMENT_END_DATE_CSS, self.ENROLLMENT_START_TIME_CSS] expected_values = [self.course_start_date, self.course_end_date, self.enrollment_start_date, self.enrollment_end_date, default_value_enrollment_start_date] # Assert that value of enrolment start time # is not saved. self.assertEqual( [get_input_value(self.settings_page, css_selector) for css_selector in css_selectors], expected_values ) def test_settings_are_reset_on_cancel(self): """ Scenario: Settings are reset on cancel Given I have set course dates And I press the "Save" notification button When I change fields And I press the "Cancel" notification button Then I do not see the changes """ # Set course date self.set_course_dates() # Save changes self.settings_page.save_changes() default_value_enrollment_start_date = get_input_value(self.settings_page, self.ENROLLMENT_START_TIME_CSS) # Set value but don't save it. self.settings_page.set_element_values({self.ENROLLMENT_START_TIME_CSS: self.dummy_time}) self.settings_page.click_button("cancel") # Make sure changes are not saved after cancel. css_selectors = [self.COURSE_START_DATE_CSS, self.COURSE_END_DATE_CSS, self.ENROLLMENT_START_DATE_CSS, self.ENROLLMENT_END_DATE_CSS, self.ENROLLMENT_START_TIME_CSS] expected_values = [self.course_start_date, self.course_end_date, self.enrollment_start_date, self.enrollment_end_date, default_value_enrollment_start_date] self.assertEqual( [get_input_value(self.settings_page, css_selector) for css_selector in css_selectors], expected_values ) def test_confirmation_is_shown_on_save(self): """ Scenario: Confirmation is shown on save Given I have opened a new course in Studio When I select Schedule and Details And I change the "<field>" field to "<value>" And I press the "Save" notification button Then I see a confirmation that my changes have been saved """ # Set date self.settings_page.set_element_values({self.COURSE_START_DATE_CSS: self.course_start_date}) # Confirmation is showed upon save. # Save_changes function ensures that save # confirmation is shown. self.settings_page.save_changes() def test_changes_in_course_overview_show_a_confirmation(self): """ Scenario: Changes in Course Overview show a confirmation Given I have opened a new course in Studio When I select Schedule and Details And I change the course overview And I press the "Save" notification button Then I see a confirmation that my changes have been saved """ # Change the value of course overview self.settings_page.change_course_description('Changed overview') # Save changes # Save_changes function ensures that save # confirmation is shown. self.settings_page.save_changes() def test_user_cannot_save_invalid_settings(self): """ Scenario: User cannot save invalid settings Given I have opened a new course in Studio When I select Schedule and Details And I change the "Course Start Date" field to "" Then the save notification button is disabled """ # Change the course start date to invalid date. self.settings_page.set_element_values({self.COURSE_START_DATE_CSS: ''}) # Confirm that save button is disabled. self.assertEqual(self.settings_page.is_element_present(".action-primary.action-save.is-disabled"), True)
cpennington/edx-platform
common/test/acceptance/tests/studio/test_studio_settings.py
Python
agpl-3.0
27,998
[ "VisIt" ]
e326d7cbcc9c37a6daad1c766f78212babad597f98f5d093a99465e26444dd49
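The Studio settings tests above repeatedly poll the page (via EmptyPromise and the wait_for_* helpers) until inputs are populated before asserting on them. As a rough, standalone sketch of that polling pattern only — this is not the bok-choy API, and the `page` object in the trailing comment is hypothetical:

import time

def wait_until(condition, description, timeout=10.0, interval=0.5):
    """Poll `condition` until it returns a truthy value or `timeout` elapses.

    Plays the same role EmptyPromise plays in the tests above: block until the
    page (or any other resource) reaches the expected state before asserting.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        result = condition()
        if result:
            return result
        time.sleep(interval)
    raise TimeoutError("Timed out waiting for: %s" % description)

# Hypothetical usage, mirroring ensure_input_fields_are_loaded() above:
# wait_until(lambda: page.q(css='#course-organization').attrs('value')[0],
#            "input fields to be loaded")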
""" core implementation of testing process: init, session, runtest loop. """ from __future__ import absolute_import, division, print_function import functools import os import sys import _pytest from _pytest import nodes import _pytest._code import py try: from collections import MutableMapping as MappingMixin except ImportError: from UserDict import DictMixin as MappingMixin from _pytest.config import directory_arg, UsageError, hookimpl from _pytest.outcomes import exit from _pytest.runner import collect_one_node tracebackcutdir = py.path.local(_pytest.__file__).dirpath() # exitcodes for the command line EXIT_OK = 0 EXIT_TESTSFAILED = 1 EXIT_INTERRUPTED = 2 EXIT_INTERNALERROR = 3 EXIT_USAGEERROR = 4 EXIT_NOTESTSCOLLECTED = 5 def pytest_addoption(parser): parser.addini("norecursedirs", "directory patterns to avoid for recursion", type="args", default=['.*', 'build', 'dist', 'CVS', '_darcs', '{arch}', '*.egg', 'venv']) parser.addini("testpaths", "directories to search for tests when no files or directories are given in the " "command line.", type="args", default=[]) # parser.addini("dirpatterns", # "patterns specifying possible locations of test files", # type="linelist", default=["**/test_*.txt", # "**/test_*.py", "**/*_test.py"] # ) group = parser.getgroup("general", "running and selection options") group._addoption('-x', '--exitfirst', action="store_const", dest="maxfail", const=1, help="exit instantly on first error or failed test."), group._addoption('--maxfail', metavar="num", action="store", type=int, dest="maxfail", default=0, help="exit after first num failures or errors.") group._addoption('--strict', action="store_true", help="marks not registered in configuration file raise errors.") group._addoption("-c", metavar="file", type=str, dest="inifilename", help="load configuration from `file` instead of trying to locate one of the implicit " "configuration files.") group._addoption("--continue-on-collection-errors", action="store_true", default=False, dest="continue_on_collection_errors", help="Force test execution even if collection errors occur.") group = parser.getgroup("collect", "collection") group.addoption('--collectonly', '--collect-only', action="store_true", help="only collect tests, don't execute them."), group.addoption('--pyargs', action="store_true", help="try to interpret all arguments as python packages.") group.addoption("--ignore", action="append", metavar="path", help="ignore path during collection (multi-allowed).") # when changing this to --conf-cut-dir, config.py Conftest.setinitial # needs upgrading as well group.addoption('--confcutdir', dest="confcutdir", default=None, metavar="dir", type=functools.partial(directory_arg, optname="--confcutdir"), help="only load conftest.py's relative to specified dir.") group.addoption('--noconftest', action="store_true", dest="noconftest", default=False, help="Don't load any conftest.py files.") group.addoption('--keepduplicates', '--keep-duplicates', action="store_true", dest="keepduplicates", default=False, help="Keep duplicate tests.") group.addoption('--collect-in-virtualenv', action='store_true', dest='collect_in_virtualenv', default=False, help="Don't ignore tests in a local virtualenv directory") group = parser.getgroup("debugconfig", "test session debugging and configuration") group.addoption('--basetemp', dest="basetemp", default=None, metavar="dir", help="base temporary directory for this test run.") def pytest_namespace(): """keeping this one works around a deeper startup issue in pytest i tried to find it for a 
while but the amount of time turned unsustainable, so i put a hack in to revisit later """ return {} def pytest_configure(config): __import__('pytest').config = config # compatibiltiy def wrap_session(config, doit): """Skeleton command line program""" session = Session(config) session.exitstatus = EXIT_OK initstate = 0 try: try: config._do_configure() initstate = 1 config.hook.pytest_sessionstart(session=session) initstate = 2 session.exitstatus = doit(config, session) or 0 except UsageError: raise except KeyboardInterrupt: excinfo = _pytest._code.ExceptionInfo() if initstate < 2 and isinstance(excinfo.value, exit.Exception): sys.stderr.write('{0}: {1}\n'.format( excinfo.typename, excinfo.value.msg)) config.hook.pytest_keyboard_interrupt(excinfo=excinfo) session.exitstatus = EXIT_INTERRUPTED except: # noqa excinfo = _pytest._code.ExceptionInfo() config.notify_exception(excinfo, config.option) session.exitstatus = EXIT_INTERNALERROR if excinfo.errisinstance(SystemExit): sys.stderr.write("mainloop: caught Spurious SystemExit!\n") finally: excinfo = None # Explicitly break reference cycle. session.startdir.chdir() if initstate >= 2: config.hook.pytest_sessionfinish( session=session, exitstatus=session.exitstatus) config._ensure_unconfigure() return session.exitstatus def pytest_cmdline_main(config): return wrap_session(config, _main) def _main(config, session): """ default command line protocol for initialization, session, running tests and reporting. """ config.hook.pytest_collection(session=session) config.hook.pytest_runtestloop(session=session) if session.testsfailed: return EXIT_TESTSFAILED elif session.testscollected == 0: return EXIT_NOTESTSCOLLECTED def pytest_collection(session): return session.perform_collect() def pytest_runtestloop(session): if (session.testsfailed and not session.config.option.continue_on_collection_errors): raise session.Interrupted( "%d errors during collection" % session.testsfailed) if session.config.option.collectonly: return True for i, item in enumerate(session.items): nextitem = session.items[i + 1] if i + 1 < len(session.items) else None item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) if session.shouldstop: raise session.Interrupted(session.shouldstop) return True def _in_venv(path): """Attempts to detect if ``path`` is the root of a Virtual Environment by checking for the existence of the appropriate activate script""" bindir = path.join('Scripts' if sys.platform.startswith('win') else 'bin') if not bindir.exists(): return False activates = ('activate', 'activate.csh', 'activate.fish', 'Activate', 'Activate.bat', 'Activate.ps1') return any([fname.basename in activates for fname in bindir.listdir()]) def pytest_ignore_collect(path, config): ignore_paths = config._getconftest_pathlist("collect_ignore", path=path.dirpath()) ignore_paths = ignore_paths or [] excludeopt = config.getoption("ignore") if excludeopt: ignore_paths.extend([py.path.local(x) for x in excludeopt]) if py.path.local(path) in ignore_paths: return True allow_in_venv = config.getoption("collect_in_virtualenv") if _in_venv(path) and not allow_in_venv: return True # Skip duplicate paths. 
keepduplicates = config.getoption("keepduplicates") duplicate_paths = config.pluginmanager._duplicatepaths if not keepduplicates: if path in duplicate_paths: return True else: duplicate_paths.add(path) return False class FSHookProxy: def __init__(self, fspath, pm, remove_mods): self.fspath = fspath self.pm = pm self.remove_mods = remove_mods def __getattr__(self, name): x = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods) self.__dict__[name] = x return x class _CompatProperty(object): def __init__(self, name): self.name = name def __get__(self, obj, owner): if obj is None: return self # TODO: reenable in the features branch # warnings.warn( # "usage of {owner!r}.{name} is deprecated, please use pytest.{name} instead".format( # name=self.name, owner=type(owner).__name__), # PendingDeprecationWarning, stacklevel=2) return getattr(__import__('pytest'), self.name) class NodeKeywords(MappingMixin): def __init__(self, node): self.node = node self.parent = node.parent self._markers = {node.name: True} def __getitem__(self, key): try: return self._markers[key] except KeyError: if self.parent is None: raise return self.parent.keywords[key] def __setitem__(self, key, value): self._markers[key] = value def __delitem__(self, key): raise ValueError("cannot delete key in keywords dict") def __iter__(self): seen = set(self._markers) if self.parent is not None: seen.update(self.parent.keywords) return iter(seen) def __len__(self): return len(self.__iter__()) def keys(self): return list(self) def __repr__(self): return "<NodeKeywords for node %s>" % (self.node, ) class Node(object): """ base class for Collector and Item the test collection tree. Collector subclasses have children, Items are terminal nodes.""" def __init__(self, name, parent=None, config=None, session=None): #: a unique name within the scope of the parent node self.name = name #: the parent collector node. self.parent = parent #: the pytest config object self.config = config or parent.config #: the session this node is part of self.session = session or parent.session #: filesystem path where this node was collected from (can be None) self.fspath = getattr(parent, 'fspath', None) #: keywords/markers collected from all scopes self.keywords = NodeKeywords(self) #: allow adding of extra keywords to use for matching self.extra_keyword_matches = set() # used for storing artificial fixturedefs for direct parametrization self._name2pseudofixturedef = {} @property def ihook(self): """ fspath sensitive hook proxy used to call pytest hooks""" return self.session.gethookproxy(self.fspath) Module = _CompatProperty("Module") Class = _CompatProperty("Class") Instance = _CompatProperty("Instance") Function = _CompatProperty("Function") File = _CompatProperty("File") Item = _CompatProperty("Item") def _getcustomclass(self, name): maybe_compatprop = getattr(type(self), name) if isinstance(maybe_compatprop, _CompatProperty): return getattr(__import__('pytest'), name) else: cls = getattr(self, name) # TODO: reenable in the features branch # warnings.warn("use of node.%s is deprecated, " # "use pytest_pycollect_makeitem(...) to create custom " # "collection nodes" % name, category=DeprecationWarning) return cls def __repr__(self): return "<%s %r>" % (self.__class__.__name__, getattr(self, 'name', None)) def warn(self, code, message): """ generate a warning with the given code and message for this item. 
""" assert isinstance(code, str) fslocation = getattr(self, "location", None) if fslocation is None: fslocation = getattr(self, "fspath", None) self.ihook.pytest_logwarning.call_historic(kwargs=dict( code=code, message=message, nodeid=self.nodeid, fslocation=fslocation)) # methods for ordering nodes @property def nodeid(self): """ a ::-separated string denoting its collection tree address. """ try: return self._nodeid except AttributeError: self._nodeid = x = self._makeid() return x def _makeid(self): return self.parent.nodeid + "::" + self.name def __hash__(self): return hash(self.nodeid) def setup(self): pass def teardown(self): pass def _memoizedcall(self, attrname, function): exattrname = "_ex_" + attrname failure = getattr(self, exattrname, None) if failure is not None: py.builtin._reraise(failure[0], failure[1], failure[2]) if hasattr(self, attrname): return getattr(self, attrname) try: res = function() except py.builtin._sysex: raise except: # noqa failure = sys.exc_info() setattr(self, exattrname, failure) raise setattr(self, attrname, res) return res def listchain(self): """ return list of all parent collectors up to self, starting from root of collection tree. """ chain = [] item = self while item is not None: chain.append(item) item = item.parent chain.reverse() return chain def add_marker(self, marker): """ dynamically add a marker object to the node. ``marker`` can be a string or pytest.mark.* instance. """ from _pytest.mark import MarkDecorator, MARK_GEN if isinstance(marker, py.builtin._basestring): marker = getattr(MARK_GEN, marker) elif not isinstance(marker, MarkDecorator): raise ValueError("is not a string or pytest.mark.* Marker") self.keywords[marker.name] = marker def get_marker(self, name): """ get a marker object from this node or None if the node doesn't have a marker with that name. """ val = self.keywords.get(name, None) if val is not None: from _pytest.mark import MarkInfo, MarkDecorator if isinstance(val, (MarkDecorator, MarkInfo)): return val def listextrakeywords(self): """ Return a set of all extra keywords in self and any parents.""" extra_keywords = set() item = self for item in self.listchain(): extra_keywords.update(item.extra_keyword_matches) return extra_keywords def listnames(self): return [x.name for x in self.listchain()] def addfinalizer(self, fin): """ register a function to be called when this node is finalized. This method can only be called when this node is active in a setup chain, for example during self.setup(). """ self.session._setupstate.addfinalizer(fin, self) def getparent(self, cls): """ get the next parent node (including ourself) which is an instance of the given class""" current = self while current and not isinstance(current, cls): current = current.parent return current def _prunetraceback(self, excinfo): pass def _repr_failure_py(self, excinfo, style=None): fm = self.session._fixturemanager if excinfo.errisinstance(fm.FixtureLookupError): return excinfo.value.formatrepr() tbfilter = True if self.config.option.fulltrace: style = "long" else: tb = _pytest._code.Traceback([excinfo.traceback[-1]]) self._prunetraceback(excinfo) if len(excinfo.traceback) == 0: excinfo.traceback = tb tbfilter = False # prunetraceback already does it if style == "auto": style = "long" # XXX should excinfo.getrepr record all data and toterminal() process it? 
if style is None: if self.config.option.tbstyle == "short": style = "short" else: style = "long" try: os.getcwd() abspath = False except OSError: abspath = True return excinfo.getrepr(funcargs=True, abspath=abspath, showlocals=self.config.option.showlocals, style=style, tbfilter=tbfilter) repr_failure = _repr_failure_py class Collector(Node): """ Collector instances create children through collect() and thus iteratively build a tree. """ class CollectError(Exception): """ an error during collection, contains a custom message. """ def collect(self): """ returns a list of children (items and collectors) for this collection node. """ raise NotImplementedError("abstract") def repr_failure(self, excinfo): """ represent a collection failure. """ if excinfo.errisinstance(self.CollectError): exc = excinfo.value return str(exc.args[0]) return self._repr_failure_py(excinfo, style="short") def _prunetraceback(self, excinfo): if hasattr(self, 'fspath'): traceback = excinfo.traceback ntraceback = traceback.cut(path=self.fspath) if ntraceback == traceback: ntraceback = ntraceback.cut(excludepath=tracebackcutdir) excinfo.traceback = ntraceback.filter() class FSCollector(Collector): def __init__(self, fspath, parent=None, config=None, session=None): fspath = py.path.local(fspath) # xxx only for test_resultlog.py? name = fspath.basename if parent is not None: rel = fspath.relto(parent.fspath) if rel: name = rel name = name.replace(os.sep, nodes.SEP) super(FSCollector, self).__init__(name, parent, config, session) self.fspath = fspath def _check_initialpaths_for_relpath(self): for initialpath in self.session._initialpaths: if self.fspath.common(initialpath) == initialpath: return self.fspath.relto(initialpath.dirname) def _makeid(self): relpath = self.fspath.relto(self.config.rootdir) if not relpath: relpath = self._check_initialpaths_for_relpath() if os.sep != nodes.SEP: relpath = relpath.replace(os.sep, nodes.SEP) return relpath class File(FSCollector): """ base class for collecting tests from a file. """ class Item(Node): """ a basic test invocation item. Note that for a single function there might be multiple test invocation items. """ nextitem = None def __init__(self, name, parent=None, config=None, session=None): super(Item, self).__init__(name, parent, config, session) self._report_sections = [] def add_report_section(self, when, key, content): """ Adds a new report section, similar to what's done internally to add stdout and stderr captured output:: item.add_report_section("call", "stdout", "report section contents") :param str when: One of the possible capture states, ``"setup"``, ``"call"``, ``"teardown"``. :param str key: Name of the section, can be customized at will. Pytest uses ``"stdout"`` and ``"stderr"`` internally. :param str content: The full contents as a string. """ if content: self._report_sections.append((when, key, content)) def reportinfo(self): return self.fspath, None, "" @property def location(self): try: return self._location except AttributeError: location = self.reportinfo() # bestrelpath is a quite slow function cache = self.config.__dict__.setdefault("_bestrelpathcache", {}) try: fspath = cache[location[0]] except KeyError: fspath = self.session.fspath.bestrelpath(location[0]) cache[location[0]] = fspath location = (fspath, location[1], str(location[2])) self._location = location return location class NoMatch(Exception): """ raised if matching cannot locate a matching names. """ class Interrupted(KeyboardInterrupt): """ signals an interrupted test run. 
""" __module__ = 'builtins' # for py3 class Session(FSCollector): Interrupted = Interrupted def __init__(self, config): FSCollector.__init__(self, config.rootdir, parent=None, config=config, session=self) self.testsfailed = 0 self.testscollected = 0 self.shouldstop = False self.trace = config.trace.root.get("collection") self._norecursepatterns = config.getini("norecursedirs") self.startdir = py.path.local() self.config.pluginmanager.register(self, name="session") def _makeid(self): return "" @hookimpl(tryfirst=True) def pytest_collectstart(self): if self.shouldstop: raise self.Interrupted(self.shouldstop) @hookimpl(tryfirst=True) def pytest_runtest_logreport(self, report): if report.failed and not hasattr(report, 'wasxfail'): self.testsfailed += 1 maxfail = self.config.getvalue("maxfail") if maxfail and self.testsfailed >= maxfail: self.shouldstop = "stopping after %d failures" % ( self.testsfailed) pytest_collectreport = pytest_runtest_logreport def isinitpath(self, path): return path in self._initialpaths def gethookproxy(self, fspath): # check if we have the common case of running # hooks with all conftest.py filesall conftest.py pm = self.config.pluginmanager my_conftestmodules = pm._getconftestmodules(fspath) remove_mods = pm._conftest_plugins.difference(my_conftestmodules) if remove_mods: # one or more conftests are not in use at this fspath proxy = FSHookProxy(fspath, pm, remove_mods) else: # all plugis are active for this fspath proxy = self.config.hook return proxy def perform_collect(self, args=None, genitems=True): hook = self.config.hook try: items = self._perform_collect(args, genitems) self.config.pluginmanager.check_pending() hook.pytest_collection_modifyitems(session=self, config=self.config, items=items) finally: hook.pytest_collection_finish(session=self) self.testscollected = len(items) return items def _perform_collect(self, args, genitems): if args is None: args = self.config.args self.trace("perform_collect", self, args) self.trace.root.indent += 1 self._notfound = [] self._initialpaths = set() self._initialparts = [] self.items = items = [] for arg in args: parts = self._parsearg(arg) self._initialparts.append(parts) self._initialpaths.add(parts[0]) rep = collect_one_node(self) self.ihook.pytest_collectreport(report=rep) self.trace.root.indent -= 1 if self._notfound: errors = [] for arg, exc in self._notfound: line = "(no name %r in any of %r)" % (arg, exc.args[0]) errors.append("not found: %s\n%s" % (arg, line)) # XXX: test this raise UsageError(*errors) if not genitems: return rep.result else: if rep.passed: for node in rep.result: self.items.extend(self.genitems(node)) return items def collect(self): for parts in self._initialparts: arg = "::".join(map(str, parts)) self.trace("processing argument", arg) self.trace.root.indent += 1 try: for x in self._collect(arg): yield x except NoMatch: # we are inside a make_report hook so # we cannot directly pass through the exception self._notfound.append((arg, sys.exc_info()[1])) self.trace.root.indent -= 1 def _collect(self, arg): names = self._parsearg(arg) path = names.pop(0) if path.check(dir=1): assert not names, "invalid arg %r" % (arg,) for path in path.visit(fil=lambda x: x.check(file=1), rec=self._recurse, bf=True, sort=True): for x in self._collectfile(path): yield x else: assert path.check(file=1) for x in self.matchnodes(self._collectfile(path), names): yield x def _collectfile(self, path): ihook = self.gethookproxy(path) if not self.isinitpath(path): if ihook.pytest_ignore_collect(path=path, 
config=self.config): return () return ihook.pytest_collect_file(path=path, parent=self) def _recurse(self, path): ihook = self.gethookproxy(path.dirpath()) if ihook.pytest_ignore_collect(path=path, config=self.config): return for pat in self._norecursepatterns: if path.check(fnmatch=pat): return False ihook = self.gethookproxy(path) ihook.pytest_collect_directory(path=path, parent=self) return True def _tryconvertpyarg(self, x): """Convert a dotted module name to path. """ import pkgutil try: loader = pkgutil.find_loader(x) except ImportError: return x if loader is None: return x # This method is sometimes invoked when AssertionRewritingHook, which # does not define a get_filename method, is already in place: try: path = loader.get_filename(x) except AttributeError: # Retrieve path from AssertionRewritingHook: path = loader.modules[x][0].co_filename if loader.is_package(x): path = os.path.dirname(path) return path def _parsearg(self, arg): """ return (fspath, names) tuple after checking the file exists. """ parts = str(arg).split("::") if self.config.option.pyargs: parts[0] = self._tryconvertpyarg(parts[0]) relpath = parts[0].replace("/", os.sep) path = self.config.invocation_dir.join(relpath, abs=True) if not path.check(): if self.config.option.pyargs: raise UsageError( "file or package not found: " + arg + " (missing __init__.py?)") else: raise UsageError("file not found: " + arg) parts[0] = path return parts def matchnodes(self, matching, names): self.trace("matchnodes", matching, names) self.trace.root.indent += 1 nodes = self._matchnodes(matching, names) num = len(nodes) self.trace("matchnodes finished -> ", num, "nodes") self.trace.root.indent -= 1 if num == 0: raise NoMatch(matching, names[:1]) return nodes def _matchnodes(self, matching, names): if not matching or not names: return matching name = names[0] assert name nextnames = names[1:] resultnodes = [] for node in matching: if isinstance(node, Item): if not names: resultnodes.append(node) continue assert isinstance(node, Collector) rep = collect_one_node(node) if rep.passed: has_matched = False for x in rep.result: # TODO: remove parametrized workaround once collection structure contains parametrization if x.name == name or x.name.split("[")[0] == name: resultnodes.extend(self.matchnodes([x], nextnames)) has_matched = True # XXX accept IDs that don't have "()" for class instances if not has_matched and len(rep.result) == 1 and x.name == "()": nextnames.insert(0, name) resultnodes.extend(self.matchnodes([x], nextnames)) else: # report collection failures here to avoid failing to run some test # specified in the command line because the module could not be # imported (#134) node.ihook.pytest_collectreport(report=rep) return resultnodes def genitems(self, node): self.trace("genitems", node) if isinstance(node, Item): node.ihook.pytest_itemcollected(item=node) yield node else: assert isinstance(node, Collector) rep = collect_one_node(node) if rep.passed: for subnode in rep.result: for x in self.genitems(subnode): yield x node.ihook.pytest_collectreport(report=rep)
iulian787/spack
lib/spack/external/_pytest/main.py
Python
lgpl-2.1
30,081
[ "VisIt" ]
143c583ddf10deee9907e9cbe128427e220752e97ab5e8a6994962979a0bf4b0
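The NodeKeywords class in the pytest file above is a mapping whose reads fall back to the parent node's keywords while writes stay local to the node. A minimal self-contained sketch of that chaining idea, using collections.abc.MutableMapping instead of pytest's Python 2/3 compatibility shim (class and variable names here are illustrative):

from collections.abc import MutableMapping

class ChainedKeywords(MutableMapping):
    """Reads fall back to the parent mapping; writes stay local."""

    def __init__(self, parent=None, **local):
        self.parent = parent
        self._local = dict(local)

    def __getitem__(self, key):
        try:
            return self._local[key]
        except KeyError:
            if self.parent is None:
                raise
            return self.parent[key]

    def __setitem__(self, key, value):
        self._local[key] = value

    def __delitem__(self, key):
        # NodeKeywords forbids deletion as well.
        raise ValueError("cannot delete key in keywords dict")

    def __iter__(self):
        seen = set(self._local)
        if self.parent is not None:
            seen.update(self.parent)
        return iter(seen)

    def __len__(self):
        return sum(1 for _ in self)

session = ChainedKeywords(slow=True)
test = ChainedKeywords(parent=session, webtest=True)
assert test["slow"] and test["webtest"]        # inherited + local
assert sorted(test) == ["slow", "webtest"]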
'''
  :mod: Utils

  Module that collects utility functions.
'''

__RCSID__ = '$Id$'

import fnmatch

from DIRAC import gConfig, S_OK
from DIRAC.Core.Utilities import List
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations


def voimport( base_mod ):
  '''
    Function to import from extensions, if not found, tries from DIRAC.
  '''
  for ext in gConfig.getValue( 'DIRAC/Extensions', [] ):
    try:
      return __import__( ext + base_mod, globals(), locals(), ['*'] )
    except ImportError:
      continue
  # If not found in extensions, import it in DIRAC base.
  return __import__( base_mod, globals(), locals(), ['*'] )


def getCSTree( csPath = '' ):
  '''
    Gives the configuration rooted at path in a Python dict. The result is
    a Python dictionary that reflects the structure of the configuration file.
  '''
  opHelper = Operations()

  def getCSTreeAsDict( treePath ):
    '''
      Function to recursively iterate over a CS tree
    '''
    csTreeDict = {}

    opts = opHelper.getOptionsDict( treePath )
    if opts[ 'OK' ]:
      opts = opts[ 'Value' ]
      for optKey, optValue in opts.items():
        if optValue.find( ',' ) > -1:
          optValue = List.fromChar( optValue )
        else:
          optValue = [ optValue ]
        csTreeDict[ optKey ] = optValue

    secs = opHelper.getSections( treePath )
    if secs[ 'OK' ]:
      secs = secs[ 'Value' ]
      for sec in secs:
        secTree = getCSTreeAsDict( '%s/%s' % ( treePath, sec ) )
        if not secTree[ 'OK' ]:
          return secTree
        csTreeDict[ sec ] = secTree[ 'Value' ]

    return S_OK( csTreeDict )

  return getCSTreeAsDict( csPath )


def configMatch( candidateParams, configParams ):
  '''
    For a given configuration, the candidate will be rejected if:
    - it is missing at least one of the params in the config
    - if a param of the candidate does not match the config params
    - if a candidate param is None, is considered as wildcard
  '''
  for key in candidateParams:

    if key not in configParams:
      # The candidateParams is missing one of the parameters required
      # return False
      continue

    if candidateParams[ key ] is None:
      # None is assumed to be a wildcard (*)
      continue

    cParameter = candidateParams[ key ]
    if not isinstance( cParameter, list ):
      cParameter = [ cParameter ]

    # We allow using UNIX-like regular expression ( wild-cards ) on the CS
    _matches = False
    for configItem in configParams[ key ]:
      if fnmatch.filter( set( cParameter ), configItem ):
        _matches = True
        break
    if not _matches:
      return False

  return True

################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
andresailer/DIRAC
ResourceStatusSystem/Utilities/Utils.py
Python
gpl-3.0
2,920
[ "DIRAC" ]
c5b7f5e4e48d89b566e274ccef3d631a1b08f3750f26f063e58f81a2f81a211b
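configMatch() above treats a candidate value of None as a wildcard and lets the configuration side carry UNIX-style fnmatch patterns. A small standalone illustration of those semantics — the config dictionary, keys, and values below are made up for the example, and the helper reimplements the logic rather than importing DIRAC:

import fnmatch

config = {'Site': ['LCG.*.ch'], 'Status': ['Active', 'Degraded']}

def matches(candidate, config):
    # Keys missing from the config are skipped, None acts as a wildcard,
    # and config values are treated as fnmatch patterns.
    for key, value in candidate.items():
        if key not in config or value is None:
            continue
        values = value if isinstance(value, list) else [value]
        if not any(fnmatch.filter(values, pattern) for pattern in config[key]):
            return False
    return True

print(matches({'Site': 'LCG.CERN.ch', 'Status': 'Active'}, config))    # True
print(matches({'Site': 'LCG.CERN.ch', 'Status': 'Banned'}, config))    # False
print(matches({'Site': None, 'Status': 'Degraded'}, config))           # True (wildcard)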
import pybullet as p
from time import sleep

physicsClient = p.connect(p.GUI)

p.resetSimulation(p.RESET_USE_DEFORMABLE_WORLD)
p.setGravity(0, 0, -10)

planeOrn = [0,0,0,1]#p.getQuaternionFromEuler([0.3,0,0])
planeId = p.loadURDF("plane.urdf", [0,0,-2],planeOrn)

boxId = p.loadURDF("cube.urdf", [0,3,2],useMaximalCoordinates = True)
ballId = p.loadSoftBody("ball.vtk", basePosition = [0,0,-1], scale = 0.5, mass = 0.1, useNeoHookean = 1, NeoHookeanMu = 20, NeoHookeanLambda = 20, NeoHookeanDamping = 0.001, useSelfCollision = 1, frictionCoeff = .5)

p.setTimeStep(0.001)
p.setPhysicsEngineParameter(sparseSdfVoxelSize=0.25)
p.setRealTimeSimulation(1)

#logId = p.startStateLogging(p.STATE_LOGGING_PROFILE_TIMINGS, "perf.json")
while p.isConnected():
  p.setGravity(0,0,-10)
  sleep(1./240.)
#p.resetSimulation()
#p.stopStateLogging(logId)
MadManRises/Madgine
shared/bullet3-2.89/examples/pybullet/examples/deformable_ball.py
Python
mit
848
[ "VTK" ]
7ba1a0b7348abd6fd472e4b6d8f85a6c0a3d4fe9c90a45ed9aaf73640c91a6e4
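The script above runs the deformable-ball demo interactively with real-time simulation. Below is a hedged sketch of a headless variant: it assumes the standard pybullet_data assets are installed, steps the simulation manually instead of using real time, and deliberately omits the soft body so the example stays minimal.

import pybullet as p
import pybullet_data

p.connect(p.DIRECT)                                   # no GUI
p.setAdditionalSearchPath(pybullet_data.getDataPath())  # assumes bundled URDF data
p.resetSimulation()
p.setGravity(0, 0, -10)
p.loadURDF("plane.urdf", [0, 0, -2])
boxId = p.loadURDF("cube.urdf", [0, 3, 2], useMaximalCoordinates=True)

p.setTimeStep(0.001)
for _ in range(1000):                                 # ~1 s of simulated time
    p.stepSimulation()

print(p.getBasePositionAndOrientation(boxId))
p.disconnect()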
import random

from scipy.stats import norm


class Lattice1D:
    """A class that represents a disordered one dimensional lattice"""

    def __init__(self, mean_separation, sigma_separation):
        self.mean_separation = mean_separation
        self.sigma_separation = sigma_separation

    def get_sigma(self):
        return self.sigma_separation

    def generate_positions(self, min_pos, max_pos):
        """
        Generates a list of positions between min_pos and max_pos,
        using a normal distribution around fixed lattice sites
        :param min_pos: minimum value of position
        :param max_pos: maximum value of position
        :return: list of ordered positions
        """
        result = [0.0]
        result.extend(self.generate_until(max_pos))
        neg_positions = [-x for x in self.generate_until(-min_pos)]
        result.extend(neg_positions)
        result.sort()
        return result

    def generate_until(self, max_pos):
        """
        Generates a list of positions between zero and max_pos,
        not including the zero position
        :param max_pos: maximum value of position
        :return: list of positions
        """
        if max_pos < 0.0:
            return []
        result = []
        index = 1
        last_pos = self.generate_indexed_pos(index)
        while last_pos < max_pos:
            result.append(last_pos)
            index += 1
            last_pos = self.generate_indexed_pos(index)
        return result

    def generate_indexed_pos(self, index):
        """
        Generates a single position with given index
        :param index: index for position, zero is the position at the origin
        :return: position
        """
        return random.gauss(mu=index*self.mean_separation, sigma=self.sigma_separation)

    def pdf_list(self, values, samples, bin_size):
        """
        Calculates the pdf of the distribution for a list of values,
        excluding the Dirac delta at zero
        :param values: positions
        :param samples: number samples taken
        :param bin_size: size of the bins
        :return: probability density
        """
        result = []
        max_dist = 5.0*self.sigma_separation  # discard influence at distance 5*sigma
        nmax = int(self.mean_separation/max_dist)
        for x in values:
            nearest_index = int(round(x/self.mean_separation))
            nearest_pos = nearest_index*self.mean_separation
            df = 0.0
            start_index = nearest_index - nmax
            for j in range(2*nmax+1):
                current_index = start_index + j
                if current_index != 0:
                    df += norm.pdf(x, current_index * self.mean_separation, self.sigma_separation)
            result.append(samples*bin_size*df)
        return result

    def max_value(self, samples, bin_size):
        return samples*bin_size*norm.pdf(0.0, 0.0, self.sigma_separation)
waltervh/BornAgain-tutorial
old/bornagain-python/iff/Lattice1D.py
Python
gpl-3.0
2,912
[ "DIRAC" ]
2a07ce065d1d51c02c1e33b86920d0516d48fbd17695bad1e4000cffb2634496
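A possible usage sketch for Lattice1D above, assuming the module file is on the import path: sample positions over a window many times and evaluate the analytic pdf (which excludes the Dirac delta at the origin) on a grid. The seed, window, and bin size are arbitrary choices for the example.

import random

from Lattice1D import Lattice1D  # assumes the file above is importable

random.seed(42)
lattice = Lattice1D(mean_separation=1.0, sigma_separation=0.1)

samples = 200
positions = []
for _ in range(samples):
    # Drop the fixed site at the origin, as pdf_list() excludes it too.
    positions.extend(x for x in lattice.generate_positions(-10.0, 10.0) if x != 0.0)

bin_size = 0.05
grid = [i * bin_size for i in range(1, int(5.0 / bin_size))]
expected = lattice.pdf_list(grid, samples, bin_size)
print(len(positions), max(expected), lattice.max_value(samples, bin_size))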
#!/usr/bin/env python # -*- Mode: Python -*- # GObject-Introspection - a framework for introspecting GObject libraries # Copyright (C) 2010 Zach Goldberg # Copyright (C) 2011 Johan Dahlin # Copyright (C) 2011 Shaun McCance # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. # from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import os import re import tempfile from xml.sax import saxutils from mako.lookup import TemplateLookup from . import ast, xmlwriter from .utils import to_underscores def make_page_id(node, recursive=False): if isinstance(node, ast.Namespace): if recursive: return node.name else: return 'index' if hasattr(node, '_chain') and node._chain: parent = node._chain[-1] else: parent = getattr(node, 'parent', None) if parent is None: if isinstance(node, ast.Function) and node.shadows: return '%s.%s' % (node.namespace.name, node.shadows) else: return '%s.%s' % (node.namespace.name, node.name) if isinstance(node, (ast.Property, ast.Signal, ast.VFunction, ast.Field)): return '%s-%s' % (make_page_id(parent, recursive=True), node.name) elif isinstance(node, ast.Function) and node.shadows: return '%s.%s' % (make_page_id(parent, recursive=True), node.shadows) else: return '%s.%s' % (make_page_id(parent, recursive=True), node.name) def get_node_kind(node): if isinstance(node, ast.Namespace): node_kind = 'namespace' elif isinstance(node, (ast.Class, ast.Boxed, ast.Compound)): node_kind = 'class' elif isinstance(node, ast.Interface): node_kind = 'interface' elif isinstance(node, ast.Record): node_kind = 'record' elif isinstance(node, ast.Function): if node.is_method: node_kind = 'method' elif node.is_constructor: node_kind = 'constructor' else: node_kind = 'function' elif isinstance(node, (ast.Enum, ast.Bitfield)): node_kind = 'enum' elif isinstance(node, ast.Property) and node.parent is not None: node_kind = 'property' elif isinstance(node, ast.Signal) and node.parent is not None: node_kind = 'signal' elif isinstance(node, ast.VFunction) and node.parent is not None: node_kind = 'vfunc' elif isinstance(node, ast.Callable): node_kind = 'callback' elif isinstance(node, ast.Field): node_kind = 'field' else: node_kind = 'default' return node_kind class TemplatedScanner(object): def __init__(self, specs): self.specs = self.unmangle_specs(specs) self.regex = self.make_regex(self.specs) def unmangle_specs(self, specs): mangled = re.compile('<<([a-zA-Z_:]+)>>') specdict = dict((name.lstrip('!'), spec) for name, spec in specs) def unmangle(spec, name=None): def replace_func(match): child_spec_name = match.group(1) if ':' in child_spec_name: pattern_name, child_spec_name = child_spec_name.split(':', 1) else: pattern_name = None child_spec = specdict[child_spec_name] # Force all child specs of this one to be unnamed unmangled = unmangle(child_spec, None) if 
pattern_name and name: return '(?P<%s_%s>%s)' % (name, pattern_name, unmangled) else: return unmangled return mangled.sub(replace_func, spec) return [(name, unmangle(spec, name)) for name, spec in specs] def make_regex(self, specs): regex = '|'.join('(?P<%s>%s)' % (name, spec) for name, spec in specs if not name.startswith('!')) return re.compile(regex) def get_properties(self, name, match): groupdict = match.groupdict() properties = {name: groupdict.pop(name)} name = name + "_" for group, value in groupdict.items(): if group.startswith(name): key = group[len(name):] properties[key] = value return properties def scan(self, text): pos = 0 while True: match = self.regex.search(text, pos) if match is None: break start = match.start() if start > pos: yield ('other', text[pos:start], None) pos = match.end() name = match.lastgroup yield (name, match.group(0), self.get_properties(name, match)) if pos < len(text): yield ('other', text[pos:], None) class DocstringScanner(TemplatedScanner): def __init__(self): specs = [ ('!alpha', r'[a-zA-Z0-9_]+'), ('!alpha_dash', r'[a-zA-Z0-9_-]+'), ('property', r'#<<type_name:alpha>>:(<<property_name:alpha_dash>>)'), ('signal', r'#<<type_name:alpha>>::(<<signal_name:alpha_dash>>)'), ('type_name', r'#(<<type_name:alpha>>)'), ('enum_value', r'%(<<member_name:alpha>>)'), ('parameter', r'@<<param_name:alpha>>'), ('function_call', r'<<symbol_name:alpha>>\(\)'), ] super(DocstringScanner, self).__init__(specs) class DocFormatter(object): def __init__(self, transformer): self._transformer = transformer self._scanner = DocstringScanner() def escape(self, text): return saxutils.escape(text) def should_render_node(self, node): if getattr(node, "private", False): return False # Nodes without namespace are AST bugs really # They are used for structs and unions declared # inline inside other structs, but they are not # even picked up by g-ir-compiler, because they # don't create a <type/> element. # So just ignore them. 
if isinstance(node, ast.Node) and node.namespace is None: return False return True def format(self, node, doc): if doc is None: return '' result = '' for para in doc.split('\n\n'): result += ' <p>' result += self.format_inline(node, para) result += '</p>' return result def _resolve_type(self, ident): try: matches = self._transformer.split_ctype_namespaces(ident) except ValueError: return None for namespace, name in matches: node = namespace.get(name) if node: return node return None def _resolve_symbol(self, symbol): try: matches = self._transformer.split_csymbol_namespaces(symbol) except ValueError: return None for namespace, name in matches: node = namespace.get_by_symbol(symbol) if node: return node return None def _find_thing(self, list_, name): for item in list_: if item.name == name: return item raise KeyError("Could not find %s" % (name, )) def _process_other(self, node, match, props): return self.escape(match) def _process_property(self, node, match, props): type_node = self._resolve_type(props['type_name']) if type_node is None: return match try: prop = self._find_thing(type_node.properties, props['property_name']) except (AttributeError, KeyError): return match return self.format_xref(prop) def _process_signal(self, node, match, props): type_node = self._resolve_type(props['type_name']) if type_node is None: return match try: signal = self._find_thing(type_node.signals, props['signal_name']) except (AttributeError, KeyError): return match return self.format_xref(signal) def _process_type_name(self, node, match, props): type_ = self._resolve_type(props['type_name']) if type_ is None: return match return self.format_xref(type_) def _process_enum_value(self, node, match, props): member_name = props['member_name'] try: return '<code>%s</code>' % (self.fundamentals[member_name], ) except KeyError: pass enum_value = self._resolve_symbol(member_name) if enum_value: return self.format_xref(enum_value) return match def _process_parameter(self, node, match, props): try: parameter = node.get_parameter(props['param_name']) except (AttributeError, ValueError): return match return '<code>%s</code>' % (self.format_parameter_name(node, parameter), ) def _process_function_call(self, node, match, props): func = self._resolve_symbol(props['symbol_name']) if func is None: return match return self.format_xref(func) def _process_token(self, node, tok): kind, match, props = tok dispatch = { 'other': self._process_other, 'property': self._process_property, 'signal': self._process_signal, 'type_name': self._process_type_name, 'enum_value': self._process_enum_value, 'parameter': self._process_parameter, 'function_call': self._process_function_call, } return dispatch[kind](node, match, props) def get_in_parameters(self, node): raise NotImplementedError def format_inline(self, node, para): tokens = self._scanner.scan(para) words = [self._process_token(node, tok) for tok in tokens] return ''.join(words) def format_parameter_name(self, node, parameter): if isinstance(parameter.type, ast.Varargs): return "..." 
else: return parameter.argname def format_function_name(self, func): raise NotImplementedError def format_type(self, type_, link=False): raise NotImplementedError def format_page_name(self, node): if isinstance(node, ast.Namespace): return node.name elif isinstance(node, ast.Function): return self.format_function_name(node) elif isinstance(node, ast.Property) and node.parent is not None: return '%s:%s' % (self.format_page_name(node.parent), node.name) elif isinstance(node, ast.Signal) and node.parent is not None: return '%s::%s' % (self.format_page_name(node.parent), node.name) elif isinstance(node, ast.VFunction) and node.parent is not None: return '%s::%s' % (self.format_page_name(node.parent), node.name) elif isinstance(node, ast.Field) and node.parent is not None: return '%s->%s' % (self.format_page_name(node.parent), node.name) else: return make_page_id(node) def format_xref(self, node, **attrdict): if node is None or not hasattr(node, 'namespace'): attrs = [('xref', 'index')] + list(sorted(attrdict.items())) return xmlwriter.build_xml_tag('link', attrs) elif isinstance(node, ast.Member): # Enum/BitField members are linked to the main enum page. return self.format_xref(node.parent, **attrdict) + '.' + node.name elif node.namespace is self._transformer.namespace: return self.format_internal_xref(node, attrdict) else: return self.format_external_xref(node, attrdict) def format_internal_xref(self, node, attrdict): attrs = [('xref', make_page_id(node))] + list(sorted(attrdict.items())) return xmlwriter.build_xml_tag('link', attrs) def format_external_xref(self, node, attrdict): ns = node.namespace attrs = [('href', '../%s-%s/%s.html' % (ns.name, str(ns.version), make_page_id(node)))] attrs += list(sorted(attrdict.items())) return xmlwriter.build_xml_tag('link', attrs, self.format_page_name(node)) def field_is_writable(self, field): return True def format_property_flags(self, property_, construct_only=False): flags = [] if property_.readable and not construct_only: flags.append("Read") if property_.writable and not construct_only and \ self.field_is_writable(property_): flags.append("Write") if isinstance(property_, ast.Property): if property_.construct: flags.append("Construct") if property_.construct_only: flags.append("Construct Only") return " / ".join(flags) def to_underscores(self, node): if isinstance(node, ast.Property): return node.name.replace('-', '_') elif node.name: return to_underscores(node.name) elif isinstance(node, ast.Callback): return 'callback' elif isinstance(node, ast.Union): return 'anonymous_union' elif isinstance(node, ast.Field): return 'anonymous field' else: raise Exception('invalid node') def to_lower_camel_case(self, string): return string[0].lower() + string[1:] def get_class_hierarchy(self, node): assert isinstance(node, ast.Class) parent_chain = [node] while node.parent_type: node = self._transformer.lookup_typenode(node.parent_type) parent_chain.append(node) parent_chain.reverse() return parent_chain def format_prerequisites(self, node): assert isinstance(node, ast.Interface) if len(node.prerequisites) > 0: if len(node.prerequisites) > 1: return ', '.join(node.prerequisites[:-1]) + \ ' and ' + node.prerequisites[-1] else: return node.prerequisites[0] else: return 'GObject.Object' def format_known_implementations(self, node): assert isinstance(node, ast.Interface) node_name = node.namespace.name + '.' 
+ node.name impl = [] for c in node.namespace.values(): if not isinstance(c, ast.Class): continue for implemented in c.interfaces: if implemented.target_giname == node_name: impl.append(c) break if len(impl) == 0: return 'None' else: out = '%s is implemented by ' % (node.name,) if len(impl) == 1: return out + impl[0].name else: return out + ', '.join(i.name for i in impl[:-1]) + \ ' and ' + impl[-1].name class DocFormatterC(DocFormatter): language = "C" mime_type = "text/x-csrc" fundamentals = { "TRUE": "TRUE", "FALSE": "FALSE", "NULL": "NULL", } def format_type(self, type_, link=False): if isinstance(type_, ast.Array): return self.format_type(type_.element_type) + '*' elif type_.ctype is not None: return type_.ctype elif type_.target_fundamental: return type_.target_fundamental else: node = self._transformer.lookup_typenode(type_) return getattr(node, 'ctype') def format_function_name(self, func): if isinstance(func, ast.Function): return func.symbol else: return func.name def get_in_parameters(self, node): return node.all_parameters class DocFormatterIntrospectableBase(DocFormatter): def should_render_node(self, node): if isinstance(node, ast.Record) and node.is_gtype_struct_for is not None: return False if not getattr(node, "introspectable", True): return False if isinstance(node, ast.Function) and node.shadowed_by is not None: return False return super(DocFormatterIntrospectableBase, self).should_render_node(node) class DocFormatterPython(DocFormatterIntrospectableBase): language = "Python" mime_type = "text/python" fundamentals = { "TRUE": "True", "FALSE": "False", "NULL": "None", } def should_render_node(self, node): if getattr(node, "is_constructor", False): return False return super(DocFormatterPython, self).should_render_node(node) def is_method(self, node): if getattr(node, "is_method", False): return True if isinstance(node, ast.VFunction): return True return False def format_parameter_name(self, node, parameter): # Force "self" for the first parameter of a method if self.is_method(node) and parameter is node.instance_parameter: return "self" elif isinstance(parameter.type, ast.Varargs): return "..." 
else: return parameter.argname def format_fundamental_type(self, name): fundamental_types = { "utf8": "unicode", "gunichar": "unicode", "gchar": "str", "guchar": "str", "gboolean": "bool", "gint": "int", "guint": "int", "glong": "int", "gulong": "int", "gint64": "int", "guint64": "int", "gfloat": "float", "gdouble": "float", "gchararray": "str", "GParam": "GLib.Param", "PyObject": "object", "GStrv": "[str]", "GVariant": "GLib.Variant"} return fundamental_types.get(name, name) def format_type(self, type_, link=False): if isinstance(type_, (ast.List, ast.Array)): return '[' + self.format_type(type_.element_type) + ']' elif isinstance(type_, ast.Map): return '{%s: %s}' % (self.format_type(type_.key_type), self.format_type(type_.value_type)) elif type_.target_giname is not None: return type_.target_giname else: return self.format_fundamental_type(type_.target_fundamental) def format_function_name(self, func): if func.parent is not None: return "%s.%s" % (self.format_page_name(func.parent), func.name) else: return func.name def get_in_parameters(self, node): return node.all_parameters class DocFormatterGjs(DocFormatterIntrospectableBase): language = "Gjs" mime_type = "text/x-gjs" fundamentals = { "TRUE": "true", "FALSE": "false", "NULL": "null", } def is_method(self, node): if getattr(node, "is_method", False): return True if isinstance(node, ast.VFunction): return True return False def resolve_gboxed_constructor(self, node): zero_args_constructor = None default_constructor = None introspectable_constructors = \ list(filter(lambda c: getattr(c, 'introspectable', True), node.constructors)) for c in introspectable_constructors: if zero_args_constructor is None and \ len(c.parameters) == 0: zero_args_constructor = c if default_constructor is None and \ c.name == 'new': default_constructor = c if default_constructor is None: default_constructor = zero_args_constructor if default_constructor is None and \ len(introspectable_constructors) > 0: default_constructor = introspectable_constructors[0] node.gjs_default_constructor = default_constructor node.gjs_zero_args_constructor = zero_args_constructor def should_render_node(self, node): if isinstance(node, (ast.Compound, ast.Boxed)): self.resolve_gboxed_constructor(node) if isinstance(node, ast.Compound) and node.disguised and \ len(node.methods) == len(node.static_methods) == len(node.constructors) == 0: return False if isinstance(node, ast.ErrorQuarkFunction): return False if isinstance(node, ast.Field): if node.type is None: return False if isinstance(node.parent, (ast.Class, ast.Union)): return False if isinstance(node, ast.Union) and node.name is None: return False if isinstance(node, ast.Class): is_gparam_subclass = False if node.parent_type: parent = self._transformer.lookup_typenode(node.parent_type) while parent: if parent.namespace.name == 'GObject' and \ parent.name == 'ParamSpec': is_gparam_subclass = True break if parent.parent_type is None: break parent = self._transformer.lookup_typenode(parent.parent_type) if is_gparam_subclass: return False return super(DocFormatterGjs, self).should_render_node(node) def format_fundamental_type(self, name): fundamental_types = { "none": "void", "gpointer": "void", "gboolean": "Boolean", "gint8": "Number(gint8)", "guint8": "Number(guint8)", "gint16": "Number(gint16)", "guint16": "Number(guint16)", "gint32": "Number(gint32)", "guint32": "Number(guint32)", "gchar": "Number(gchar)", "guchar": "Number(guchar)", "gshort": "Number(gshort)", "gint": "Number(gint)", "guint": "Number(guint)", "gfloat": 
"Number(gfloat)", "gdouble": "Number(gdouble)", "utf8": "String", "gunichar": "String", "filename": "String", "GType": "GObject.Type", "GVariant": "GLib.Variant", # These cannot be fully represented in gjs "gsize": "Number(gsize)", "gssize": "Number(gssize)", "gintptr": "Number(gintptr)", "guintptr": "Number(guintptr)", "glong": "Number(glong)", "gulong": "Number(gulong)", "gint64": "Number(gint64)", "guint64": "Number(guint64)", "long double": "Number(long double)", "long long": "Number(long long)", "unsigned long long": "Number(unsigned long long)"} return fundamental_types.get(name, name) def format_type(self, type_, link=False): if isinstance(type_, ast.Array) and \ type_.element_type.target_fundamental in ('gint8', 'guint8'): return 'ByteArray' elif isinstance(type_, (ast.List, ast.Array)): return 'Array(' + self.format_type(type_.element_type, link) + ')' elif isinstance(type_, ast.Map): return '{%s: %s}' % (self.format_type(type_.key_type, link), self.format_type(type_.value_type, link)) elif not type_ or type_.target_fundamental == "none": return "void" elif type_.target_giname is not None: giname = type_.target_giname if giname in ('GLib.ByteArray', 'GLib.Bytes'): return 'ByteArray' if giname == 'GObject.Value': return 'Any' if giname == 'GObject.Closure': return 'Function' if link: nsname = self._transformer.namespace.name if giname.startswith(nsname + '.'): return '<link xref="%s">%s</link>' % (giname, giname) else: resolved = self._transformer.lookup_typenode(type_) if resolved: return self.format_xref(resolved) return giname else: return self.format_fundamental_type(type_.target_fundamental) def format_function_name(self, func): name = func.name if func.shadows: name = func.shadows if func.is_method: return "%s.prototype.%s" % (self.format_page_name(func.parent), name) elif func.parent is not None: return "%s.%s" % (self.format_page_name(func.parent), name) else: return name def format_page_name(self, node): if isinstance(node, (ast.Field, ast.Property)): return '%s.%s' % (self.format_page_name(node.parent), self.to_underscores(node)) else: return DocFormatterIntrospectableBase.format_page_name(self, node) def has_any_parameters(self, node): return len(node.parameters) > 0 or \ node.retval.type.target_fundamental != 'none' def get_in_parameters(self, node): skip = set() for param in node.parameters: if param.direction == ast.PARAM_DIRECTION_OUT: skip.add(param) if param.closure_name is not None: skip.add(node.get_parameter(param.closure_name)) if param.destroy_name is not None: skip.add(node.get_parameter(param.destroy_name)) if isinstance(param.type, ast.Array) and param.type.length_param_name is not None: skip.add(node.get_parameter(param.type.length_param_name)) params = [] for param in node.parameters: if param not in skip: params.append(param) return params def get_out_parameters(self, node): skip = set() for param in node.parameters: if param.direction == ast.PARAM_DIRECTION_IN: skip.add(param) if param.closure_name is not None: skip.add(node.get_parameter(param.closure_name)) if param.destroy_name is not None: skip.add(node.get_parameter(param.destroy_name)) if isinstance(param.type, ast.Array) and param.type.length_param_name is not None: skip.add(node.get_parameter(param.type.length_param_name)) params = [] if node.retval.type.target_fundamental != 'none': name = 'return_value' if node.retval.type.target_fundamental == 'gboolean': name = 'ok' ret_param = ast.Parameter(name, node.retval.type, ast.PARAM_DIRECTION_OUT) ret_param.doc = node.retval.doc 
params.append(ret_param) for param in node.parameters: if param not in skip: params.append(param) if len(params) == 1: params[0].argname = 'Returns' return params def format_in_parameters(self, node): in_params = self.get_in_parameters(node) return ', '.join(('%s: %s' % (p.argname, self.format_type(p.type, True))) for p in in_params) def format_out_parameters(self, node): out_params = self.get_out_parameters(node) if len(out_params) == 0: return 'void' elif len(out_params) == 1: return self.format_type(out_params[0].type, True) else: return '[' + ', '.join(('%s: %s' % (p.argname, self.format_type(p.type, True))) for p in out_params) + ']' def field_is_writable(self, node): if isinstance(node, ast.Field): if node.type is None: return False if node.private: return False if isinstance(node.parent, ast.Union): return False if node.type.target_fundamental not in \ (None, 'none', 'gpointer', 'utf8', 'filename', 'va_list'): return True resolved = self._transformer.lookup_typenode(node.type) if resolved: if isinstance(resolved, ast.Compound) and node.type.ctype[-1] != '*': return self._struct_is_simple(resolved) elif isinstance(resolved, (ast.Enum, ast.Bitfield)): return True return False else: return True def _struct_is_simple(self, node): if node.disguised or len(node.fields) == 0: return False for f in node.fields: if not self.field_is_writable(f): return False return True def format_gboxed_constructor(self, node): if node.namespace.name == 'GLib' and node.name == 'Variant': return 'signature: String, value: Any' zero_args_constructor = node.gjs_zero_args_constructor default_constructor = node.gjs_default_constructor can_allocate = zero_args_constructor is not None if not can_allocate and isinstance(node, ast.Record): can_allocate = self._struct_is_simple(node) # Small lie: if can_allocate is False, and # default_constructor is None, then you cannot # construct the boxed in any way. 
But let's # pretend you can with the regular constructor if can_allocate or default_constructor is None: if isinstance(node, ast.Compound): fields = filter(self.field_is_writable, node.fields) out = '' for f in fields: out += " <link xref='%s.%s-%s'>%s</link>: value\n" % \ (node.namespace.name, node.name, f.name, f.name) if out: out = "{\n" + out + "}" return out else: return '' else: construct_params = self.get_in_parameters(default_constructor) return ', '.join(('%s: %s' % (p.argname, self.format_type(p.type))) for p in construct_params) LANGUAGES = { "c": DocFormatterC, "python": DocFormatterPython, "gjs": DocFormatterGjs, } class DocWriter(object): def __init__(self, transformer, language): self._transformer = transformer try: formatter_class = LANGUAGES[language.lower()] except KeyError: raise SystemExit("Unsupported language: %s" % (language, )) self._formatter = formatter_class(self._transformer) self._language = self._formatter.language self._lookup = self._get_template_lookup() def _get_template_lookup(self): if 'UNINSTALLED_INTROSPECTION_SRCDIR' in os.environ: top_srcdir = os.environ['UNINSTALLED_INTROSPECTION_SRCDIR'] srcdir = os.path.join(top_srcdir, 'giscanner') else: srcdir = os.path.dirname(__file__) template_dir = os.path.join(srcdir, 'doctemplates') return TemplateLookup(directories=[template_dir], module_directory=tempfile.mkdtemp(), output_encoding='utf-8') def write(self, output): try: os.makedirs(output) except OSError: # directory already made pass self._walk_node(output, self._transformer.namespace, []) self._transformer.namespace.walk(lambda node, chain: self._walk_node(output, node, chain)) def _walk_node(self, output, node, chain): if isinstance(node, ast.Function) and node.moved_to is not None: return False if self._formatter.should_render_node(node): self._render_node(node, chain, output) # hack: fields are not Nodes in the ast, so we don't # see them in the visit. Handle them manually here if isinstance(node, (ast.Compound, ast.Class)): chain.append(node) for f in node.fields: self._walk_node(output, f, chain) chain.pop() return True return False def _render_node(self, node, chain, output): namespace = self._transformer.namespace # A bit of a hack...maybe this should be an official API node._chain = list(chain) page_kind = get_node_kind(node) template_name = '%s/%s.tmpl' % (self._language, page_kind) page_id = make_page_id(node) template = self._lookup.get_template(template_name) result = template.render(namespace=namespace, node=node, page_id=page_id, page_kind=page_kind, formatter=self._formatter, ast=ast) output_file_name = os.path.join(os.path.abspath(output), page_id + '.page') with open(output_file_name, 'wb') as fp: fp.write(result)
anthrotype/gobject-introspection
giscanner/docwriter.py
Python
gpl-2.0
33,885
[ "VisIt" ]
3c92f4b372aa244ac4e880dd5bfb088d93167502db964c73b712df35cde5c636
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import collections import errno import filecmp import os.path import re import tempfile import sys # A minimal memoizing decorator. It'll blow up if the args aren't immutable, # among other "problems". class memoize(object): def __init__(self, func): self.func = func self.cache = {} def __call__(self, *args): try: return self.cache[args] except KeyError: result = self.func(*args) self.cache[args] = result return result class GypError(Exception): """Error class representing an error, which is to be presented to the user. The main entry point will catch and display this. """ pass def ExceptionAppend(e, msg): """Append a message to the given exception's message.""" if not e.args: e.args = (msg,) elif len(e.args) == 1: e.args = (str(e.args[0]) + ' ' + msg,) else: e.args = (str(e.args[0]) + ' ' + msg,) + e.args[1:] def FindQualifiedTargets(target, qualified_list): """ Given a list of qualified targets, return the qualified targets for the specified |target|. """ return [t for t in qualified_list if ParseQualifiedTarget(t)[1] == target] def ParseQualifiedTarget(target): # Splits a qualified target into a build file, target name and toolset. # NOTE: rsplit is used to disambiguate the Windows drive letter separator. target_split = target.rsplit(':', 1) if len(target_split) == 2: [build_file, target] = target_split else: build_file = None target_split = target.rsplit('#', 1) if len(target_split) == 2: [target, toolset] = target_split else: toolset = None return [build_file, target, toolset] def ResolveTarget(build_file, target, toolset): # This function resolves a target into a canonical form: # - a fully defined build file, either absolute or relative to the current # directory # - a target name # - a toolset # # build_file is the file relative to which 'target' is defined. # target is the qualified target. # toolset is the default toolset for that target. [parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target) if parsed_build_file: if build_file: # If a relative path, parsed_build_file is relative to the directory # containing build_file. If build_file is not in the current directory, # parsed_build_file is not a usable path as-is. Resolve it by # interpreting it as relative to build_file. If parsed_build_file is # absolute, it is usable as a path regardless of the current directory, # and os.path.join will return it as-is. build_file = os.path.normpath(os.path.join(os.path.dirname(build_file), parsed_build_file)) # Further (to handle cases like ../cwd), make it relative to cwd) if not os.path.isabs(build_file): build_file = RelativePath(build_file, '.') else: build_file = parsed_build_file if parsed_toolset: toolset = parsed_toolset return [build_file, target, toolset] def BuildFile(fully_qualified_target): # Extracts the build file from the fully qualified target. 
return ParseQualifiedTarget(fully_qualified_target)[0] def GetEnvironFallback(var_list, default): """Look up a key in the environment, with fallback to secondary keys and finally falling back to a default value.""" for var in var_list: if var in os.environ: return os.environ[var] return default def QualifiedTarget(build_file, target, toolset): # "Qualified" means the file that a target was defined in and the target # name, separated by a colon, suffixed by a # and the toolset name: # /path/to/file.gyp:target_name#toolset fully_qualified = build_file + ':' + target if toolset: fully_qualified = fully_qualified + '#' + toolset return fully_qualified @memoize def RelativePath(path, relative_to): # Assuming both |path| and |relative_to| are relative to the current # directory, returns a relative path that identifies path relative to # relative_to. # Convert to normalized (and therefore absolute paths). path = os.path.realpath(path) relative_to = os.path.realpath(relative_to) # On Windows, we can't create a relative path to a different drive, so just # use the absolute path. if sys.platform == 'win32': if (os.path.splitdrive(path)[0].lower() != os.path.splitdrive(relative_to)[0].lower()): return path # Split the paths into components. path_split = path.split(os.path.sep) relative_to_split = relative_to.split(os.path.sep) # Determine how much of the prefix the two paths share. prefix_len = len(os.path.commonprefix([path_split, relative_to_split])) # Put enough ".." components to back up out of relative_to to the common # prefix, and then append the part of path_split after the common prefix. relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \ path_split[prefix_len:] if len(relative_split) == 0: # The paths were the same. return '' # Turn it back into a string and we're done. return os.path.join(*relative_split) @memoize def InvertRelativePath(path, toplevel_dir=None): """Given a path like foo/bar that is relative to toplevel_dir, return the inverse relative path back to the toplevel_dir. E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path))) should always produce the empty string, unless the path contains symlinks. """ if not path: return path toplevel_dir = '.' if toplevel_dir is None else toplevel_dir return RelativePath(toplevel_dir, os.path.join(toplevel_dir, path)) def FixIfRelativePath(path, relative_to): # Like RelativePath but returns |path| unchanged if it is absolute. if os.path.isabs(path): return path return RelativePath(path, relative_to) def UnrelativePath(path, relative_to): # Assuming that |relative_to| is relative to the current directory, and |path| # is a path relative to the dirname of |relative_to|, returns a path that # identifies |path| relative to the current directory. rel_dir = os.path.dirname(relative_to) return os.path.normpath(os.path.join(rel_dir, path)) # re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at # http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02 # and the documentation for various shells. # _quote is a pattern that should match any argument that needs to be quoted # with double-quotes by EncodePOSIXShellArgument. 
It matches the following # characters appearing anywhere in an argument: # \t, \n, space parameter separators # # comments # $ expansions (quoted to always expand within one argument) # % called out by IEEE 1003.1 XCU.2.2 # & job control # ' quoting # (, ) subshell execution # *, ?, [ pathname expansion # ; command delimiter # <, >, | redirection # = assignment # {, } brace expansion (bash) # ~ tilde expansion # It also matches the empty string, because "" (or '') is the only way to # represent an empty string literal argument to a POSIX shell. # # This does not match the characters in _escape, because those need to be # backslash-escaped regardless of whether they appear in a double-quoted # string. _quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$') # _escape is a pattern that should match any character that needs to be # escaped with a backslash, whether or not the argument matched the _quote # pattern. _escape is used with re.sub to backslash anything in _escape's # first match group, hence the (parentheses) in the regular expression. # # _escape matches the following characters appearing anywhere in an argument: # " to prevent POSIX shells from interpreting this character for quoting # \ to prevent POSIX shells from interpreting this character for escaping # ` to prevent POSIX shells from interpreting this character for command # substitution # Missing from this list is $, because the desired behavior of # EncodePOSIXShellArgument is to permit parameter (variable) expansion. # # Also missing from this list is !, which bash will interpret as the history # expansion character when history is enabled. bash does not enable history # by default in non-interactive shells, so this is not thought to be a problem. # ! was omitted from this list because bash interprets "\!" as a literal string # including the backslash character (avoiding history expansion but retaining # the backslash), which would not be correct for argument encoding. Handling # this case properly would also be problematic because bash allows the history # character to be changed with the histchars shell variable. Fortunately, # as history is not enabled in non-interactive shells and # EncodePOSIXShellArgument is only expected to encode for non-interactive # shells, there is no room for error here by ignoring !. _escape = re.compile(r'(["\\`])') def EncodePOSIXShellArgument(argument): """Encodes |argument| suitably for consumption by POSIX shells. argument may be quoted and escaped as necessary to ensure that POSIX shells treat the returned value as a literal representing the argument passed to this function. Parameter (variable) expansions beginning with $ are allowed to remain intact without escaping the $, to allow the argument to contain references to variables to be expanded by the shell. """ if not isinstance(argument, str): argument = str(argument) if _quote.search(argument): quote = '"' else: quote = '' encoded = quote + re.sub(_escape, r'\\\1', argument) + quote return encoded def EncodePOSIXShellList(list): """Encodes |list| suitably for consumption by POSIX shells. Returns EncodePOSIXShellArgument for each item in list, and joins them together using the space character as an argument separator. """ encoded_arguments = [] for argument in list: encoded_arguments.append(EncodePOSIXShellArgument(argument)) return ' '.join(encoded_arguments) def DeepDependencyTargets(target_dicts, roots): """Returns the recursive list of target dependencies.""" dependencies = set() pending = set(roots) while pending: # Pluck out one. 
r = pending.pop() # Skip if visited already. if r in dependencies: continue # Add it. dependencies.add(r) # Add its children. spec = target_dicts[r] pending.update(set(spec.get('dependencies', []))) pending.update(set(spec.get('dependencies_original', []))) return list(dependencies - set(roots)) def BuildFileTargets(target_list, build_file): """From a target_list, returns the subset from the specified build_file. """ return [p for p in target_list if BuildFile(p) == build_file] def AllTargets(target_list, target_dicts, build_file): """Returns all targets (direct and dependencies) for the specified build_file. """ bftargets = BuildFileTargets(target_list, build_file) deptargets = DeepDependencyTargets(target_dicts, bftargets) return bftargets + deptargets def WriteOnDiff(filename): """Write to a file only if the new contents differ. Arguments: filename: name of the file to potentially write to. Returns: A file like object which will write to temporary file and only overwrite the target if it differs (on close). """ class Writer(object): """Wrapper around file which only covers the target if it differs.""" def __init__(self): # Pick temporary file. tmp_fd, self.tmp_path = tempfile.mkstemp( suffix='.tmp', prefix=os.path.split(filename)[1] + '.gyp.', dir=os.path.split(filename)[0]) try: self.tmp_file = os.fdopen(tmp_fd, 'wb') except Exception: # Don't leave turds behind. os.unlink(self.tmp_path) raise def __getattr__(self, attrname): # Delegate everything else to self.tmp_file return getattr(self.tmp_file, attrname) def close(self): try: # Close tmp file. self.tmp_file.close() # Determine if different. same = False try: same = filecmp.cmp(self.tmp_path, filename, False) except OSError as e: if e.errno != errno.ENOENT: raise if same: # The new file is identical to the old one, just get rid of the new # one. os.unlink(self.tmp_path) else: # The new file is different from the old one, or there is no old one. # Rename the new file to the permanent name. # # tempfile.mkstemp uses an overly restrictive mode, resulting in a # file that can only be read by the owner, regardless of the umask. # There's no reason to not respect the umask here, which means that # an extra hoop is required to fetch it and reset the new file's mode. # # No way to get the umask without setting a new one? Set a safe one # and then set it back to the old value. umask = os.umask(0o77) os.umask(umask) os.chmod(self.tmp_path, 0o666 & ~umask) if sys.platform == 'win32' and os.path.exists(filename): # NOTE: on windows (but not cygwin) rename will not replace an # existing file, so it must be preceded with a remove. Sadly there # is no way to make the switch atomic. os.remove(filename) os.rename(self.tmp_path, filename) except Exception: # Don't leave turds behind. 
os.unlink(self.tmp_path) raise return Writer() def EnsureDirExists(path): """Make sure the directory for |path| exists.""" try: os.makedirs(os.path.dirname(path)) except OSError: pass def GetFlavor(params): """Returns |params.flavor| if it's set, the system's default flavor else.""" flavors = { 'cygwin': 'win', 'win32': 'win', 'darwin': 'mac', } if 'flavor' in params: return params['flavor'] if sys.platform in flavors: return flavors[sys.platform] if sys.platform.startswith('sunos'): return 'solaris' if sys.platform.startswith('freebsd'): return 'freebsd' if sys.platform.startswith('openbsd'): return 'openbsd' if sys.platform.startswith('aix'): return 'aix' return 'linux' def CopyTool(flavor, out_path): """Finds (flock|mac|win)_tool.gyp in the gyp directory and copies it to |out_path|.""" # aix and solaris just need flock emulation. mac and win use more complicated # support scripts. prefix = { 'aix': 'flock', 'solaris': 'flock', 'mac': 'mac', 'win': 'win' }.get(flavor, None) if not prefix: return # Slurp input file. source_path = os.path.join( os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix) with open(source_path) as source_file: source = source_file.readlines() # Add header and write it out. tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix) with open(tool_path, 'w') as tool_file: tool_file.write( ''.join([source[0], '# Generated by gyp. Do not edit.\n'] + source[1:])) # Make file executable. os.chmod(tool_path, 0o755) # From Alex Martelli, # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560 # ASPN: Python Cookbook: Remove duplicates from a sequence # First comment, dated 2001/10/13. # (Also in the printed Python Cookbook.) def uniquer(seq, idfun=None): if idfun is None: idfun = lambda x: x seen = {} result = [] for item in seq: marker = idfun(item) if marker in seen: continue seen[marker] = 1 result.append(item) return result # Based on http://code.activestate.com/recipes/576694/. class OrderedSet(collections.MutableSet): def __init__(self, iterable=None): self.end = end = [] end += [None, end, end] # sentinel node for doubly linked list self.map = {} # key --> [key, prev, next] if iterable is not None: self |= iterable def __len__(self): return len(self.map) def __contains__(self, key): return key in self.map def add(self, key): if key not in self.map: end = self.end curr = end[1] curr[2] = end[1] = self.map[key] = [key, curr, end] def discard(self, key): if key in self.map: key, prev_item, next_item = self.map.pop(key) prev_item[2] = next_item next_item[1] = prev_item def __iter__(self): end = self.end curr = end[2] while curr is not end: yield curr[0] curr = curr[2] def __reversed__(self): end = self.end curr = end[1] while curr is not end: yield curr[0] curr = curr[1] # The second argument is an addition that causes a pylint warning. def pop(self, last=True): # pylint: disable=W0221 if not self: raise KeyError('set is empty') key = self.end[1][0] if last else self.end[2][0] self.discard(key) return key def __repr__(self): if not self: return '%s()' % (self.__class__.__name__,) return '%s(%r)' % (self.__class__.__name__, list(self)) def __eq__(self, other): if isinstance(other, OrderedSet): return len(self) == len(other) and list(self) == list(other) return set(self) == set(other) # Extensions to the recipe. 
def update(self, iterable): for i in iterable: if i not in self: self.add(i) class CycleError(Exception): """An exception raised when an unexpected cycle is detected.""" def __init__(self, nodes): self.nodes = nodes def __str__(self): return 'CycleError: cycle involving: ' + str(self.nodes) def TopologicallySorted(graph, get_edges): r"""Topologically sort based on a user provided edge definition. Args: graph: A list of node names. get_edges: A function mapping from node name to a hashable collection of node names which this node has outgoing edges to. Returns: A list containing all of the nodes in graph in topological order. It is assumed that calling get_edges once for each node and caching is cheaper than repeatedly calling get_edges. Raises: CycleError in the event of a cycle. Example: graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'} def GetEdges(node): return re.findall(r'\$\(([^)]*)\)', graph[node]) print TopologicallySorted(graph.keys(), GetEdges) ==> ['a', 'c', 'b'] """ get_edges = memoize(get_edges) visited = set() visiting = set() ordered_nodes = [] def Visit(node): if node in visiting: raise CycleError(visiting) if node in visited: return visited.add(node) visiting.add(node) for neighbor in get_edges(node): Visit(neighbor) visiting.remove(node) ordered_nodes.insert(0, node) for node in sorted(graph): Visit(node) return ordered_nodes
saghul/gyn
gyn/common.py
Python
bsd-3-clause
19,147
[ "VisIt" ]
dad8ddf108ac5b0f2545e2aa511ce96670d61549dc02aa596de2a9e5ef25d3c9
#!/usr/bin/python # -*- coding: utf8 -*- import numpy as np import math import time from scipy.ndimage import correlate1d def convol1d(array,kernel,scale_factor=None): """ The convol1d function convolves an array with a kernel 1D, and returns the result. Convolution is a general process that can be used for various types of smoothing, signal processing, shifting, differentiation, edge detection, etc. """ row = array.shape[0] column = array.shape[1] R = np.zeros([row,column]) m = len(kernel) if scale_factor == None: r=correlate1d(array,kernel) R[:,m/2:column-math.ceil(m/2.)+1]=r[:,m/2:column-math.ceil(m/2.)+1] kernel=kernel/float(scale_factor) r=correlate1d(array,kernel) R[:,m/2:column-math.ceil(m/2.)+1]=r[:,m/2:column-math.ceil(m/2.)+1] return R def sconvol1d(arreglo,kernel=None,scale_factor=None,fwhm=None,std=None): """ This program will smooth a 2D array, including the edges, with one-dimensional kernels. Problems of this kind arise when, e.g. an array is to be convolved with a 2D symmetric gaussian, which is separable into two one-dimensional convolutions. """ #~ s=len(arreglo.shape) dims = np.ndim(arreglo) rows = arreglo.shape[0] collumns = arreglo.shape[1] if dims != 2: raise ValueError('Array must be 2-dimensional') if scale_factor == None: scale_factor = 1. if kernel == None: if (fwhm==None) and (std==None): raise ValueError('Convolve with what?') elif fwhm != None: std=fwhm/(2.*math.sqrt(2.*math.log(2.))) #~ elif std != None: #~ std=std elif std != None: width=int(std*9.) if width%2 == 0: width+=1 kernel=np.arange(float(width))-width/2 kernel=np.exp(-(kernel*kernel)/(2.*std*std)) kernel=kernel/(std*math.sqrt(2.*math.pi)) else: width=len(kernel) if width%2 == 0: raise ValueError('Dimension of kernel must be odd') big=np.empty([arreglo.shape[0]+width-1,arreglo.shape[1]+width-1]) edge=width/2 big[edge:big.shape[0]-edge,edge:big.shape[1]-edge]=arreglo for i in range(0,edge): big[edge:big.shape[0]-edge,i]=arreglo[:,edge-1-i] big[edge:big.shape[0]-edge,arreglo.shape[1]+edge+i]=arreglo[:,arreglo.shape[1]-1-i] big=convol1d(big,kernel,scale_factor) big=np.rot90(big,-1) for i in range(0,edge): big[:,i] = big[:,2*edge-1-i] big[:,arreglo.shape[0]+edge+i] = big[:,arreglo.shape[0]+edge-1-i] big=convol1d(big,kernel,scale_factor) big=np.rot90(big,-3) big=big[edge:arreglo.shape[0]+edge,edge:arreglo.shape[1]+edge] return big #~ start = time.time() #~ a=np.linspace(0,math.pi,30).reshape([5,6]) #~ kernel = np.array([1,2,3,2,1]) #~ print sconvol1d(a,std=0.10616525) #~ print (time.time() - start), " seconds"
Hypnus1803/FlowMaps
MainCodes/convol.py
Python
gpl-3.0
2,661
[ "Gaussian" ]
6113172766cdbe13327672d05bb3f1f085a4ee5e88369389b2a5520ed84a8227
#!/usr/bin/python # # Created on Aug 25, 2016 # @author: Gaurav Rastogi (grastogi@avinetworks.com) # Eric Anderson (eanderson@avinetworks.com) # module_check: supported # Avi Version: 17.1.1 # # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: avi_virtualservice author: Gaurav Rastogi (grastogi@avinetworks.com) short_description: Module for setup of VirtualService Avi RESTful Object description: - This module is used to configure VirtualService object - more examples at U(https://github.com/avinetworks/devops) requirements: [ avisdk ] version_added: "2.3" options: state: description: - The state that should be applied on the entity. default: present choices: ["absent","present"] active_standby_se_tag: description: - This configuration only applies if the virtualservice is in legacy active standby ha mode and load distribution among active standby is enabled. - This field is used to tag the virtualservice so that virtualservices with the same tag will share the same active serviceengine. - Virtualservices with different tags will have different active serviceengines. - If one of the serviceengine's in the serviceenginegroup fails, all virtualservices will end up using the same active serviceengine. - Redistribution of the virtualservices can be either manual or automated when the failed serviceengine recovers. - Redistribution is based on the auto redistribute property of the serviceenginegroup. - Enum options - ACTIVE_STANDBY_SE_1, ACTIVE_STANDBY_SE_2. - Default value when not specified in API or module is interpreted by Avi Controller as ACTIVE_STANDBY_SE_1. analytics_policy: description: - Determines analytics settings for the application. analytics_profile_ref: description: - Specifies settings related to analytics. - It is a reference to an object of type analyticsprofile. application_profile_ref: description: - Enable application layer specific features for the virtual service. - It is a reference to an object of type applicationprofile. auto_allocate_floating_ip: description: - Auto-allocate floating/elastic ip from the cloud infrastructure. - Field deprecated in 17.1.1. - Default value when not specified in API or module is interpreted by Avi Controller as False. auto_allocate_ip: description: - Auto-allocate vip from the provided subnet. - Field deprecated in 17.1.1. - Default value when not specified in API or module is interpreted by Avi Controller as False. availability_zone: description: - Availability-zone to place the virtual service. - Field deprecated in 17.1.1. avi_allocated_fip: description: - (internal-use) fip allocated by avi in the cloud infrastructure. - Field deprecated in 17.1.1. - Default value when not specified in API or module is interpreted by Avi Controller as False. 
avi_allocated_vip: description: - (internal-use) vip allocated by avi in the cloud infrastructure. - Field deprecated in 17.1.1. - Default value when not specified in API or module is interpreted by Avi Controller as False. client_auth: description: - Http authentication configuration for protected resources. cloud_config_cksum: description: - Checksum of cloud configuration for vs. - Internally set by cloud connector. cloud_ref: description: - It is a reference to an object of type cloud. cloud_type: description: - Enum options - cloud_none, cloud_vcenter, cloud_openstack, cloud_aws, cloud_vca, cloud_apic, cloud_mesos, cloud_linuxserver, cloud_docker_ucp, - cloud_rancher, cloud_oshift_k8s. - Default value when not specified in API or module is interpreted by Avi Controller as CLOUD_NONE. connections_rate_limit: description: - Rate limit the incoming connections to this virtual service. content_rewrite: description: - Profile used to match and rewrite strings in request and/or response body. created_by: description: - Creator name. delay_fairness: description: - Select the algorithm for qos fairness. - This determines how multiple virtual services sharing the same service engines will prioritize traffic over a congested network. - Default value when not specified in API or module is interpreted by Avi Controller as False. description: description: - User defined description for the object. discovered_network_ref: description: - (internal-use) discovered networks providing reachability for client facing virtual service ip. - This field is deprecated. - It is a reference to an object of type network. - Field deprecated in 17.1.1. discovered_networks: description: - (internal-use) discovered networks providing reachability for client facing virtual service ip. - This field is used internally by avi, not editable by the user. - Field deprecated in 17.1.1. discovered_subnet: description: - (internal-use) discovered subnets providing reachability for client facing virtual service ip. - This field is deprecated. - Field deprecated in 17.1.1. dns_info: description: - Service discovery specific data including fully qualified domain name, type and time-to-live of the dns record. - Note that only one of fqdn and dns_info setting is allowed. dns_policies: description: - Dns policies applied on the dns traffic of the virtual service. - Field introduced in 17.1.1. version_added: "2.4" east_west_placement: description: - Force placement on all se's in service group (mesos mode only). - Default value when not specified in API or module is interpreted by Avi Controller as False. enable_autogw: description: - Response traffic to clients will be sent back to the source mac address of the connection, rather than statically sent to a default gateway. - Default value when not specified in API or module is interpreted by Avi Controller as True. enable_rhi: description: - Enable route health injection using the bgp config in the vrf context. enable_rhi_snat: description: - Enable route health injection for source nat'ted floating ip address using the bgp config in the vrf context. enabled: description: - Enable or disable the virtual service. - Default value when not specified in API or module is interpreted by Avi Controller as True. floating_ip: description: - Floating ip to associate with this virtual service. - Field deprecated in 17.1.1. 
floating_subnet_uuid: description: - If auto_allocate_floating_ip is true and more than one floating-ip subnets exist, then the subnet for the floating ip address allocation. - This field is applicable only if the virtualservice belongs to an openstack or aws cloud. - In openstack or aws cloud it is required when auto_allocate_floating_ip is selected. - Field deprecated in 17.1.1. flow_dist: description: - Criteria for flow distribution among ses. - Enum options - LOAD_AWARE, CONSISTENT_HASH_SOURCE_IP_ADDRESS, CONSISTENT_HASH_SOURCE_IP_ADDRESS_AND_PORT. - Default value when not specified in API or module is interpreted by Avi Controller as LOAD_AWARE. flow_label_type: description: - Criteria for flow labelling. - Enum options - NO_LABEL, SERVICE_LABEL. - Default value when not specified in API or module is interpreted by Avi Controller as NO_LABEL. fqdn: description: - Dns resolvable, fully qualified domain name of the virtualservice. - Only one of 'fqdn' and 'dns_info' configuration is allowed. host_name_xlate: description: - Translate the host name sent to the servers to this value. - Translate the host name sent from servers back to the value used by the client. http_policies: description: - Http policies applied on the data traffic of the virtual service. ign_pool_net_reach: description: - Ignore pool servers network reachability constraints for virtual service placement. - Default value when not specified in API or module is interpreted by Avi Controller as False. ip_address: description: - Ip address of the virtual service. - Field deprecated in 17.1.1. ipam_network_subnet: description: - Subnet and/or network for allocating virtualservice ip by ipam provider module. limit_doser: description: - Limit potential dos attackers who exceed max_cps_per_client significantly to a fraction of max_cps_per_client for a while. - Default value when not specified in API or module is interpreted by Avi Controller as False. max_cps_per_client: description: - Maximum connections per second per client ip. - Allowed values are 10-1000. - Special values are 0- 'unlimited'. - Default value when not specified in API or module is interpreted by Avi Controller as 0. microservice_ref: description: - Microservice representing the virtual service. - It is a reference to an object of type microservice. name: description: - Name for the virtual service. required: true network_profile_ref: description: - Determines network settings such as protocol, tcp or udp, and related options for the protocol. - It is a reference to an object of type networkprofile. network_ref: description: - Manually override the network on which the virtual service is placed. - It is a reference to an object of type network. - Field deprecated in 17.1.1. network_security_policy_ref: description: - Network security policies for the virtual service. - It is a reference to an object of type networksecuritypolicy. nsx_securitygroup: description: - A list of nsx service groups representing the clients which can access the virtual ip of the virtual service. - Field introduced in 17.1.1. version_added: "2.4" performance_limits: description: - Optional settings that determine performance limits like max connections or bandwdith etc. pool_group_ref: description: - The pool group is an object that contains pools. - It is a reference to an object of type poolgroup. pool_ref: description: - The pool is an object that contains destination servers and related attributes such as load-balancing and persistence. 
- It is a reference to an object of type pool. port_uuid: description: - (internal-use) network port assigned to the virtual service ip address. - Field deprecated in 17.1.1. remove_listening_port_on_vs_down: description: - Remove listening port if virtualservice is down. - Default value when not specified in API or module is interpreted by Avi Controller as False. requests_rate_limit: description: - Rate limit the incoming requests to this virtual service. scaleout_ecmp: description: - Disable re-distribution of flows across service engines for a virtual service. - Enable if the network itself performs flow hashing with ecmp in environments such as gcp. - Default value when not specified in API or module is interpreted by Avi Controller as False. se_group_ref: description: - The service engine group to use for this virtual service. - Moving to a new se group is disruptive to existing connections for this vs. - It is a reference to an object of type serviceenginegroup. server_network_profile_ref: description: - Determines the network settings profile for the server side of tcp proxied connections. - Leave blank to use the same settings as the client to vs side of the connection. - It is a reference to an object of type networkprofile. service_metadata: description: - Metadata pertaining to the service provided by this virtual service. - In openshift/kubernetes environments, egress pod info is stored. - Any user input to this field will be overwritten by avi vantage. version_added: "2.4" service_pool_select: description: - Select pool based on destination port. services: description: - List of services defined for this virtual service. sideband_profile: description: - Sideband configuration to be used for this virtualservice.it can be used for sending traffic to sideband vips for external inspection etc. version_added: "2.4" snat_ip: description: - Nat'ted floating source ip address(es) for upstream connection to servers. ssl_key_and_certificate_refs: description: - Select or create one or two certificates, ec and/or rsa, that will be presented to ssl/tls terminated connections. - It is a reference to an object of type sslkeyandcertificate. ssl_profile_ref: description: - Determines the set of ssl versions and ciphers to accept for ssl/tls terminated connections. - It is a reference to an object of type sslprofile. ssl_sess_cache_avg_size: description: - Expected number of ssl session cache entries (may be exceeded). - Allowed values are 1024-16383. - Default value when not specified in API or module is interpreted by Avi Controller as 1024. static_dns_records: description: - List of static dns records applied to this virtual service. - These are static entries and no health monitoring is performed against the ip addresses. subnet: description: - Subnet providing reachability for client facing virtual service ip. - Field deprecated in 17.1.1. subnet_uuid: description: - It represents subnet for the virtual service ip address allocation when auto_allocate_ip is true.it is only applicable in openstack or aws cloud. - This field is required if auto_allocate_ip is true. - Field deprecated in 17.1.1. tenant_ref: description: - It is a reference to an object of type tenant. traffic_clone_profile_ref: description: - Server network or list of servers for cloning traffic. - It is a reference to an object of type trafficcloneprofile. - Field introduced in 17.1.1. 
version_added: "2.4" type: description: - Specify if this is a normal virtual service, or if it is the parent or child of an sni-enabled virtual hosted virtual service. - Enum options - VS_TYPE_NORMAL, VS_TYPE_VH_PARENT, VS_TYPE_VH_CHILD. - Default value when not specified in API or module is interpreted by Avi Controller as VS_TYPE_NORMAL. url: description: - Avi controller URL of the object. use_bridge_ip_as_vip: description: - Use bridge ip as vip on each host in mesos deployments. - Default value when not specified in API or module is interpreted by Avi Controller as False. uuid: description: - Uuid of the virtualservice. vh_domain_name: description: - The exact name requested from the client's sni-enabled tls hello domain name field. - If this is a match, the parent vs will forward the connection to this child vs. vh_parent_vs_uuid: description: - Specifies the virtual service acting as virtual hosting (sni) parent. vip: description: - List of virtual service ips. - While creating a 'shared vs',please use vsvip_ref to point to the shared entities. - Field introduced in 17.1.1. version_added: "2.4" vrf_context_ref: description: - Virtual routing context that the virtual service is bound to. - This is used to provide the isolation of the set of networks the application is attached to. - It is a reference to an object of type vrfcontext. vs_datascripts: description: - Datascripts applied on the data traffic of the virtual service. vsvip_ref: description: - Mostly used during the creation of shared vs, this fieldrefers to entities that can be shared across virtual services. - It is a reference to an object of type vsvip. - Field introduced in 17.1.1. version_added: "2.4" weight: description: - The quality of service weight to assign to traffic transmitted from this virtual service. - A higher weight will prioritize traffic versus other virtual services sharing the same service engines. - Default value when not specified in API or module is interpreted by Avi Controller as 1. extends_documentation_fragment: - avi ''' EXAMPLES = ''' - name: Create SSL Virtual Service using Pool testpool2 avi_virtualservice: controller: 10.10.27.90 username: admin password: AviNetworks123! 
name: newtestvs state: present performance_limits: max_concurrent_connections: 1000 services: - port: 443 enable_ssl: true - port: 80 ssl_profile_ref: '/api/sslprofile?name=System-Standard' application_profile_ref: '/api/applicationprofile?name=System-Secure-HTTP' ssl_key_and_certificate_refs: - '/api/sslkeyandcertificate?name=System-Default-Cert' ip_address: addr: 10.90.131.103 type: V4 pool_ref: '/api/pool?name=testpool2' ''' RETURN = ''' obj: description: VirtualService (api/virtualservice) object returned: success, changed type: dict ''' from ansible.module_utils.basic import AnsibleModule try: from ansible.module_utils.avi import ( avi_common_argument_spec, HAS_AVI, avi_ansible_api) except ImportError: HAS_AVI = False def main(): argument_specs = dict( state=dict(default='present', choices=['absent', 'present']), active_standby_se_tag=dict(type='str',), analytics_policy=dict(type='dict',), analytics_profile_ref=dict(type='str',), application_profile_ref=dict(type='str',), auto_allocate_floating_ip=dict(type='bool',), auto_allocate_ip=dict(type='bool',), availability_zone=dict(type='str',), avi_allocated_fip=dict(type='bool',), avi_allocated_vip=dict(type='bool',), client_auth=dict(type='dict',), cloud_config_cksum=dict(type='str',), cloud_ref=dict(type='str',), cloud_type=dict(type='str',), connections_rate_limit=dict(type='dict',), content_rewrite=dict(type='dict',), created_by=dict(type='str',), delay_fairness=dict(type='bool',), description=dict(type='str',), discovered_network_ref=dict(type='list',), discovered_networks=dict(type='list',), discovered_subnet=dict(type='list',), dns_info=dict(type='list',), dns_policies=dict(type='list',), east_west_placement=dict(type='bool',), enable_autogw=dict(type='bool',), enable_rhi=dict(type='bool',), enable_rhi_snat=dict(type='bool',), enabled=dict(type='bool',), floating_ip=dict(type='dict',), floating_subnet_uuid=dict(type='str',), flow_dist=dict(type='str',), flow_label_type=dict(type='str',), fqdn=dict(type='str',), host_name_xlate=dict(type='str',), http_policies=dict(type='list',), ign_pool_net_reach=dict(type='bool',), ip_address=dict(type='dict',), ipam_network_subnet=dict(type='dict',), limit_doser=dict(type='bool',), max_cps_per_client=dict(type='int',), microservice_ref=dict(type='str',), name=dict(type='str', required=True), network_profile_ref=dict(type='str',), network_ref=dict(type='str',), network_security_policy_ref=dict(type='str',), nsx_securitygroup=dict(type='list',), performance_limits=dict(type='dict',), pool_group_ref=dict(type='str',), pool_ref=dict(type='str',), port_uuid=dict(type='str',), remove_listening_port_on_vs_down=dict(type='bool',), requests_rate_limit=dict(type='dict',), scaleout_ecmp=dict(type='bool',), se_group_ref=dict(type='str',), server_network_profile_ref=dict(type='str',), service_metadata=dict(type='str',), service_pool_select=dict(type='list',), services=dict(type='list',), sideband_profile=dict(type='dict',), snat_ip=dict(type='list',), ssl_key_and_certificate_refs=dict(type='list',), ssl_profile_ref=dict(type='str',), ssl_sess_cache_avg_size=dict(type='int',), static_dns_records=dict(type='list',), subnet=dict(type='dict',), subnet_uuid=dict(type='str',), tenant_ref=dict(type='str',), traffic_clone_profile_ref=dict(type='str',), type=dict(type='str',), url=dict(type='str',), use_bridge_ip_as_vip=dict(type='bool',), uuid=dict(type='str',), vh_domain_name=dict(type='list',), vh_parent_vs_uuid=dict(type='str',), vip=dict(type='list',), vrf_context_ref=dict(type='str',), 
vs_datascripts=dict(type='list',), vsvip_ref=dict(type='str',), weight=dict(type='int',), ) argument_specs.update(avi_common_argument_spec()) module = AnsibleModule( argument_spec=argument_specs, supports_check_mode=True) if not HAS_AVI: return module.fail_json(msg=( 'Avi python API SDK (avisdk>=17.1) is not installed. ' 'For more details visit https://github.com/avinetworks/sdk.')) return avi_ansible_api(module, 'virtualservice', set([])) if __name__ == '__main__': main()
e-gob/plataforma-kioscos-autoatencion
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/network/avi/avi_virtualservice.py
Python
bsd-3-clause
24,047
[ "VisIt" ]
76abef2446e9df23b92489a4c70073cf6d57ae9cbf9c3079ad80e9f7cdd32605
import sys

import requests


class Grabber:
    def __init__(self, baseurl, proxies=None):
        self.baseurl = baseurl
        self.proxies = proxies


def getPage(url, proxies=None):
    # Fetch the page text for the given URL, optionally through a proxy.
    response = requests.get(url, proxies=proxies)
    # if response.status_code != 200:
    #     print "Well, that didn't work"
    #     sys.exit("blast!")
    return response.text


def getAbstract(text):
    # Parse the GtR project XML and return the project's abstract text.
    from xml.etree import cElementTree as ET
    project = ET.fromstring(text)
    abs = project.findall('{http://gtr.rcuk.ac.uk/gtr/api/project}abstractText')
    return abs[0].text


def main():
    url = "http://gtr.rcuk.ac.uk/gtr/api/projects/04D6290F-68B6-47AC-8D60-00796E01532E/"
    projectText = getPage(url, {"http": "http://wwwcache.rl.ac.uk:8080/"})
    if len(projectText) == 0:
        sys.exit("projectText is zero-length")
    else:
        pass
        # print projectText[:40]
    abstract = getAbstract(projectText) if projectText is not None else None
    print abstract


if __name__ == "__main__":
    main()
ijjorama/Impact
grabber.py
Python
mit
1,024
[ "BLAST" ]
3cde86fc30c26526569dcc3e9af8580ad529ee7aa59adfb978cbe34189862a8b
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Builds the CIFAR-10 network. Summary of available functions: # Compute input images and labels for training. If you would like to run # evaluations, use inputs() instead. inputs, labels = distorted_inputs() # Compute inference on the model inputs to make a prediction. predictions = inference(inputs) # Compute the total loss of the prediction with respect to the labels. loss = loss(predictions, labels) # Create a graph to run one step of training with respect to the loss. train_op = train(loss, global_step) """ # pylint: disable=missing-docstring from __future__ import absolute_import from __future__ import division from __future__ import print_function import gzip import os import re import sys import tarfile from six.moves import urllib import tensorflow as tf from tensorflow.models.image.cifar10 import cifar10_input FLAGS = tf.app.flags.FLAGS # Basic model parameters. tf.app.flags.DEFINE_integer('batch_size', 128, """Number of images to process in a batch.""") tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar10_data', """Path to the CIFAR-10 data directory.""") tf.app.flags.DEFINE_boolean('use_fp16', False, """Train the model using fp16.""") # Global constants describing the CIFAR-10 data set. IMAGE_SIZE = cifar10_input.IMAGE_SIZE NUM_CLASSES = cifar10_input.NUM_CLASSES NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL # Constants describing the training process. MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average. NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays. LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor. INITIAL_LEARNING_RATE = 0.1 # Initial learning rate. # If a model is trained with multiple GPUs, prefix all Op names with tower_name # to differentiate the operations. Note that this prefix is removed from the # names of the summaries when visualizing a model. TOWER_NAME = 'tower' DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz' def _activation_summary(x): """Helper to create summaries for activations. Creates a summary that provides a histogram of activations. Creates a summary that measure the sparsity of activations. Args: x: Tensor Returns: nothing """ # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training # session. This helps the clarity of presentation on tensorboard. tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name) tf.histogram_summary(tensor_name + '/activations', x) tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x)) def _variable_on_cpu(name, shape, initializer): """Helper to create a Variable stored on CPU memory. 
Args: name: name of the variable shape: list of ints initializer: initializer for Variable Returns: Variable Tensor """ with tf.device('/cpu:0'): dtype = tf.float16 if FLAGS.use_fp16 else tf.float32 var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype) return var def _variable_with_weight_decay(name, shape, stddev, wd): """Helper to create an initialized Variable with weight decay. Note that the Variable is initialized with a truncated normal distribution. A weight decay is added only if one is specified. Args: name: name of the variable shape: list of ints stddev: standard deviation of a truncated Gaussian wd: add L2Loss weight decay multiplied by this float. If None, weight decay is not added for this Variable. Returns: Variable Tensor """ dtype = tf.float16 if FLAGS.use_fp16 else tf.float32 var = _variable_on_cpu( name, shape, tf.truncated_normal_initializer(stddev=stddev, dtype=dtype)) if wd is not None: weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss') tf.add_to_collection('losses', weight_decay) return var def distorted_inputs(): """Construct distorted input for CIFAR training using the Reader ops. Returns: images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size. labels: Labels. 1D tensor of [batch_size] size. Raises: ValueError: If no data_dir """ if not FLAGS.data_dir: raise ValueError('Please supply a data_dir') data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin') images, labels = cifar10_input.distorted_inputs(data_dir=data_dir, batch_size=FLAGS.batch_size) if FLAGS.use_fp16: images = tf.cast(images, tf.float16) labels = tf.cast(labels, tf.float16) return images, labels def inputs(eval_data): """Construct input for CIFAR evaluation using the Reader ops. Args: eval_data: bool, indicating if one should use the train or eval data set. Returns: images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size. labels: Labels. 1D tensor of [batch_size] size. Raises: ValueError: If no data_dir """ if not FLAGS.data_dir: raise ValueError('Please supply a data_dir') data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin') images, labels = cifar10_input.inputs(eval_data=eval_data, data_dir=data_dir, batch_size=FLAGS.batch_size) if FLAGS.use_fp16: images = tf.cast(images, tf.float16) labels = tf.cast(labels, tf.float16) return images, labels def inference(images): """Build the CIFAR-10 model. Args: images: Images returned from distorted_inputs() or inputs(). Returns: Logits. """ # We instantiate all variables using tf.get_variable() instead of # tf.Variable() in order to share variables across multiple GPU training runs. # If we only ran this model on a single GPU, we could simplify this function # by replacing all instances of tf.get_variable() with tf.Variable(). 
# # conv1 with tf.variable_scope('conv1') as scope: kernel = _variable_with_weight_decay('weights', shape=[5, 5, 3, 64], stddev=5e-2, wd=0.0) conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME') biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.0)) bias = tf.nn.bias_add(conv, biases) conv1 = tf.nn.relu(bias, name=scope.name) _activation_summary(conv1) # pool1 pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1') # norm1 norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1') # conv2 with tf.variable_scope('conv2') as scope: kernel = _variable_with_weight_decay('weights', shape=[5, 5, 64, 64], stddev=5e-2, wd=0.0) conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME') biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1)) bias = tf.nn.bias_add(conv, biases) conv2 = tf.nn.relu(bias, name=scope.name) _activation_summary(conv2) # norm2 norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2') # pool2 pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2') # local3 with tf.variable_scope('local3') as scope: # Move everything into depth so we can perform a single matrix multiply. reshape = tf.reshape(pool2, [FLAGS.batch_size, -1]) dim = reshape.get_shape()[1].value weights = _variable_with_weight_decay('weights', shape=[dim, 384], stddev=0.04, wd=0.004) biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1)) local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name) _activation_summary(local3) # local4 with tf.variable_scope('local4') as scope: weights = _variable_with_weight_decay('weights', shape=[384, 192], stddev=0.04, wd=0.004) biases = _variable_on_cpu('biases', [192], tf.constant_initializer(0.1)) local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name=scope.name) _activation_summary(local4) # softmax, i.e. softmax(WX + b) with tf.variable_scope('softmax_linear') as scope: weights = _variable_with_weight_decay('weights', [192, NUM_CLASSES], stddev=1/192.0, wd=0.0) biases = _variable_on_cpu('biases', [NUM_CLASSES], tf.constant_initializer(0.0)) softmax_linear = tf.add(tf.matmul(local4, weights), biases, name=scope.name) _activation_summary(softmax_linear) return softmax_linear def loss(logits, labels): """Add L2Loss to all the trainable variables. Add summary for "Loss" and "Loss/avg". Args: logits: Logits from inference(). labels: Labels from distorted_inputs or inputs(). 1-D tensor of shape [batch_size] Returns: Loss tensor of type float. """ # Calculate the average cross entropy loss across the batch. labels = tf.cast(labels, tf.int64) cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits( logits, labels, name='cross_entropy_per_example') cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy') tf.add_to_collection('losses', cross_entropy_mean) # The total loss is defined as the cross entropy loss plus all of the weight # decay terms (L2 loss). return tf.add_n(tf.get_collection('losses'), name='total_loss') def _add_loss_summaries(total_loss): """Add summaries for losses in CIFAR-10 model. Generates moving average for all losses and associated summaries for visualizing the performance of the network. Args: total_loss: Total loss from loss(). Returns: loss_averages_op: op for generating moving averages of losses. """ # Compute the moving average of all individual losses and the total loss. 
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg') losses = tf.get_collection('losses') loss_averages_op = loss_averages.apply(losses + [total_loss]) # Attach a scalar summary to all individual losses and the total loss; do the # same for the averaged version of the losses. for l in losses + [total_loss]: # Name each loss as '(raw)' and name the moving average version of the loss # as the original loss name. tf.scalar_summary(l.op.name +' (raw)', l) tf.scalar_summary(l.op.name, loss_averages.average(l)) return loss_averages_op def train(total_loss, global_step): """Train CIFAR-10 model. Create an optimizer and apply to all trainable variables. Add moving average for all trainable variables. Args: total_loss: Total loss from loss(). global_step: Integer Variable counting the number of training steps processed. Returns: train_op: op for training. """ # Variables that affect learning rate. num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY) # Decay the learning rate exponentially based on the number of steps. lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE, global_step, decay_steps, LEARNING_RATE_DECAY_FACTOR, staircase=True) tf.scalar_summary('learning_rate', lr) # Generate moving averages of all losses and associated summaries. loss_averages_op = _add_loss_summaries(total_loss) # Compute gradients. with tf.control_dependencies([loss_averages_op]): opt = tf.train.GradientDescentOptimizer(lr) grads = opt.compute_gradients(total_loss) # Apply gradients. apply_gradient_op = opt.apply_gradients(grads, global_step=global_step) # Add histograms for trainable variables. for var in tf.trainable_variables(): tf.histogram_summary(var.op.name, var) # Add histograms for gradients. for grad, var in grads: if grad is not None: tf.histogram_summary(var.op.name + '/gradients', grad) # Track the moving averages of all trainable variables. variable_averages = tf.train.ExponentialMovingAverage( MOVING_AVERAGE_DECAY, global_step) variables_averages_op = variable_averages.apply(tf.trainable_variables()) with tf.control_dependencies([apply_gradient_op, variables_averages_op]): train_op = tf.no_op(name='train') return train_op def maybe_download_and_extract(): """Download and extract the tarball from Alex's website.""" dest_directory = FLAGS.data_dir if not os.path.exists(dest_directory): os.makedirs(dest_directory) filename = DATA_URL.split('/')[-1] filepath = os.path.join(dest_directory, filename) if not os.path.exists(filepath): def _progress(count, block_size, total_size): sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename, float(count * block_size) / float(total_size) * 100.0)) sys.stdout.flush() filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress) print() statinfo = os.stat(filepath) print('Successfully downloaded', filename, statinfo.st_size, 'bytes.') tarfile.open(filepath, 'r:gz').extractall(dest_directory)
HaebinShin/tensorflow
tensorflow/models/image/cifar10/cifar10.py
Python
apache-2.0
14,317
[ "Gaussian" ]
f0ce503b3f1bee2069d4873f679cf92fdc50e323b9cc3cf21db7d36fa58986a0
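The train() routine in the record above decays its learning rate with tf.train.exponential_decay(..., staircase=True), so the rate is multiplied by LEARNING_RATE_DECAY_FACTOR once every decay_steps steps rather than continuously. Below is a minimal standalone sketch of that staircase schedule; the constants are illustrative stand-ins, since the real values are defined earlier in cifar10.py and are not part of this excerpt.

import math

# Illustrative constants only; cifar10.py defines its own values elsewhere.
INITIAL_LEARNING_RATE = 0.1
LEARNING_RATE_DECAY_FACTOR = 0.1
DECAY_STEPS = 1000  # stands in for num_batches_per_epoch * NUM_EPOCHS_PER_DECAY

def staircase_exponential_decay(step):
    # With staircase=True the exponent is floored, so the rate drops in
    # discrete jumps once per DECAY_STEPS steps instead of continuously.
    return INITIAL_LEARNING_RATE * LEARNING_RATE_DECAY_FACTOR ** math.floor(step / DECAY_STEPS)

for step in (0, 999, 1000, 2500):
    print(step, staircase_exponential_decay(step))
# steps 0 and 999 stay at 0.1, step 1000 drops to 0.01, step 2500 to 0.001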
"""The ants module provides basic functions for interfacing with ants functions. Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data')) >>> os.chdir(datadir) """ from builtins import range from ..base import TraitedSpec, File, traits, InputMultiPath from .base import ANTSCommand, ANTSCommandInputSpec import os from ..traits_extension import isdefined class ANTSInputSpec(ANTSCommandInputSpec): dimension = traits.Enum(3, 2, argstr='%d', usedefault=False, position=1, desc='image dimension (2 or 3)') fixed_image = InputMultiPath(File(exists=True), mandatory=True, desc=('image to apply transformation to (generally a coregistered ' 'functional)')) moving_image = InputMultiPath(File(exists=True), argstr='%s', mandatory=True, desc=('image to apply transformation to (generally a coregistered ' 'functional)')) # Not all metrics are appropriate for all modalities. Also, not all metrics # are efficeint or appropriate at all resolution levels, Some metrics perform # well for gross global registraiton, but do poorly for small changes (i.e. # Mattes), and some metrics do well for small changes but don't work well for # gross level changes (i.e. 'CC'). # # This is a two stage registration. in the first stage # [ 'Mattes', .................] # ^^^^^^ <- First stage # Do a unimodal registration of the first elements of the fixed/moving input # list use the"CC" as the metric. # # In the second stage # [ ....., ['Mattes','CC'] ] # ^^^^^^^^^^^^^^^ <- Second stage # Do a multi-modal registration where the first elements of fixed/moving # input list use 'CC' metric and that is added to 'Mattes' metric result of # the second elements of the fixed/moving input. 
# # Cost = Sum_i ( metricweight[i] Metric_i ( fixedimage[i], movingimage[i]) ) metric = traits.List(traits.Enum('CC', 'MI', 'SMI', 'PR', 'SSD', 'MSQ', 'PSE'), mandatory=True, desc='') metric_weight = traits.List(traits.Float(), requires=['metric'], desc='') radius = traits.List(traits.Int(), requires=['metric'], desc='') output_transform_prefix = traits.Str('out', usedefault=True, argstr='--output-naming %s', mandatory=True, desc='') transformation_model = traits.Enum('Diff', 'Elast', 'Exp', 'Greedy Exp', 'SyN', argstr='%s', mandatory=True, desc='') gradient_step_length = traits.Float( requires=['transformation_model'], desc='') number_of_time_steps = traits.Float( requires=['gradient_step_length'], desc='') delta_time = traits.Float(requires=['number_of_time_steps'], desc='') symmetry_type = traits.Float(requires=['delta_time'], desc='') use_histogram_matching = traits.Bool( argstr='%s', default=True, usedefault=True) number_of_iterations = traits.List( traits.Int(), argstr='--number-of-iterations %s', sep='x') smoothing_sigmas = traits.List( traits.Int(), argstr='--gaussian-smoothing-sigmas %s', sep='x') subsampling_factors = traits.List( traits.Int(), argstr='--subsampling-factors %s', sep='x') affine_gradient_descent_option = traits.List(traits.Float(), argstr='%s') mi_option = traits.List(traits.Int(), argstr='--MI-option %s', sep='x') regularization = traits.Enum('Gauss', 'DMFFD', argstr='%s', desc='') regularization_gradient_field_sigma = traits.Float( requires=['regularization'], desc='') regularization_deformation_field_sigma = traits.Float( requires=['regularization'], desc='') number_of_affine_iterations = traits.List( traits.Int(), argstr='--number-of-affine-iterations %s', sep='x') class ANTSOutputSpec(TraitedSpec): affine_transform = File(exists=True, desc='Affine transform file') warp_transform = File(exists=True, desc='Warping deformation field') inverse_warp_transform = File( exists=True, desc='Inverse warping deformation field') metaheader = File(exists=True, desc='VTK metaheader .mhd file') metaheader_raw = File(exists=True, desc='VTK metaheader .raw file') class ANTS(ANTSCommand): """ Examples -------- >>> from nipype.interfaces.ants import ANTS >>> ants = ANTS() >>> ants.inputs.dimension = 3 >>> ants.inputs.output_transform_prefix = 'MY' >>> ants.inputs.metric = ['CC'] >>> ants.inputs.fixed_image = ['T1.nii'] >>> ants.inputs.moving_image = ['resting.nii'] >>> ants.inputs.metric_weight = [1.0] >>> ants.inputs.radius = [5] >>> ants.inputs.transformation_model = 'SyN' >>> ants.inputs.gradient_step_length = 0.25 >>> ants.inputs.number_of_iterations = [50, 35, 15] >>> ants.inputs.use_histogram_matching = True >>> ants.inputs.mi_option = [32, 16000] >>> ants.inputs.regularization = 'Gauss' >>> ants.inputs.regularization_gradient_field_sigma = 3 >>> ants.inputs.regularization_deformation_field_sigma = 0 >>> ants.inputs.number_of_affine_iterations = [10000,10000,10000,10000,10000] >>> ants.cmdline 'ANTS 3 --MI-option 32x16000 --image-metric CC[ T1.nii, resting.nii, 1, 5 ] --number-of-affine-iterations \ 10000x10000x10000x10000x10000 --number-of-iterations 50x35x15 --output-naming MY --regularization Gauss[3.0,0.0] \ --transformation-model SyN[0.25] --use-Histogram-Matching 1' """ _cmd = 'ANTS' input_spec = ANTSInputSpec output_spec = ANTSOutputSpec def _image_metric_constructor(self): retval = [] intensity_based = ['CC', 'MI', 'SMI', 'PR', 'SSD', 'MSQ'] point_set_based = ['PSE', 'JTB'] for ii in range(len(self.inputs.moving_image)): if self.inputs.metric[ii] in 
intensity_based: retval.append( '--image-metric %s[ %s, %s, %g, %d ]' % (self.inputs.metric[ii], self.inputs.fixed_image[ ii], self.inputs.moving_image[ ii], self.inputs.metric_weight[ ii], self.inputs.radius[ii])) elif self.inputs.metric[ii] == point_set_based: pass # retval.append('--image-metric %s[%s, %s, ...'.format(self.inputs.metric[ii], # self.inputs.fixed_image[ii], self.inputs.moving_image[ii], ...)) return ' '.join(retval) def _transformation_constructor(self): model = self.inputs.transformation_model step_length = self.inputs.gradient_step_length time_step = self.inputs.number_of_time_steps delta_time = self.inputs.delta_time symmetry_type = self.inputs.symmetry_type retval = ['--transformation-model %s' % model] parameters = [] for elem in (step_length, time_step, delta_time, symmetry_type): if elem is not traits.Undefined: parameters.append('%#.2g' % elem) if len(parameters) > 0: if len(parameters) > 1: parameters = ','.join(parameters) else: parameters = ''.join(parameters) retval.append('[%s]' % parameters) return ''.join(retval) def _regularization_constructor(self): return '--regularization {0}[{1},{2}]'.format(self.inputs.regularization, self.inputs.regularization_gradient_field_sigma, self.inputs.regularization_deformation_field_sigma) def _affine_gradient_descent_option_constructor(self): values = self.inputs.affine_gradient_descent_option defaults = [0.1, 0.5, 1.e-4, 1.e-4] for ii in range(len(defaults)): try: defaults[ii] = values[ii] except IndexError: break parameters = self._format_xarray([('%g' % defaults[index]) for index in range(4)]) retval = ['--affine-gradient-descent-option', parameters] return ' '.join(retval) def _format_arg(self, opt, spec, val): if opt == 'moving_image': return self._image_metric_constructor() elif opt == 'transformation_model': return self._transformation_constructor() elif opt == 'regularization': return self._regularization_constructor() elif opt == 'affine_gradient_descent_option': return self._affine_gradient_descent_option_constructor() elif opt == 'use_histogram_matching': if self.inputs.use_histogram_matching: return '--use-Histogram-Matching 1' else: return '--use-Histogram-Matching 0' return super(ANTS, self)._format_arg(opt, spec, val) def _list_outputs(self): outputs = self._outputs().get() outputs['affine_transform'] = os.path.abspath( self.inputs.output_transform_prefix + 'Affine.txt') outputs['warp_transform'] = os.path.abspath( self.inputs.output_transform_prefix + 'Warp.nii.gz') outputs['inverse_warp_transform'] = os.path.abspath( self.inputs.output_transform_prefix + 'InverseWarp.nii.gz') # outputs['metaheader'] = os.path.abspath(self.inputs.output_transform_prefix + 'velocity.mhd') # outputs['metaheader_raw'] = os.path.abspath(self.inputs.output_transform_prefix + 'velocity.raw') return outputs class RegistrationInputSpec(ANTSCommandInputSpec): dimension = traits.Enum(3, 2, argstr='--dimensionality %d', usedefault=True, desc='image dimension (2 or 3)') fixed_image = InputMultiPath(File(exists=True), mandatory=True, desc='image to apply transformation to (generally a coregistered functional)') fixed_image_mask = File(argstr='%s', exists=True, desc='mask used to limit metric sampling region of the fixed image') moving_image = InputMultiPath(File(exists=True), mandatory=True, desc='image to apply transformation to (generally a coregistered functional)') moving_image_mask = File(requires=['fixed_image_mask'], exists=True, desc='mask used to limit metric sampling region of the moving image') save_state = 
File(argstr='--save-state %s', exists=False, desc='Filename for saving the internal restorable state of the registration') restore_state = File(argstr='--restore-state %s', exists=True, desc='Filename for restoring the internal restorable state of the registration') initial_moving_transform = File(argstr='%s', exists=True, desc='', xor=['initial_moving_transform_com']) invert_initial_moving_transform = traits.Bool(requires=["initial_moving_transform"], desc='', xor=['initial_moving_transform_com']) initial_moving_transform_com = traits.Enum(0, 1, 2, argstr='%s', default=0, xor=['initial_moving_transform'], desc="Use center of mass for moving transform") metric_item_trait = traits.Enum("CC", "MeanSquares", "Demons", "GC", "MI", "Mattes") metric_stage_trait = traits.Either( metric_item_trait, traits.List(metric_item_trait)) metric = traits.List(metric_stage_trait, mandatory=True, desc='the metric(s) to use for each stage. ' 'Note that multiple metrics per stage are not supported ' 'in ANTS 1.9.1 and earlier.') metric_weight_item_trait = traits.Float(1.0) metric_weight_stage_trait = traits.Either( metric_weight_item_trait, traits.List(metric_weight_item_trait)) metric_weight = traits.List( metric_weight_stage_trait, value=[1.0], usedefault=True, requires=['metric'], mandatory=True, desc='the metric weight(s) for each stage. ' 'The weights must sum to 1 per stage.') radius_bins_item_trait = traits.Int(5) radius_bins_stage_trait = traits.Either( radius_bins_item_trait, traits.List(radius_bins_item_trait)) radius_or_number_of_bins = traits.List( radius_bins_stage_trait, value=[5], usedefault=True, requires=['metric_weight'], desc='the number of bins in each stage for the MI and Mattes metric, ' 'the radius for other metrics') sampling_strategy_item_trait = traits.Enum( "None", "Regular", "Random", None) sampling_strategy_stage_trait = traits.Either( sampling_strategy_item_trait, traits.List(sampling_strategy_item_trait)) sampling_strategy = traits.List( trait=sampling_strategy_stage_trait, requires=['metric_weight'], desc='the metric sampling strategy (strategies) for each stage') sampling_percentage_item_trait = traits.Either( traits.Range(low=0.0, high=1.0), None) sampling_percentage_stage_trait = traits.Either( sampling_percentage_item_trait, traits.List(sampling_percentage_item_trait)) sampling_percentage = traits.List( trait=sampling_percentage_stage_trait, requires=['sampling_strategy'], desc="the metric sampling percentage(s) to use for each stage") use_estimate_learning_rate_once = traits.List(traits.Bool(), desc='') use_histogram_matching = traits.Either( traits.Bool, traits.List(traits.Bool(argstr='%s')), default=True, usedefault=True) interpolation = traits.Enum( 'Linear', 'NearestNeighbor', 'CosineWindowedSinc', 'WelchWindowedSinc', 'HammingWindowedSinc', 'LanczosWindowedSinc', 'BSpline', 'MultiLabel', 'Gaussian', argstr='%s', usedefault=True) interpolation_parameters = traits.Either(traits.Tuple(traits.Int()), # BSpline (order) traits.Tuple(traits.Float(), # Gaussian/MultiLabel (sigma, alpha) traits.Float()) ) write_composite_transform = traits.Bool( argstr='--write-composite-transform %d', default=False, usedefault=True, desc='') collapse_output_transforms = traits.Bool( argstr='--collapse-output-transforms %d', default=True, usedefault=True, # This should be true for explicit completeness desc=('Collapse output transforms. 
Specifically, enabling this option ' 'combines all adjacent linear transforms and composes all ' 'adjacent displacement field transforms before writing the ' 'results to disk.')) initialize_transforms_per_stage = traits.Bool( argstr='--initialize-transforms-per-stage %d', default=False, usedefault=True, # This should be true for explicit completeness desc=('Initialize linear transforms from the previous stage. By enabling this option, ' 'the current linear stage transform is directly intialized from the previous ' 'stages linear transform; this allows multiple linear stages to be run where ' 'each stage directly updates the estimated linear transform from the previous ' 'stage. (e.g. Translation -> Rigid -> Affine). ' )) # NOTE: Even though only 0=False and 1=True are allowed, ants uses integer # values instead of booleans float = traits.Bool( argstr='--float %d', default=False, desc='Use float instead of double for computations.') transforms = traits.List(traits.Enum('Rigid', 'Affine', 'CompositeAffine', 'Similarity', 'Translation', 'BSpline', 'GaussianDisplacementField', 'TimeVaryingVelocityField', 'TimeVaryingBSplineVelocityField', 'SyN', 'BSplineSyN', 'Exponential', 'BSplineExponential'), argstr='%s', mandatory=True) # TODO: input checking and allow defaults # All parameters must be specified for BSplineDisplacementField, TimeVaryingBSplineVelocityField, BSplineSyN, # Exponential, and BSplineExponential. EVEN DEFAULTS! transform_parameters = traits.List(traits.Either(traits.Tuple(traits.Float()), # Translation, Rigid, Affine, # CompositeAffine, Similarity traits.Tuple(traits.Float(), # GaussianDisplacementField, SyN traits.Float(), traits.Float() ), traits.Tuple(traits.Float(), # BSplineSyn, traits.Int(), # BSplineDisplacementField, traits.Int(), # TimeVaryingBSplineVelocityField traits.Int() ), traits.Tuple(traits.Float(), # TimeVaryingVelocityField traits.Int(), traits.Float(), traits.Float(), traits.Float(), traits.Float() ), traits.Tuple(traits.Float(), # Exponential traits.Float(), traits.Float(), traits.Int() ), traits.Tuple(traits.Float(), # BSplineExponential traits.Int(), traits.Int(), traits.Int(), traits.Int() ), ) ) # Convergence flags number_of_iterations = traits.List(traits.List(traits.Int())) smoothing_sigmas = traits.List(traits.List(traits.Float()), mandatory=True) sigma_units = traits.List(traits.Enum('mm', 'vox'), requires=['smoothing_sigmas'], desc="units for smoothing sigmas") shrink_factors = traits.List(traits.List(traits.Int()), mandatory=True) convergence_threshold = traits.List(trait=traits.Float(), value=[ 1e-6], minlen=1, requires=['number_of_iterations'], usedefault=True) convergence_window_size = traits.List(trait=traits.Int(), value=[ 10], minlen=1, requires=['convergence_threshold'], usedefault=True) # Output flags output_transform_prefix = traits.Str( "transform", usedefault=True, argstr="%s", desc="") output_warped_image = traits.Either( traits.Bool, File(), hash_files=False, desc="") output_inverse_warped_image = traits.Either(traits.Bool, File(), hash_files=False, requires=['output_warped_image'], desc="") winsorize_upper_quantile = traits.Range( low=0.0, high=1.0, value=1.0, argstr='%s', usedefault=True, desc="The Upper quantile to clip image ranges") winsorize_lower_quantile = traits.Range( low=0.0, high=1.0, value=0.0, argstr='%s', usedefault=True, desc="The Lower quantile to clip image ranges") class RegistrationOutputSpec(TraitedSpec): forward_transforms = traits.List( File(exists=True), desc='List of output transforms for forward 
registration') reverse_transforms = traits.List( File(exists=True), desc='List of output transforms for reverse registration') forward_invert_flags = traits.List(traits.Bool( ), desc='List of flags corresponding to the forward transforms') reverse_invert_flags = traits.List(traits.Bool( ), desc='List of flags corresponding to the reverse transforms') composite_transform = File(exists=True, desc='Composite transform file') inverse_composite_transform = File(desc='Inverse composite transform file') warped_image = File(desc="Outputs warped image") inverse_warped_image = File(desc="Outputs the inverse of the warped image") save_state = File(desc="The saved registration state to be restored") class Registration(ANTSCommand): """ Examples -------- >>> import copy, pprint >>> from nipype.interfaces.ants import Registration >>> reg = Registration() >>> reg.inputs.fixed_image = 'fixed1.nii' >>> reg.inputs.moving_image = 'moving1.nii' >>> reg.inputs.output_transform_prefix = "output_" >>> reg.inputs.initial_moving_transform = 'trans.mat' >>> reg.inputs.invert_initial_moving_transform = True >>> reg.inputs.transforms = ['Affine', 'SyN'] >>> reg.inputs.transform_parameters = [(2.0,), (0.25, 3.0, 0.0)] >>> reg.inputs.number_of_iterations = [[1500, 200], [100, 50, 30]] >>> reg.inputs.dimension = 3 >>> reg.inputs.write_composite_transform = True >>> reg.inputs.collapse_output_transforms = False >>> reg.inputs.initialize_transforms_per_stage = False >>> reg.inputs.metric = ['Mattes']*2 >>> reg.inputs.metric_weight = [1]*2 # Default (value ignored currently by ANTs) >>> reg.inputs.radius_or_number_of_bins = [32]*2 >>> reg.inputs.sampling_strategy = ['Random', None] >>> reg.inputs.sampling_percentage = [0.05, None] >>> reg.inputs.convergence_threshold = [1.e-8, 1.e-9] >>> reg.inputs.convergence_window_size = [20]*2 >>> reg.inputs.smoothing_sigmas = [[1,0], [2,1,0]] >>> reg.inputs.sigma_units = ['vox'] * 2 >>> reg.inputs.shrink_factors = [[2,1], [3,2,1]] >>> reg.inputs.use_estimate_learning_rate_once = [True, True] >>> reg.inputs.use_histogram_matching = [True, True] # This is the default >>> reg.inputs.output_warped_image = 'output_warped_image.nii.gz' >>> reg1 = copy.deepcopy(reg) >>> reg1.inputs.winsorize_lower_quantile = 0.025 >>> reg1.cmdline 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \ --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \ --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] \ --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 \ --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] \ --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] \ --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 \ --use-histogram-matching 1 --winsorize-image-intensities [ 0.025, 1.0 ] --write-composite-transform 1' >>> reg1.run() # doctest: +SKIP >>> reg2 = copy.deepcopy(reg) >>> reg2.inputs.winsorize_upper_quantile = 0.975 >>> reg2.cmdline 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \ --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \ --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] \ --convergence [ 1500x200, 1e-08, 20 ] 
--smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 \ --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] \ --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] \ --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 \ --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 0.975 ] --write-composite-transform 1' >>> reg3 = copy.deepcopy(reg) >>> reg3.inputs.winsorize_lower_quantile = 0.025 >>> reg3.inputs.winsorize_upper_quantile = 0.975 >>> reg3.cmdline 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \ --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \ --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] \ --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 \ --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] \ --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] \ --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 \ --use-histogram-matching 1 --winsorize-image-intensities [ 0.025, 0.975 ] --write-composite-transform 1' >>> reg3a = copy.deepcopy(reg) >>> reg3a.inputs.float = True >>> reg3a.cmdline 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --float 1 \ --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear \ --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] \ --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] \ --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 \ --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] \ --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 \ --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] \ --write-composite-transform 1' >>> reg3b = copy.deepcopy(reg) >>> reg3b.inputs.float = False >>> reg3b.cmdline 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --float 0 \ --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear \ --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] \ --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] \ --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 \ --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] \ --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 \ --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] \ --write-composite-transform 1' >>> # Test collapse transforms flag >>> reg4 = copy.deepcopy(reg) >>> reg4.inputs.save_state = 'trans.mat' >>> reg4.inputs.restore_state = 'trans.mat' >>> reg4.inputs.initialize_transforms_per_stage = True >>> reg4.inputs.collapse_output_transforms = True >>> outputs = reg4._list_outputs() >>> pprint.pprint(outputs) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE {'composite_transform': 
'.../nipype/testing/data/output_Composite.h5', 'forward_invert_flags': [], 'forward_transforms': [], 'inverse_composite_transform': '.../nipype/testing/data/output_InverseComposite.h5', 'inverse_warped_image': <undefined>, 'reverse_invert_flags': [], 'reverse_transforms': [], 'save_state': '.../nipype/testing/data/trans.mat', 'warped_image': '.../nipype/testing/data/output_warped_image.nii.gz'} >>> reg4.cmdline 'antsRegistration --collapse-output-transforms 1 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \ --initialize-transforms-per-stage 1 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \ --restore-state trans.mat --save-state trans.mat --transform Affine[ 2.0 ] \ --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] \ --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 \ --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] \ --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 \ --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] \ --write-composite-transform 1' >>> # Test collapse transforms flag >>> reg4b = copy.deepcopy(reg4) >>> reg4b.inputs.write_composite_transform = False >>> outputs = reg4b._list_outputs() >>> pprint.pprint(outputs) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE {'composite_transform': <undefined>, 'forward_invert_flags': [False, False], 'forward_transforms': ['.../nipype/testing/data/output_0GenericAffine.mat', '.../nipype/testing/data/output_1Warp.nii.gz'], 'inverse_composite_transform': <undefined>, 'inverse_warped_image': <undefined>, 'reverse_invert_flags': [True, False], 'reverse_transforms': ['.../nipype/testing/data/output_0GenericAffine.mat', \ '.../nipype/testing/data/output_1InverseWarp.nii.gz'], 'save_state': '.../nipype/testing/data/trans.mat', 'warped_image': '.../nipype/testing/data/output_warped_image.nii.gz'} >>> reg4b.aggregate_outputs() # doctest: +SKIP >>> reg4b.cmdline 'antsRegistration --collapse-output-transforms 1 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \ --initialize-transforms-per-stage 1 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \ --restore-state trans.mat --save-state trans.mat --transform Affine[ 2.0 ] \ --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] \ --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 \ --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] \ --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 \ --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] \ --write-composite-transform 0' >>> # Test multiple metrics per stage >>> reg5 = copy.deepcopy(reg) >>> reg5.inputs.fixed_image = 'fixed1.nii' >>> reg5.inputs.moving_image = 'moving1.nii' >>> reg5.inputs.metric = ['Mattes', ['Mattes', 'CC']] >>> reg5.inputs.metric_weight = [1, [.5,.5]] >>> reg5.inputs.radius_or_number_of_bins = [32, [32, 4] ] >>> reg5.inputs.sampling_strategy = ['Random', None] # use default strategy in second stage >>> reg5.inputs.sampling_percentage = [0.05, [0.05, 0.10]] >>> reg5.cmdline 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ 
trans.mat, 1 ] \ --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \ --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] \ --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 \ --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] \ --metric Mattes[ fixed1.nii, moving1.nii, 0.5, 32, None, 0.05 ] \ --metric CC[ fixed1.nii, moving1.nii, 0.5, 4, None, 0.1 ] --convergence [ 100x50x30, 1e-09, 20 ] \ --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 \ --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' >>> # Test multiple inputs >>> reg6 = copy.deepcopy(reg5) >>> reg6.inputs.fixed_image = ['fixed1.nii', 'fixed2.nii'] >>> reg6.inputs.moving_image = ['moving1.nii', 'moving2.nii'] >>> reg6.cmdline 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \ --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \ --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] \ --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 \ --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] \ --metric Mattes[ fixed1.nii, moving1.nii, 0.5, 32, None, 0.05 ] \ --metric CC[ fixed2.nii, moving2.nii, 0.5, 4, None, 0.1 ] --convergence [ 100x50x30, 1e-09, 20 ] \ --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 \ --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' >>> # Test Interpolation Parameters (BSpline) >>> reg7a = copy.deepcopy(reg) >>> reg7a.inputs.interpolation = 'BSpline' >>> reg7a.inputs.interpolation_parameters = (3,) >>> reg7a.cmdline 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \ --initialize-transforms-per-stage 0 --interpolation BSpline[ 3 ] --output [ output_, output_warped_image.nii.gz ] \ --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] \ --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 \ --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] \ --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] \ --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 \ --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' >>> # Test Interpolation Parameters (MultiLabel/Gaussian) >>> reg7b = copy.deepcopy(reg) >>> reg7b.inputs.interpolation = 'Gaussian' >>> reg7b.inputs.interpolation_parameters = (1.0, 1.0) >>> reg7b.cmdline 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \ --initialize-transforms-per-stage 0 --interpolation Gaussian[ 1.0, 1.0 ] \ --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] \ --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] \ --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 \ --transform SyN[ 0.25, 3.0, 0.0 ] --metric 
Mattes[ fixed1.nii, moving1.nii, 1, 32 ] \ --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 \ --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] \ --write-composite-transform 1' >>> # Test Extended Transform Parameters >>> reg8 = copy.deepcopy(reg) >>> reg8.inputs.transforms = ['Affine', 'BSplineSyN'] >>> reg8.inputs.transform_parameters = [(2.0,), (0.25, 26, 0, 3)] >>> reg8.cmdline 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \ --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \ --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] \ --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 \ --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform BSplineSyN[ 0.25, 26, 0, 3 ] \ --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] \ --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 \ --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' """ DEF_SAMPLING_STRATEGY = 'None' """The default sampling strategy argument.""" _cmd = 'antsRegistration' input_spec = RegistrationInputSpec output_spec = RegistrationOutputSpec _quantilesDone = False _linear_transform_names = ['Rigid', 'Affine', 'Translation', 'CompositeAffine', 'Similarity'] def _format_metric(self, index): """ Format the antsRegistration -m metric argument(s). Parameters ---------- index: the stage index """ # The metric name input for the current stage. name_input = self.inputs.metric[index] # The stage-specific input dictionary. stage_inputs = dict( fixed_image=self.inputs.fixed_image[0], moving_image=self.inputs.moving_image[0], metric=name_input, weight=self.inputs.metric_weight[index], radius_or_bins=self.inputs.radius_or_number_of_bins[index], optional=self.inputs.radius_or_number_of_bins[index] ) # The optional sampling strategy and percentage. if isdefined(self.inputs.sampling_strategy) and self.inputs.sampling_strategy: sampling_strategy = self.inputs.sampling_strategy[index] if sampling_strategy: stage_inputs['sampling_strategy'] = sampling_strategy if isdefined(self.inputs.sampling_percentage) and self.inputs.sampling_percentage: sampling_percentage = self.inputs.sampling_percentage[index] if sampling_percentage: stage_inputs['sampling_percentage'] = sampling_percentage # Make a list of metric specifications, one per -m command line # argument for the current stage. # If there are multiple inputs for this stage, then convert the # dictionary of list inputs into a list of metric specifications. # Otherwise, make a singleton list of the metric specification # from the non-list inputs. if isinstance(name_input, list): items = list(stage_inputs.items()) indexes = list(range(0, len(name_input))) specs = list() for i in indexes: temp = dict([(k, v[i]) for k, v in items]) if len(self.inputs.fixed_image) == 1: temp["fixed_image"] = self.inputs.fixed_image[0] else: temp["fixed_image"] = self.inputs.fixed_image[i] if len(self.inputs.moving_image) == 1: temp["moving_image"] = self.inputs.moving_image[0] else: temp["moving_image"] = self.inputs.moving_image[i] specs.append(temp) else: specs = [stage_inputs] # Format the --metric command line metric arguments, one per # specification. 
return [self._format_metric_argument(**spec) for spec in specs] @staticmethod def _format_metric_argument(**kwargs): retval = '%s[ %s, %s, %g, %d' % (kwargs['metric'], kwargs['fixed_image'], kwargs['moving_image'], kwargs['weight'], kwargs['radius_or_bins']) # The optional sampling strategy. if 'sampling_strategy' in kwargs: sampling_strategy = kwargs['sampling_strategy'] elif 'sampling_percentage' in kwargs: # The sampling percentage is specified but not the # sampling strategy. Use the default strategy. sampling_strategy = Registration.DEF_SAMPLING_STRATEGY else: sampling_strategy = None # Format the optional sampling arguments. if sampling_strategy: retval += ', %s' % sampling_strategy if 'sampling_percentage' in kwargs: retval += ', %g' % kwargs['sampling_percentage'] retval += ' ]' return retval def _format_transform(self, index): retval = [] retval.append('%s[ ' % self.inputs.transforms[index]) parameters = ', '.join([str( element) for element in self.inputs.transform_parameters[index]]) retval.append('%s' % parameters) retval.append(' ]') return "".join(retval) def _format_registration(self): retval = [] for ii in range(len(self.inputs.transforms)): retval.append('--transform %s' % (self._format_transform(ii))) for metric in self._format_metric(ii): retval.append('--metric %s' % metric) retval.append('--convergence %s' % self._format_convergence(ii)) if isdefined(self.inputs.sigma_units): retval.append('--smoothing-sigmas %s%s' % (self._format_xarray(self.inputs.smoothing_sigmas[ii]), self.inputs.sigma_units[ii])) else: retval.append('--smoothing-sigmas %s' % self._format_xarray(self.inputs.smoothing_sigmas[ii])) retval.append('--shrink-factors %s' % self._format_xarray(self.inputs.shrink_factors[ii])) if isdefined(self.inputs.use_estimate_learning_rate_once): retval.append('--use-estimate-learning-rate-once %d' % self.inputs.use_estimate_learning_rate_once[ii]) if isdefined(self.inputs.use_histogram_matching): # use_histogram_matching is either a common flag for all transforms # or a list of transform-specific flags if isinstance(self.inputs.use_histogram_matching, bool): histval = self.inputs.use_histogram_matching else: histval = self.inputs.use_histogram_matching[ii] retval.append('--use-histogram-matching %d' % histval) return " ".join(retval) def _get_outputfilenames(self, inverse=False): output_filename = None if not inverse: if isdefined(self.inputs.output_warped_image) and \ self.inputs.output_warped_image: output_filename = self.inputs.output_warped_image if isinstance(output_filename, bool): output_filename = '%s_Warped.nii.gz' % self.inputs.output_transform_prefix else: output_filename = output_filename return output_filename inv_output_filename = None if isdefined(self.inputs.output_inverse_warped_image) and \ self.inputs.output_inverse_warped_image: inv_output_filename = self.inputs.output_inverse_warped_image if isinstance(inv_output_filename, bool): inv_output_filename = '%s_InverseWarped.nii.gz' % self.inputs.output_transform_prefix else: inv_output_filename = inv_output_filename return inv_output_filename def _format_convergence(self, ii): convergence_iter = self._format_xarray(self.inputs.number_of_iterations[ii]) if len(self.inputs.convergence_threshold) > ii: convergence_value = self.inputs.convergence_threshold[ii] else: convergence_value = self.inputs.convergence_threshold[0] if len(self.inputs.convergence_window_size) > ii: convergence_ws = self.inputs.convergence_window_size[ii] else: convergence_ws = self.inputs.convergence_window_size[0] return '[ %s, %g, 
%d ]' % (convergence_iter, convergence_value, convergence_ws) def _format_winsorize_image_intensities(self): if not self.inputs.winsorize_upper_quantile > self.inputs.winsorize_lower_quantile: raise RuntimeError("Upper bound MUST be more than lower bound: %g > %g" % (self.inputs.winsorize_upper_quantile, self.inputs.winsorize_lower_quantile)) self._quantilesDone = True return '--winsorize-image-intensities [ %s, %s ]' % (self.inputs.winsorize_lower_quantile, self.inputs.winsorize_upper_quantile) def _format_arg(self, opt, spec, val): if opt == 'fixed_image_mask': if isdefined(self.inputs.moving_image_mask): return '--masks [ %s, %s ]' % (self.inputs.fixed_image_mask, self.inputs.moving_image_mask) else: return '--masks %s' % self.inputs.fixed_image_mask elif opt == 'transforms': return self._format_registration() elif opt == 'initial_moving_transform': try: do_invert_transform = int(self.inputs.invert_initial_moving_transform) except ValueError: do_invert_transform = 0 # Just do the default behavior return '--initial-moving-transform [ %s, %d ]' % (self.inputs.initial_moving_transform, do_invert_transform) elif opt == 'initial_moving_transform_com': try: do_center_of_mass_init = int(self.inputs.initial_moving_transform_com) except ValueError: do_center_of_mass_init = 0 # Just do the default behavior return '--initial-moving-transform [ %s, %s, %d ]' % (self.inputs.fixed_image[0], self.inputs.moving_image[0], do_center_of_mass_init) elif opt == 'interpolation': if self.inputs.interpolation in ['BSpline', 'MultiLabel', 'Gaussian'] and \ isdefined(self.inputs.interpolation_parameters): return '--interpolation %s[ %s ]' % (self.inputs.interpolation, ', '.join([str(param) for param in self.inputs.interpolation_parameters])) else: return '--interpolation %s' % self.inputs.interpolation elif opt == 'output_transform_prefix': out_filename = self._get_outputfilenames(inverse=False) inv_out_filename = self._get_outputfilenames(inverse=True) if out_filename and inv_out_filename: return '--output [ %s, %s, %s ]' % (self.inputs.output_transform_prefix, out_filename, inv_out_filename) elif out_filename: return '--output [ %s, %s ]' % (self.inputs.output_transform_prefix, out_filename) else: return '--output %s' % self.inputs.output_transform_prefix elif opt == 'winsorize_upper_quantile' or opt == 'winsorize_lower_quantile': if not self._quantilesDone: return self._format_winsorize_image_intensities() else: self._quantilesDone = False return '' # Must return something for argstr! # This feature was removed from recent versions of antsRegistration due to corrupt outputs. 
# elif opt == 'collapse_linear_transforms_to_fixed_image_header': # return self._formatCollapseLinearTransformsToFixedImageHeader() return super(Registration, self)._format_arg(opt, spec, val) def _output_filenames(self, prefix, count, transform, inverse=False): self.low_dimensional_transform_map = {'Rigid': 'Rigid.mat', 'Affine': 'Affine.mat', 'GenericAffine': 'GenericAffine.mat', 'CompositeAffine': 'Affine.mat', 'Similarity': 'Similarity.mat', 'Translation': 'Translation.mat', 'BSpline': 'BSpline.txt', 'Initial': 'DerivedInitialMovingTranslation.mat'} if transform in list(self.low_dimensional_transform_map.keys()): suffix = self.low_dimensional_transform_map[transform] inverse_mode = inverse else: inverse_mode = False # These are not analytically invertable if inverse: suffix = 'InverseWarp.nii.gz' else: suffix = 'Warp.nii.gz' return '%s%d%s' % (prefix, count, suffix), inverse_mode def _list_outputs(self): outputs = self._outputs().get() outputs['forward_transforms'] = [] outputs['forward_invert_flags'] = [] outputs['reverse_transforms'] = [] outputs['reverse_invert_flags'] = [] # invert_initial_moving_transform should be always defined, even if # there's no initial transform invert_initial_moving_transform = False if isdefined(self.inputs.invert_initial_moving_transform): invert_initial_moving_transform = self.inputs.invert_initial_moving_transform if self.inputs.write_composite_transform: filename = self.inputs.output_transform_prefix + 'Composite.h5' outputs['composite_transform'] = os.path.abspath(filename) filename = self.inputs.output_transform_prefix + \ 'InverseComposite.h5' outputs['inverse_composite_transform'] = os.path.abspath(filename) else: # If composite transforms are written, then individuals are not written (as of 2014-10-26 if not self.inputs.collapse_output_transforms: transform_count = 0 if isdefined(self.inputs.initial_moving_transform): outputs['forward_transforms'].append(self.inputs.initial_moving_transform) outputs['forward_invert_flags'].append(invert_initial_moving_transform) outputs['reverse_transforms'].insert(0, self.inputs.initial_moving_transform) outputs['reverse_invert_flags'].insert(0, not invert_initial_moving_transform) # Prepend transform_count += 1 elif isdefined(self.inputs.initial_moving_transform_com): forward_filename, forward_inversemode = self._output_filenames( self.inputs.output_transform_prefix, transform_count, 'Initial') reverse_filename, reverse_inversemode = self._output_filenames( self.inputs.output_transform_prefix, transform_count, 'Initial', True) outputs['forward_transforms'].append(os.path.abspath(forward_filename)) outputs['forward_invert_flags'].append(False) outputs['reverse_transforms'].insert(0, os.path.abspath(reverse_filename)) outputs['reverse_invert_flags'].insert(0, True) transform_count += 1 for count in range(len(self.inputs.transforms)): forward_filename, forward_inversemode = self._output_filenames( self.inputs.output_transform_prefix, transform_count, self.inputs.transforms[count]) reverse_filename, reverse_inversemode = self._output_filenames( self.inputs.output_transform_prefix, transform_count, self.inputs.transforms[count], True) outputs['forward_transforms'].append(os.path.abspath(forward_filename)) outputs['forward_invert_flags'].append(forward_inversemode) outputs['reverse_transforms'].insert(0, os.path.abspath(reverse_filename)) outputs['reverse_invert_flags'].insert(0, reverse_inversemode) transform_count += 1 else: transform_count = 0 is_linear = [t in self._linear_transform_names for t in 
self.inputs.transforms] collapse_list = [] if isdefined(self.inputs.initial_moving_transform) or \ isdefined(self.inputs.initial_moving_transform_com): is_linear.insert(0, True) # Only files returned by collapse_output_transforms if any(is_linear): collapse_list.append('GenericAffine') if not all(is_linear): collapse_list.append('SyN') for transform in collapse_list: forward_filename, forward_inversemode = self._output_filenames( self.inputs.output_transform_prefix, transform_count, transform, inverse=False) reverse_filename, reverse_inversemode = self._output_filenames( self.inputs.output_transform_prefix, transform_count, transform, inverse=True) outputs['forward_transforms'].append(os.path.abspath(forward_filename)) outputs['forward_invert_flags'].append(forward_inversemode) outputs['reverse_transforms'].append(os.path.abspath(reverse_filename)) outputs['reverse_invert_flags'].append(reverse_inversemode) transform_count += 1 out_filename = self._get_outputfilenames(inverse=False) inv_out_filename = self._get_outputfilenames(inverse=True) if out_filename: outputs['warped_image'] = os.path.abspath(out_filename) if inv_out_filename: outputs['inverse_warped_image'] = os.path.abspath(inv_out_filename) if len(self.inputs.save_state): outputs['save_state'] = os.path.abspath(self.inputs.save_state) return outputs
FCP-INDI/nipype
nipype/interfaces/ants/registration.py
Python
bsd-3-clause
55,905
[ "Gaussian", "VTK" ]
7bcb0a822d937b588404b32896e7ae511ab83f9e10c06fd9273ad5d46eeb6350
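The Registration interface in the record above serializes each stage's metric into the bracketed form antsRegistration expects (see _format_metric_argument). The following is a small standalone sketch of that formatting rule, not the interface itself; the function name is hypothetical and the file names are taken from the doctest examples in the record.

def format_metric_argument(metric, fixed_image, moving_image, weight,
                           radius_or_bins, sampling_strategy=None,
                           sampling_percentage=None):
    # Mirrors the bracketed form used on the command line:
    #   Metric[ fixed, moving, weight, radius_or_bins, strategy, percentage ]
    arg = '%s[ %s, %s, %g, %d' % (metric, fixed_image, moving_image,
                                  weight, radius_or_bins)
    if sampling_percentage is not None and sampling_strategy is None:
        # Percentage given without a strategy: fall back to the default
        # strategy, as the interface above does with DEF_SAMPLING_STRATEGY.
        sampling_strategy = 'None'
    if sampling_strategy:
        arg += ', %s' % sampling_strategy
        if sampling_percentage is not None:
            arg += ', %g' % sampling_percentage
    return arg + ' ]'

print(format_metric_argument('Mattes', 'fixed1.nii', 'moving1.nii', 1.0, 32,
                             'Random', 0.05))
# Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ]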
from Firefly import logging, scheduler
from Firefly.const import COMMAND_SET_LIGHT
from Firefly.helpers.events import Command

# TODO: Refactor this code.


class CTFade(object):
  def __init__(self, firefly, ff_id, start_k, end_k, fade_sec, start_level=100, end_level=100, run=True):
    if type(start_level) is not int:
      start_level = 100
    if type(end_level) is not int:
      end_level = 100

    self._firefly = firefly
    self._ff_id = ff_id
    self._start_ct = start_k
    self._end_ct = end_k
    self._start_level = max(start_level, 1)
    self._end_level = end_level
    self._fade_sec = fade_sec
    self._run = run

    self._time_remaining = self._fade_sec
    self._current_ct = self._start_ct
    self._current_level = self._start_level
    self._level_step = 0
    self._first_run = True

    self._interval = 1
    if self._fade_sec > 100:
      self._interval = 4
    if self._fade_sec > 300:
      self._interval = 10
    if self._fade_sec > 600:
      self._interval = 20
    if self._fade_sec > 900:
      self._interval = 30

    self._delay = calculate_delay(self._interval, self._fade_sec)
    self._ct_step = calculate_ct_step(self._interval, self._fade_sec, self._start_ct, self._end_ct)

    self._level_control = True if self._start_level and self._end_level else False
    if self._level_control:
      self._level_step = calculate_level_step(self._interval, self._fade_sec, self._start_level, self._end_level)
      if self._level_step == 0:
        self._current_level = self._end_level

    if self._run:
      self.runFade()

  def runFade(self):
    if not self._run:
      return

    if self._first_run:
      if self._level_control:
        command = Command(self._ff_id, 'ct_fade', COMMAND_SET_LIGHT, level=self._current_level, ct=ct_string(self._start_ct), ct_fade=True, transitiontime=1)
      else:
        command = Command(self._ff_id, 'ct_fade', COMMAND_SET_LIGHT, ct=ct_string(self._start_ct), ct_fade=True, transitiontime=1)
      self._firefly.send_command(command)
      self._first_run = False

    self._current_ct += self._ct_step
    self._time_remaining -= self._delay
    if self._level_control:
      self._current_level += self._level_step

    if self._level_control:
      command = Command(self._ff_id, 'ct_fade', COMMAND_SET_LIGHT, ct=ct_string(self._current_ct), transitiontime=self._delay * 10, level=self._current_level, ct_fade=True)
    else:
      command = Command(self._ff_id, 'ct_fade', COMMAND_SET_LIGHT, ct=ct_string(self._current_ct), transitiontime=self._delay * 10, ct_fade=True)
    self._firefly.send_command(command)

    if self._time_remaining > 0:
      scheduler.runInS(int(self._delay), self.runFade, replace=True, job_id=self._ff_id + '_ct_fade')
    else:
      self.send_last_fade()

  def send_last_fade(self):
    if self._level_control:
      command = Command(self._ff_id, 'ct_fade', COMMAND_SET_LIGHT, ct=ct_string(self._end_ct), transitiontime=self._delay * 10, level=self._end_level, ct_fade=True)
    else:
      command = Command(self._ff_id, 'ct_fade', COMMAND_SET_LIGHT, ct=ct_string(self._end_ct), transitiontime=self._delay * 10, ct_fade=True)
    self._firefly.send_command(command)
    self.endRun()

  def endRun(self):
    logging.info("Ending Fade")
    self._run = False
    self._first_run = True


def calculate_delay(interval: int, fade_sec: int):
  if fade_sec < 10:
    return 0
  return int(fade_sec / interval)


def calculate_ct_step(interval: int, fade_sec: int, start_ct: int, end_ct: int):
  if fade_sec < 10:
    return end_ct
  else:
    return int((end_ct - start_ct) / interval)


def calculate_level_step(interval: int, fade_sec: int, start_level: int, end_level: int):
  if abs(start_level - end_level) <= interval or fade_sec < 10:
    return start_level
  return int((end_level - start_level) / interval)


def ct_string(ct):
  return '%sK' % ct
Firefly-Automation/Firefly
Firefly/components/hue/ct_fade.py
Python
apache-2.0
4,030
[ "Firefly" ]
52fbb743158a2ca62ec04ef6fd9d1d90da7fbd432d453e0029d93398842c0ed3
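CTFade in the record above advances the color temperature by a fixed Kelvin amount every delay seconds: calculate_delay and calculate_ct_step split the fade duration and the Kelvin span across the number of updates chosen in __init__. A quick worked example with made-up inputs:

# Hypothetical fade: 2700K to 6500K over 120 seconds.
fade_sec = 120
start_ct, end_ct = 2700, 6500

# Matches the interval table in CTFade.__init__: fades over 100s use 4 updates.
interval = 4 if fade_sec > 100 else 1

delay = int(fade_sec / interval)               # calculate_delay: 30s between updates
ct_step = int((end_ct - start_ct) / interval)  # calculate_ct_step: 950K per update

print(delay, ct_step)  # 30 950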
from time import time from collections import namedtuple import warnings from scipy import stats import numpy as np from ..base import clone from ..exceptions import ConvergenceWarning from ..preprocessing import normalize from ..utils import (check_array, check_random_state, _safe_indexing, is_scalar_nan) from ..utils.validation import FLOAT_DTYPES, check_is_fitted from ..utils._mask import _get_mask from ._base import _BaseImputer from ._base import SimpleImputer from ._base import _check_inputs_dtype _ImputerTriplet = namedtuple('_ImputerTriplet', ['feat_idx', 'neighbor_feat_idx', 'estimator']) class IterativeImputer(_BaseImputer): """Multivariate imputer that estimates each feature from all the others. A strategy for imputing missing values by modeling each feature with missing values as a function of other features in a round-robin fashion. Read more in the :ref:`User Guide <iterative_imputer>`. .. versionadded:: 0.21 .. note:: This estimator is still **experimental** for now: the predictions and the API might change without any deprecation cycle. To use it, you need to explicitly import ``enable_iterative_imputer``:: >>> # explicitly require this experimental feature >>> from sklearn.experimental import enable_iterative_imputer # noqa >>> # now you can import normally from sklearn.impute >>> from sklearn.impute import IterativeImputer Parameters ---------- estimator : estimator object, default=BayesianRidge() The estimator to use at each step of the round-robin imputation. If ``sample_posterior`` is True, the estimator must support ``return_std`` in its ``predict`` method. missing_values : int, np.nan, default=np.nan The placeholder for the missing values. All occurrences of `missing_values` will be imputed. For pandas' dataframes with nullable integer dtypes with missing values, `missing_values` should be set to `np.nan`, since `pd.NA` will be converted to `np.nan`. sample_posterior : boolean, default=False Whether to sample from the (Gaussian) predictive posterior of the fitted estimator for each imputation. Estimator must support ``return_std`` in its ``predict`` method if set to ``True``. Set to ``True`` if using ``IterativeImputer`` for multiple imputations. max_iter : int, default=10 Maximum number of imputation rounds to perform before returning the imputations computed during the final round. A round is a single imputation of each feature with missing values. The stopping criterion is met once `max(abs(X_t - X_{t-1}))/max(abs(X[known_vals])) < tol`, where `X_t` is `X` at iteration `t`. Note that early stopping is only applied if ``sample_posterior=False``. tol : float, default=1e-3 Tolerance of the stopping condition. n_nearest_features : int, default=None Number of other features to use to estimate the missing values of each feature column. Nearness between features is measured using the absolute correlation coefficient between each feature pair (after initial imputation). To ensure coverage of features throughout the imputation process, the neighbor features are not necessarily nearest, but are drawn with probability proportional to correlation for each imputed target feature. Can provide significant speed-up when the number of features is huge. If ``None``, all features will be used. initial_strategy : str, default='mean' Which strategy to use to initialize the missing values. Same as the ``strategy`` parameter in :class:`~sklearn.impute.SimpleImputer` Valid values: {"mean", "median", "most_frequent", or "constant"}. 
imputation_order : str, default='ascending' The order in which the features will be imputed. Possible values: "ascending" From features with fewest missing values to most. "descending" From features with most missing values to fewest. "roman" Left to right. "arabic" Right to left. "random" A random order for each round. skip_complete : boolean, default=False If ``True`` then features with missing values during ``transform`` which did not have any missing values during ``fit`` will be imputed with the initial imputation method only. Set to ``True`` if you have many features with no missing values at both ``fit`` and ``transform`` time to save compute. min_value : float or array-like of shape (n_features,), default=-np.inf Minimum possible imputed value. Broadcast to shape (n_features,) if scalar. If array-like, expects shape (n_features,), one min value for each feature. The default is `-np.inf`. .. versionchanged:: 0.23 Added support for array-like. max_value : float or array-like of shape (n_features,), default=np.inf Maximum possible imputed value. Broadcast to shape (n_features,) if scalar. If array-like, expects shape (n_features,), one max value for each feature. The default is `np.inf`. .. versionchanged:: 0.23 Added support for array-like. verbose : int, default=0 Verbosity flag, controls the debug messages that are issued as functions are evaluated. The higher, the more verbose. Can be 0, 1, or 2. random_state : int, RandomState instance or None, default=None The seed of the pseudo random number generator to use. Randomizes selection of estimator features if n_nearest_features is not None, the ``imputation_order`` if ``random``, and the sampling from posterior if ``sample_posterior`` is True. Use an integer for determinism. See :term:`the Glossary <random_state>`. add_indicator : boolean, default=False If True, a :class:`MissingIndicator` transform will stack onto output of the imputer's transform. This allows a predictive estimator to account for missingness despite imputation. If a feature has no missing values at fit/train time, the feature won't appear on the missing indicator even if there are missing values at transform/test time. Attributes ---------- initial_imputer_ : object of type :class:`~sklearn.impute.SimpleImputer` Imputer used to initialize the missing values. imputation_sequence_ : list of tuples Each tuple has ``(feat_idx, neighbor_feat_idx, estimator)``, where ``feat_idx`` is the current feature to be imputed, ``neighbor_feat_idx`` is the array of other features used to impute the current feature, and ``estimator`` is the trained estimator used for the imputation. Length is ``self.n_features_with_missing_ * self.n_iter_``. n_iter_ : int Number of iteration rounds that occurred. Will be less than ``self.max_iter`` if early stopping criterion was reached. n_features_with_missing_ : int Number of features with missing values. indicator_ : :class:`~sklearn.impute.MissingIndicator` Indicator used to add binary indicators for missing values. ``None`` if add_indicator is False. random_state_ : RandomState instance RandomState instance that is generated either from a seed, the random number generator or by `np.random`. See Also -------- SimpleImputer : Univariate imputation of missing values. 
Examples -------- >>> import numpy as np >>> from sklearn.experimental import enable_iterative_imputer >>> from sklearn.impute import IterativeImputer >>> imp_mean = IterativeImputer(random_state=0) >>> imp_mean.fit([[7, 2, 3], [4, np.nan, 6], [10, 5, 9]]) IterativeImputer(random_state=0) >>> X = [[np.nan, 2, 3], [4, np.nan, 6], [10, np.nan, 9]] >>> imp_mean.transform(X) array([[ 6.9584..., 2. , 3. ], [ 4. , 2.6000..., 6. ], [10. , 4.9999..., 9. ]]) Notes ----- To support imputation in inductive mode we store each feature's estimator during the ``fit`` phase, and predict without refitting (in order) during the ``transform`` phase. Features which contain all missing values at ``fit`` are discarded upon ``transform``. References ---------- .. [1] `Stef van Buuren, Karin Groothuis-Oudshoorn (2011). "mice: Multivariate Imputation by Chained Equations in R". Journal of Statistical Software 45: 1-67. <https://www.jstatsoft.org/article/view/v045i03>`_ .. [2] `S. F. Buck, (1960). "A Method of Estimation of Missing Values in Multivariate Data Suitable for use with an Electronic Computer". Journal of the Royal Statistical Society 22(2): 302-306. <https://www.jstor.org/stable/2984099>`_ """ def __init__(self, estimator=None, *, missing_values=np.nan, sample_posterior=False, max_iter=10, tol=1e-3, n_nearest_features=None, initial_strategy="mean", imputation_order='ascending', skip_complete=False, min_value=-np.inf, max_value=np.inf, verbose=0, random_state=None, add_indicator=False): super().__init__( missing_values=missing_values, add_indicator=add_indicator ) self.estimator = estimator self.sample_posterior = sample_posterior self.max_iter = max_iter self.tol = tol self.n_nearest_features = n_nearest_features self.initial_strategy = initial_strategy self.imputation_order = imputation_order self.skip_complete = skip_complete self.min_value = min_value self.max_value = max_value self.verbose = verbose self.random_state = random_state def _impute_one_feature(self, X_filled, mask_missing_values, feat_idx, neighbor_feat_idx, estimator=None, fit_mode=True): """Impute a single feature from the others provided. This function predicts the missing values of one of the features using the current estimates of all the other features. The ``estimator`` must support ``return_std=True`` in its ``predict`` method for this function to work. Parameters ---------- X_filled : ndarray Input data with the most recent imputations. mask_missing_values : ndarray Input data's missing indicator matrix. feat_idx : int Index of the feature currently being imputed. neighbor_feat_idx : ndarray Indices of the features to be used in imputing ``feat_idx``. estimator : object The estimator to use at this step of the round-robin imputation. If ``sample_posterior`` is True, the estimator must support ``return_std`` in its ``predict`` method. If None, it will be cloned from self._estimator. fit_mode : boolean, default=True Whether to fit and predict with the estimator or just predict. Returns ------- X_filled : ndarray Input data with ``X_filled[missing_row_mask, feat_idx]`` updated. estimator : estimator with sklearn API The fitted estimator used to impute ``X_filled[missing_row_mask, feat_idx]``. 
""" if estimator is None and fit_mode is False: raise ValueError("If fit_mode is False, then an already-fitted " "estimator should be passed in.") if estimator is None: estimator = clone(self._estimator) missing_row_mask = mask_missing_values[:, feat_idx] if fit_mode: X_train = _safe_indexing(X_filled[:, neighbor_feat_idx], ~missing_row_mask) y_train = _safe_indexing(X_filled[:, feat_idx], ~missing_row_mask) estimator.fit(X_train, y_train) # if no missing values, don't predict if np.sum(missing_row_mask) == 0: return X_filled, estimator # get posterior samples if there is at least one missing value X_test = _safe_indexing(X_filled[:, neighbor_feat_idx], missing_row_mask) if self.sample_posterior: mus, sigmas = estimator.predict(X_test, return_std=True) imputed_values = np.zeros(mus.shape, dtype=X_filled.dtype) # two types of problems: (1) non-positive sigmas # (2) mus outside legal range of min_value and max_value # (results in inf sample) positive_sigmas = sigmas > 0 imputed_values[~positive_sigmas] = mus[~positive_sigmas] mus_too_low = mus < self._min_value[feat_idx] imputed_values[mus_too_low] = self._min_value[feat_idx] mus_too_high = mus > self._max_value[feat_idx] imputed_values[mus_too_high] = self._max_value[feat_idx] # the rest can be sampled without statistical issues inrange_mask = positive_sigmas & ~mus_too_low & ~mus_too_high mus = mus[inrange_mask] sigmas = sigmas[inrange_mask] a = (self._min_value[feat_idx] - mus) / sigmas b = (self._max_value[feat_idx] - mus) / sigmas truncated_normal = stats.truncnorm(a=a, b=b, loc=mus, scale=sigmas) imputed_values[inrange_mask] = truncated_normal.rvs( random_state=self.random_state_) else: imputed_values = estimator.predict(X_test) imputed_values = np.clip(imputed_values, self._min_value[feat_idx], self._max_value[feat_idx]) # update the feature X_filled[missing_row_mask, feat_idx] = imputed_values return X_filled, estimator def _get_neighbor_feat_idx(self, n_features, feat_idx, abs_corr_mat): """Get a list of other features to predict ``feat_idx``. If self.n_nearest_features is less than or equal to the total number of features, then use a probability proportional to the absolute correlation between ``feat_idx`` and each other feature to randomly choose a subsample of the other features (without replacement). Parameters ---------- n_features : int Number of features in ``X``. feat_idx : int Index of the feature currently being imputed. abs_corr_mat : ndarray, shape (n_features, n_features) Absolute correlation matrix of ``X``. The diagonal has been zeroed out and each feature has been normalized to sum to 1. Can be None. Returns ------- neighbor_feat_idx : array-like The features to use to impute ``feat_idx``. """ if (self.n_nearest_features is not None and self.n_nearest_features < n_features): p = abs_corr_mat[:, feat_idx] neighbor_feat_idx = self.random_state_.choice( np.arange(n_features), self.n_nearest_features, replace=False, p=p) else: inds_left = np.arange(feat_idx) inds_right = np.arange(feat_idx + 1, n_features) neighbor_feat_idx = np.concatenate((inds_left, inds_right)) return neighbor_feat_idx def _get_ordered_idx(self, mask_missing_values): """Decide in what order we will update the features. As a homage to the MICE R package, we will have 4 main options of how to order the updates, and use a random order if anything else is specified. Also, this function skips features which have no missing values. 
Parameters ---------- mask_missing_values : array-like, shape (n_samples, n_features) Input data's missing indicator matrix, where "n_samples" is the number of samples and "n_features" is the number of features. Returns ------- ordered_idx : ndarray, shape (n_features,) The order in which to impute the features. """ frac_of_missing_values = mask_missing_values.mean(axis=0) if self.skip_complete: missing_values_idx = np.flatnonzero(frac_of_missing_values) else: missing_values_idx = np.arange(np.shape(frac_of_missing_values)[0]) if self.imputation_order == 'roman': ordered_idx = missing_values_idx elif self.imputation_order == 'arabic': ordered_idx = missing_values_idx[::-1] elif self.imputation_order == 'ascending': n = len(frac_of_missing_values) - len(missing_values_idx) ordered_idx = np.argsort(frac_of_missing_values, kind='mergesort')[n:] elif self.imputation_order == 'descending': n = len(frac_of_missing_values) - len(missing_values_idx) ordered_idx = np.argsort(frac_of_missing_values, kind='mergesort')[n:][::-1] elif self.imputation_order == 'random': ordered_idx = missing_values_idx self.random_state_.shuffle(ordered_idx) else: raise ValueError("Got an invalid imputation order: '{0}'. It must " "be one of the following: 'roman', 'arabic', " "'ascending', 'descending', or " "'random'.".format(self.imputation_order)) return ordered_idx def _get_abs_corr_mat(self, X_filled, tolerance=1e-6): """Get absolute correlation matrix between features. Parameters ---------- X_filled : ndarray, shape (n_samples, n_features) Input data with the most recent imputations. tolerance : float, default=1e-6 ``abs_corr_mat`` can have nans, which will be replaced with ``tolerance``. Returns ------- abs_corr_mat : ndarray, shape (n_features, n_features) Absolute correlation matrix of ``X`` at the beginning of the current round. The diagonal has been zeroed out and each feature's absolute correlations with all others have been normalized to sum to 1. """ n_features = X_filled.shape[1] if (self.n_nearest_features is None or self.n_nearest_features >= n_features): return None with np.errstate(invalid='ignore'): # if a feature in the neighboorhood has only a single value # (e.g., categorical feature), the std. dev. will be null and # np.corrcoef will raise a warning due to a division by zero abs_corr_mat = np.abs(np.corrcoef(X_filled.T)) # np.corrcoef is not defined for features with zero std abs_corr_mat[np.isnan(abs_corr_mat)] = tolerance # ensures exploration, i.e. at least some probability of sampling np.clip(abs_corr_mat, tolerance, None, out=abs_corr_mat) # features are not their own neighbors np.fill_diagonal(abs_corr_mat, 0) # needs to sum to 1 for np.random.choice sampling abs_corr_mat = normalize(abs_corr_mat, norm='l1', axis=0, copy=False) return abs_corr_mat def _initial_imputation(self, X, in_fit=False): """Perform initial imputation for input X. Parameters ---------- X : ndarray, shape (n_samples, n_features) Input data, where "n_samples" is the number of samples and "n_features" is the number of features. in_fit : bool, default=False Whether function is called in fit. Returns ------- Xt : ndarray, shape (n_samples, n_features) Input data, where "n_samples" is the number of samples and "n_features" is the number of features. X_filled : ndarray, shape (n_samples, n_features) Input data with the most recent imputations. 
mask_missing_values : ndarray, shape (n_samples, n_features) Input data's missing indicator matrix, where "n_samples" is the number of samples and "n_features" is the number of features. X_missing_mask : ndarray, shape (n_samples, n_features) Input data's mask matrix indicating missing datapoints, where "n_samples" is the number of samples and "n_features" is the number of features. """ if is_scalar_nan(self.missing_values): force_all_finite = "allow-nan" else: force_all_finite = True X = self._validate_data(X, dtype=FLOAT_DTYPES, order="F", reset=in_fit, force_all_finite=force_all_finite) _check_inputs_dtype(X, self.missing_values) X_missing_mask = _get_mask(X, self.missing_values) mask_missing_values = X_missing_mask.copy() if self.initial_imputer_ is None: self.initial_imputer_ = SimpleImputer( missing_values=self.missing_values, strategy=self.initial_strategy ) X_filled = self.initial_imputer_.fit_transform(X) else: X_filled = self.initial_imputer_.transform(X) valid_mask = np.flatnonzero(np.logical_not( np.isnan(self.initial_imputer_.statistics_))) Xt = X[:, valid_mask] mask_missing_values = mask_missing_values[:, valid_mask] return Xt, X_filled, mask_missing_values, X_missing_mask @staticmethod def _validate_limit(limit, limit_type, n_features): """Validate the limits (min/max) of the feature values Converts scalar min/max limits to vectors of shape (n_features,) Parameters ---------- limit: scalar or array-like The user-specified limit (i.e, min_value or max_value) limit_type: string, "max" or "min" n_features: Number of features in the dataset Returns ------- limit: ndarray, shape(n_features,) Array of limits, one for each feature """ limit_bound = np.inf if limit_type == "max" else -np.inf limit = limit_bound if limit is None else limit if np.isscalar(limit): limit = np.full(n_features, limit) limit = check_array( limit, force_all_finite=False, copy=False, ensure_2d=False ) if not limit.shape[0] == n_features: raise ValueError( f"'{limit_type}_value' should be of " f"shape ({n_features},) when an array-like " f"is provided. Got {limit.shape}, instead." ) return limit def fit_transform(self, X, y=None): """Fits the imputer on X and return the transformed X. Parameters ---------- X : array-like, shape (n_samples, n_features) Input data, where "n_samples" is the number of samples and "n_features" is the number of features. y : ignored. Returns ------- Xt : array-like, shape (n_samples, n_features) The imputed input data. """ self.random_state_ = getattr(self, "random_state_", check_random_state(self.random_state)) if self.max_iter < 0: raise ValueError( "'max_iter' should be a positive integer. Got {} instead." .format(self.max_iter)) if self.tol < 0: raise ValueError( "'tol' should be a non-negative float. Got {} instead." .format(self.tol) ) if self.estimator is None: from ..linear_model import BayesianRidge self._estimator = BayesianRidge() else: self._estimator = clone(self.estimator) self.imputation_sequence_ = [] self.initial_imputer_ = None X, Xt, mask_missing_values, complete_mask = ( self._initial_imputation(X, in_fit=True)) super()._fit_indicator(complete_mask) X_indicator = super()._transform_indicator(complete_mask) if self.max_iter == 0 or np.all(mask_missing_values): self.n_iter_ = 0 return super()._concatenate_indicator(Xt, X_indicator) # Edge case: a single feature. We return the initial ... 
if Xt.shape[1] == 1: self.n_iter_ = 0 return super()._concatenate_indicator(Xt, X_indicator) self._min_value = self._validate_limit( self.min_value, "min", X.shape[1]) self._max_value = self._validate_limit( self.max_value, "max", X.shape[1]) if not np.all(np.greater(self._max_value, self._min_value)): raise ValueError( "One (or more) features have min_value >= max_value.") # order in which to impute # note this is probably too slow for large feature data (d > 100000) # and a better way would be good. # see: https://goo.gl/KyCNwj and subsequent comments ordered_idx = self._get_ordered_idx(mask_missing_values) self.n_features_with_missing_ = len(ordered_idx) abs_corr_mat = self._get_abs_corr_mat(Xt) n_samples, n_features = Xt.shape if self.verbose > 0: print("[IterativeImputer] Completing matrix with shape %s" % (X.shape,)) start_t = time() if not self.sample_posterior: Xt_previous = Xt.copy() normalized_tol = self.tol * np.max( np.abs(X[~mask_missing_values]) ) for self.n_iter_ in range(1, self.max_iter + 1): if self.imputation_order == 'random': ordered_idx = self._get_ordered_idx(mask_missing_values) for feat_idx in ordered_idx: neighbor_feat_idx = self._get_neighbor_feat_idx(n_features, feat_idx, abs_corr_mat) Xt, estimator = self._impute_one_feature( Xt, mask_missing_values, feat_idx, neighbor_feat_idx, estimator=None, fit_mode=True) estimator_triplet = _ImputerTriplet(feat_idx, neighbor_feat_idx, estimator) self.imputation_sequence_.append(estimator_triplet) if self.verbose > 1: print('[IterativeImputer] Ending imputation round ' '%d/%d, elapsed time %0.2f' % (self.n_iter_, self.max_iter, time() - start_t)) if not self.sample_posterior: inf_norm = np.linalg.norm(Xt - Xt_previous, ord=np.inf, axis=None) if self.verbose > 0: print('[IterativeImputer] ' 'Change: {}, scaled tolerance: {} '.format( inf_norm, normalized_tol)) if inf_norm < normalized_tol: if self.verbose > 0: print('[IterativeImputer] Early stopping criterion ' 'reached.') break Xt_previous = Xt.copy() else: if not self.sample_posterior: warnings.warn("[IterativeImputer] Early stopping criterion not" " reached.", ConvergenceWarning) Xt[~mask_missing_values] = X[~mask_missing_values] return super()._concatenate_indicator(Xt, X_indicator) def transform(self, X): """Imputes all missing values in X. Note that this is stochastic, and that if random_state is not fixed, repeated calls, or permuted input, will yield different results. Parameters ---------- X : array-like of shape (n_samples, n_features) The input data to complete. Returns ------- Xt : array-like, shape (n_samples, n_features) The imputed input data. 
""" check_is_fitted(self) X, Xt, mask_missing_values, complete_mask = self._initial_imputation(X) X_indicator = super()._transform_indicator(complete_mask) if self.n_iter_ == 0 or np.all(mask_missing_values): return super()._concatenate_indicator(Xt, X_indicator) imputations_per_round = len(self.imputation_sequence_) // self.n_iter_ i_rnd = 0 if self.verbose > 0: print("[IterativeImputer] Completing matrix with shape %s" % (X.shape,)) start_t = time() for it, estimator_triplet in enumerate(self.imputation_sequence_): Xt, _ = self._impute_one_feature( Xt, mask_missing_values, estimator_triplet.feat_idx, estimator_triplet.neighbor_feat_idx, estimator=estimator_triplet.estimator, fit_mode=False ) if not (it + 1) % imputations_per_round: if self.verbose > 1: print('[IterativeImputer] Ending imputation round ' '%d/%d, elapsed time %0.2f' % (i_rnd + 1, self.n_iter_, time() - start_t)) i_rnd += 1 Xt[~mask_missing_values] = X[~mask_missing_values] return super()._concatenate_indicator(Xt, X_indicator) def fit(self, X, y=None): """Fits the imputer on X and return self. Parameters ---------- X : array-like, shape (n_samples, n_features) Input data, where "n_samples" is the number of samples and "n_features" is the number of features. y : ignored Returns ------- self : object Returns self. """ self.fit_transform(X) return self
anntzer/scikit-learn
sklearn/impute/_iterative.py
Python
bsd-3-clause
31,090
[ "Gaussian" ]
96fce4332d89df66467af6a181114ff067b014f5e8b0d31d991395c7d2269c0e
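As a side note to the sample_posterior branch documented in _impute_one_feature above: the sketch below is a standalone illustration (an assumption of this note, not code from the repository) of how a draw from a normal distribution truncated to [min_value, max_value] can be produced with scipy.stats.truncnorm. The means, standard deviations and bounds are invented, and the real method additionally clamps predictive means that fall outside the bounds before sampling.

# Illustrative sketch only (hypothetical values): truncated-normal imputation draws.
import numpy as np
from scipy import stats

rng = np.random.RandomState(0)
mus = np.array([0.5, 3.2, -1.0])     # hypothetical predictive means
sigmas = np.array([0.2, 0.5, 0.0])   # hypothetical predictive std devs
min_value, max_value = 0.0, 5.0

imputed = np.empty_like(mus)
positive = sigmas > 0
# Degenerate std dev: fall back to the (clipped) mean, as the class does.
imputed[~positive] = np.clip(mus[~positive], min_value, max_value)
# Otherwise sample from a normal truncated to [min_value, max_value].
a = (min_value - mus[positive]) / sigmas[positive]
b = (max_value - mus[positive]) / sigmas[positive]
imputed[positive] = stats.truncnorm(
    a=a, b=b, loc=mus[positive], scale=sigmas[positive]
).rvs(random_state=rng)
print(imputed)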
"""Small testing script to test biomajmanager functionality""" from __future__ import print_function import shutil import os import tempfile import time import unittest from nose.plugins.attrib import attr from pymongo import MongoClient from datetime import datetime from biomajmanager.links import Links from biomajmanager.manager import Manager from biomajmanager.news import News, RSS from biomajmanager.plugins import Plugins from biomajmanager.writer import Writer from biomajmanager.utils import Utils __author__ = 'tuco' class UtilsForTests(object): """Copy properties files into a temporary directory and update properties to use a temp directory""" def __init__(self): """Setup the temp dirs and files.""" self.global_properties = None self.manager_properties = None self.manager = None self.mongo_client = None self.mongo_url = None self.db_test = 'bm_db_test' self.col_test = 'bm_col_test' self.test_dir = tempfile.mkdtemp('biomaj-manager_tests') self.no_dir_rights = 16384 self.full_dir_rights = 16895 if 'MONGODB_URI' in os.environ: self.mongo_client = MongoClient(os.environ.get('MONGODB_URI')) self.mongo_url = os.environ.get('MONGODB_URI') if 'MONGODB_DBNAME' in os.environ: self.db_test = os.environ.get('MONGODB_DBNAME') # Set a mongo client. Can be set from global.properties if self.mongo_client is None: self.mongo_client = MongoClient('mongodb://localhost:27017') self.mongo_url = 'mongodb://localhost:27017' # Global part self.conf_dir = os.path.join(self.test_dir, 'conf') if not os.path.exists(self.conf_dir): os.makedirs(self.conf_dir) self.data_dir = os.path.join(self.test_dir, 'data') if not os.path.exists(self.data_dir): os.makedirs(self.data_dir) self.log_dir = os.path.join(self.test_dir, 'log') if not os.path.exists(self.log_dir): os.makedirs(self.log_dir) self.process_dir = os.path.join(self.test_dir, 'process') if not os.path.exists(self.process_dir): os.makedirs(self.process_dir) self.lock_dir = os.path.join(self.test_dir, 'lock') if not os.path.exists(self.lock_dir): os.makedirs(self.lock_dir) self.cache_dir = os.path.join(self.test_dir, 'cache') if not os.path.exists(self.cache_dir): os.makedirs(self.cache_dir) # Manager part self.template_dir = os.path.join(self.test_dir, 'templates') if not os.path.exists(self.template_dir): os.makedirs(self.template_dir) self.news_dir = os.path.join(self.test_dir, 'news') if not os.path.exists(self.news_dir): os.makedirs(self.news_dir) self.prod_dir = os.path.join(self.test_dir, 'production') if not os.path.exists(self.prod_dir): os.makedirs(self.prod_dir) self.plugins_dir = os.path.join(self.test_dir, 'plugins') if not os.path.exists(self.plugins_dir): os.makedirs(self.plugins_dir) self.tmp_dir = os.path.join(self.test_dir, 'tmp') if not os.path.exists(self.tmp_dir): os.makedirs(self.tmp_dir) if self.global_properties is None: self.__copy_test_global_properties() if self.manager_properties is None: self.__copy_test_manager_properties() def copy_file(self, ofile=None, todir=None): """ Copy a file from the test dir to temp test zone :param ofile: File to copy :param todir: Destinatin directory :return: """ curdir = self.__get_curdir() fromdir = os.path.join(curdir, ofile) todir = os.path.join(todir, ofile) shutil.copyfile(fromdir, todir) def copy_news_files(self): """ Copy news file from test directory to 'news' testing directory :return: """ curdir = self.__get_curdir() for news in ['news1.txt', 'news2.txt', 'news3.txt']: from_news = os.path.join(curdir, news) to_news = os.path.join(self.news_dir, news) shutil.copyfile(from_news, to_news) def 
copy_plugins(self): """ Copy plugins from test directory to 'plugins' testing directory :return: """ dsrc = 'tests/plugins' for ofile in os.listdir(dsrc): shutil.copyfile(os.path.join(dsrc, ofile), os.path.join(self.plugins_dir, ofile)) def copy_templates(self): """ Copy templates from test directory to 'templates' testing directory :return: """ dsrc = 'tests/templates' for ffile in os.listdir(dsrc): shutil.copyfile(os.path.join(dsrc, ffile), os.path.join(self.template_dir, ffile)) def clean(self): """Deletes temp directory""" shutil.rmtree(self.test_dir) Manager.set_verbose(False) Manager.set_simulate(False) def drop_db(self): """Drop the mongo database after using it and close the connection""" self.mongo_client.drop_database(self.db_test) self.mongo_client.close() def __get_curdir(self): """Get the current directory""" return os.path.dirname(os.path.realpath(__file__)) def __copy_test_manager_properties(self): """Copy manager.properties file to testing directory""" self.manager_properties = os.path.join(self.conf_dir, 'manager.properties') curdir = self.__get_curdir() manager_template = os.path.join(curdir, 'manager.properties') fout = open(self.manager_properties, 'w') with open(manager_template, 'r') as fin: for line in fin: if line.startswith('template.dir'): fout.write("template.dir=%s\n" % self.template_dir) elif line.startswith('news.dir'): fout.write("news.dir=%s\n" % self.news_dir) elif line.startswith('production.dir'): fout.write("production.dir=%s\n" % self.prod_dir) elif line.startswith('plugins.dir'): fout.write("plugins.dir=%s\n" % self.plugins_dir) elif line.startswith('rss.file'): fout.write("rss.file=%s/rss.xml\n" % self.news_dir) else: fout.write(line) fout.close() def __copy_test_global_properties(self): """Copy global.properties file into testing directory""" # Default config file config_file = 'global.properties' curdir = self.__get_curdir() global_template = os.path.join(curdir, config_file) self.global_properties = os.path.join(self.conf_dir, config_file) fout = open(self.global_properties, 'w') with open(global_template, 'r') as fin: for line in fin: if line.startswith('cache.dir'): fout.write("cache.dir=%s\n" % self.cache_dir) elif line.startswith('conf.dir'): fout.write("conf.dir=%s\n" % self.conf_dir) elif line.startswith('log.dir'): fout.write("log.dir=%s\n" % self.log_dir) elif line.startswith('data.dir'): fout.write("data.dir=%s\n" % self.data_dir) elif line.startswith('process.dir'): fout.write("process.dir=%s\n" % self.process_dir) elif line.startswith('lock.dir'): fout.write("lock.dir=%s\n" % self.lock_dir) elif line.startswith('db.url'): fout.write("db.url=%s\n" % self.mongo_url) elif line.startswith('db.name'): fout.write("db.name=%s" % self.db_test) else: fout.write(line) Utils.ok(line) fout.close() class TestBiomajManagerUtils(unittest.TestCase): """Class for testing manager.utils class""" def setUp(self): self.utils = UtilsForTests() def tearDown(self): self.utils.clean() @attr('utils') @attr('utils.cleansymlinks') def test_cleanSymLinksPathArgsWrongThrows(self): """Checks the methods throws when path args is None or does not exist""" with self.assertRaises(SystemExit): Utils.clean_symlinks(path="/does/not/exist") with self.assertRaises(SystemExit): Utils.clean_symlinks(path=None) @attr('utils') @attr('utils.cleansymlinks') def test_cleanSymlinksNoDelete(self): """Checks the method get the correct list of symlinks and report them""" open(os.path.join(self.utils.data_dir, 'news1.txt'), 'a').close() os.symlink(os.path.join(self.utils.data_dir, 
'news1.txt'), os.path.join(self.utils.tmp_dir, 'news1.txt')) os.symlink('/tmp/does_not_exist', os.path.join(self.utils.tmp_dir, 'not_found')) self.assertTrue(Utils.clean_symlinks(path=self.utils.tmp_dir, delete=False)) @attr('utils') @attr('utils.cleansymlinks') def test_cleanSymlinksNoDeleteNoDeadLinks(self): """Checks the method get returns True when no dead links found""" Manager.set_verbose(True) self.assertEqual(Utils.clean_symlinks(path=self.utils.tmp_dir, delete=False), 0) Manager.set_verbose(False) @attr('utils') @attr('utils.cleansymlinks') def test_cleanSymlinksWithDelete(self): """Checks the method get the correct list of symlinks and report them""" open(os.path.join(self.utils.data_dir, 'news1.txt'), 'a').close() os.symlink(os.path.join(self.utils.data_dir, 'news1.txt'), os.path.join(self.utils.tmp_dir, 'news1.txt')) os.symlink('/tmp/does_not_exist', os.path.join(self.utils.tmp_dir, 'not_found')) self.assertTrue(Utils.clean_symlinks(path=self.utils.tmp_dir, delete=True)) @attr('utils') @attr('utils.deepestdirs') def test_UtilsDeepestDirErrorNoPath(self): """Check methods checks are OK""" with self.assertRaises(SystemExit): Utils.get_deepest_dirs() @attr('utils') @attr('utils.deepestdirs') def test_DeepestDirErrorPathNotExists(self): """Check methods checks are OK""" with self.assertRaises(SystemExit): Utils.get_deepest_dirs(path='/not_found') @attr('utils') @attr('utils.deepestdir') def test_DeepestDir(self): """Check we get the right deepest dir from a complete path""" tdir = os.path.join(self.utils.tmp_dir, 'a', 'b', 'c') if not os.path.exists(tdir): os.makedirs(tdir) deepest = Utils.get_deepest_dir(tdir) self.assertEqual(deepest, 'c') shutil.rmtree(self.utils.tmp_dir) @attr('utils') @attr('utils.deepestdir') def test_DeepestDirFull(self): """Check we get the right full deepest dir""" tdir = os.path.join(self.utils.tmp_dir, 'a', 'b', 'c', 'd') if not os.path.exists(tdir): os.makedirs(tdir) deepest = Utils.get_deepest_dir(tdir, full=True, limit=1) self.assertEqual(deepest, tdir) deepest = Utils.get_deepest_dir(tdir, full=False, limit=1) self.assertEqual(deepest, os.path.basename(tdir)) shutil.rmtree(self.utils.tmp_dir) @attr('utils') @attr('utils.deepestdir') def test_DeepestDir(self): """Check we get the right list of deepest dir""" dir0 = os.path.join(self.utils.tmp_dir, 'a', 'b') dir1 = os.path.join(dir0, 'c') dir2 = os.path.join(dir0, 'd') for od in [dir1, dir2]: if not os.path.exists(od): os.makedirs(od) deepest = Utils.get_deepest_dir(dir0) self.assertEqual(len(deepest), 1) self.assertEqual(deepest[0], 'c') shutil.rmtree(self.utils.tmp_dir) @attr('utils') @attr('utils.deepestdirs') def test_DeepestDirs(self): """Check we get the right list of deepest dir""" dir0 = os.path.join(self.utils.tmp_dir, 'a', 'b') dir1 = os.path.join(dir0, 'c') dir2 = os.path.join(dir0, 'd') for od in [dir1, dir2]: if not os.path.exists(od): os.makedirs(od) deepest = Utils.get_deepest_dirs(dir0) c1 = deepest[0] d1 = deepest[1] self.assertEqual(c1, 'c') self.assertEqual(d1, 'd') shutil.rmtree(self.utils.tmp_dir) @attr('utils') @attr('utils.getbrokenlinks') def test_getbrokenlinksNoPathThrows(self): """Check it throw when no path given as arg""" utils = Utils() with self.assertRaises(SystemExit): utils.get_broken_links() @attr('utils') @attr('utils.getbrokenlinks') def test_getbrokenlinksWrongPathThrows(self): """Check it throw when path does not exist""" utils = Utils() with self.assertRaises(SystemExit): utils.get_broken_links(path="/does/not/exist") @attr('utils') @attr('utils.getbrokenlinks') 
def test_getbrokenlinksNoBrokenLinks(self): """Check it returns 0 broken link""" utils = Utils() self.assertEqual(utils.get_broken_links(path=self.utils.tmp_dir), 0) @attr('utils') @attr('utils.getbrokenlinks') def test_getbrokenlinksBrokenLinksOK(self): """Check it returns 1 broken links""" utils = Utils() root = self.utils.tmp_dir link = os.path.join(root, 'foobar') if os.path.islink(link): os.remove(link) os.symlink('/not_found', link) Manager.verbose = True self.assertEqual(utils.get_broken_links(path=root), 1) os.remove(link) @attr('utils') @attr('utils.deepestdirs') def test_DeepestDirsFull(self): """Check we get the right list of deepest dir""" dir0 = os.path.join(self.utils.tmp_dir, 'a', 'b') dir1 = os.path.join(dir0, 'c') dir2 = os.path.join(dir0, 'd') for od in [dir1, dir2]: if not os.path.exists(od): os.makedirs(od) deepest = Utils.get_deepest_dirs(dir0, full=True) c1 = deepest[0] d1 = deepest[1] self.assertEqual(c1, dir1) self.assertEqual(d1, dir2) shutil.rmtree(self.utils.tmp_dir) @attr('utils') @attr('utils.deepestdirs') def test_DeepestDirsFullWithLimit(self): """Check we get the right list of deepest dir using limit in depth""" dir0 = os.path.join(self.utils.tmp_dir, 'a', 'b') dir1 = os.path.join(dir0, 'c') dir2 = os.path.join(dir0, 'd') for od in [dir1, dir2]: if not os.path.exists(od): os.makedirs(od) deepest = Utils.get_deepest_dirs(dir0, full=True, limit=3) c1 = deepest[0] d1 = deepest[1] self.assertEqual(c1, dir1) self.assertEqual(d1, dir2) shutil.rmtree(self.utils.tmp_dir) @attr('utils') @attr('utils.getfiles') def test_GetFiles(self): """Check we get the right file list from a directory""" tmp_file1 = os.path.join(self.utils.tmp_dir, '1foobar.tmp') tmp_file2 = os.path.join(self.utils.tmp_dir, '2foobar.tmp') open(tmp_file1, mode='a').close() open(tmp_file2, mode='a').close() files = Utils.get_files(path=self.utils.tmp_dir) b_tmp_file1 = os.path.basename(tmp_file1) b_tmp_file2 = os.path.basename(tmp_file2) self.assertEqual(b_tmp_file1, files[0]) self.assertEqual(b_tmp_file2, files[1]) shutil.rmtree(self.utils.tmp_dir) @attr('utils') @attr('utils.getfiles') def test_GetFilesErrorNoPath(self): """Check we get an error when omitting args""" with self.assertRaises(SystemExit): Utils.get_files() @attr('utils') @attr('utils.getfiles') def test_GetFilesErrorPathNotExists(self): """Check we get an error when omitting args""" with self.assertRaises(SystemExit): Utils.get_files(path='/not_found') @attr('utils') @attr('utils.getsubtree') def test_GetSubTreeWarnAndReturnsEmptyList(self): """Check the method prints a warning message and returns empty list""" self.assertListEqual(Utils.get_subtree(path=None), []) @attr('utils') @attr('utils.getsubtree') def test_GetSubTreeRetrunsRgihtSubTree(self): """Checks the method returns the right subtree list""" os.makedirs(os.path.join(self.utils.tmp_dir, 'sub', 'a1', 'a2', 'a3')) os.makedirs(os.path.join(self.utils.tmp_dir, 'sub', 'b1', 'b2', 'b3', 'b4')) os.makedirs(os.path.join(self.utils.tmp_dir, 'sub', 'c1', 'c2')) returned = Utils.get_subtree(path=os.path.join(self.utils.tmp_dir, 'sub')) expected = ["a1/a2/a3", "b1/b2/b3/b4", "c1/c2"] self.assertListEqual(sorted(returned), sorted(expected)) @attr('utils') @attr('utils.getsubtree') def test_GetSubTreeRetrunsRgihtSubTreeWithLimit(self): """Checks the method returns the right subtree list and limit is right used""" os.makedirs(os.path.join(self.utils.tmp_dir, 'sub', 'a1', 'a2', 'a3')) os.makedirs(os.path.join(self.utils.tmp_dir, 'sub', 'b1', 'b2', 'b3', 'b4')) 
os.makedirs(os.path.join(self.utils.tmp_dir, 'sub', 'c1', 'c2')) returned = Utils.get_subtree(path=os.path.join(self.utils.tmp_dir, 'sub'), limit=10) expected = ["a1/a2/a3", "b1/b2/b3/b4", "c1/c2"] self.assertListEqual(sorted(returned), sorted(expected)) @attr('utils') @attr('utils.getnow') def test_UtilsGetNow(self): """Check method returns right time""" now = Utils.time2datefmt(time.time()) self.assertEqual(now, Utils.get_now()) @attr('utils') @attr('utils.elapsedtime') def test_ElapsedTimeError(self): """Check this method throw an error""" with self.assertRaises(SystemExit): Utils.elapsed_time() @attr('utils') @attr('utils.elapsedtime') def test_ElapsedtimeNoTimerStop(self): """Check we go deeper in method setting time_stop to None""" Utils.timer_stop = None Utils.start_timer() self.assertIsInstance(Utils.elapsed_time(), float) @attr('utils') @attr('utils.print') def test_UtilsSayReturnsNone(self): """Check the method returns empty string""" self.assertIsNone(Utils.uprint(None)) @attr('utils') @attr('utils.print') def test_UtilsSayReturnsOK(self): """Check the method returns correct message""" expected = "OK\n" msg = "OK" # Python3 support try: from StringIO import StringIO except ImportError: from io import StringIO out = StringIO() Utils.uprint(msg, to=out) returned = out.getvalue() self.assertEqual(expected, returned) @attr('utils') @attr('utils.time2date') def test_Time2dateNoArgs(self): """Check the method throws an error if no args given""" with self.assertRaises(TypeError): Utils.time2date() @attr('utils') @attr('utils.time2date') def test_Time2dateReturnedOK(self): """Check value returned is right object""" self.assertIsInstance(Utils.time2date(time.time()), datetime) @attr('utils') @attr('utils.time2datefmt') def test_Time2datefmtNoArgs(self): """Check the method throws an error if no args given""" with self.assertRaises(TypeError): Utils.time2datefmt() @attr('utils') @attr('utils.time2datefmt') def test_Time2datefmtReturnedOK(self): """Check value returned is right object""" self.assertIsInstance(Utils.time2datefmt(time.time()), str) @attr('utils') @attr('utils.user') def test_UserUSEROK(self): """Check the testing user is ok with USER""" buser = blogname = None if 'USER' in os.environ: buser = os.environ['USER'] if 'LOGNAME' in os.environ: blogname = os.environ['LOGNAME'] del(os.environ['LOGNAME']) self.assertEqual(Utils.user(), buser) if buser: os.environ['USER'] = buser if blogname: os.environ['LOGNAME'] = blogname @attr('utils') @attr('utils.user') def test_UserLOGNAMEOK(self): """Check testing user is ok with LOGNAME""" logname = os.getenv('LOGNAME') user = os.getenv('USER') del(os.environ['USER']) self.assertEqual(Utils.user(), logname) os.environ['USER'] = user @attr('utils') @attr('utils.user') def test_userNOTOK(self): """Check the testing user is ok""" user = "fakeUser" self.assertNotEqual(Utils.user(), user) # @attr('utils') # @attr('utils.user') # def test_userNoEnvironmentVariableThrows(self): # """Check method throws when none of USER and LOGNAME is available""" # buser = blogname = buname = blname = None # if 'USER' in os.environ: # buser = os.environ['USER'] # del os.environ['USER'] # if 'LOGNAME' in os.environ: # blogname = os.environ['LOGNAME'] # del os.environ['LOGNAME'] # if 'USERNAME' in os.environ: # buname = os.environ['USERNAME'] # del os.environ['USERNAME'] # if 'LNAME' in os.environ: # buname = os.environ['LNAME'] # del os.environ['LNAME'] # with self.assertRaises(SystemExit): # Utils.user() # if buser: # os.environ['USER'] = buser # if blogname: # 
os.environ['LOGNAME'] = blogname # if buname: # os.environ['USERNAME'] = buname # if blname: # os.environ['LNAME'] = blname @attr('utils') @attr('utils.verbose') def test_UtilsVerboseReturnsOK(self): """Check the verbose method returns correct message""" expected = "[VERBOSE] OK\n" msg = "OK" # Python3 support try: from StringIO import StringIO except ImportError: from io import StringIO out = StringIO() Manager.verbose = True Utils.show_verbose = True Utils.verbose(msg, to=out) returned = out.getvalue() self.assertEqual(expected, returned) Manager.verbose = False Utils.show_verbose = False @attr('utils') @attr('utils.nowarning') def test_UtilsNoWarning(self): """Check warning messages""" import sys serr = sys.stderr try: expected = "[WARNING] OK\n" msg = "OK" try: from StringIO import StringIO except ImportError: from io import StringIO err = StringIO() sys.stderr = err Utils.show_warn = True Utils.warn(msg) returned = err.getvalue() self.assertEqual(expected, returned) finally: sys.stderr = serr @attr('utils') @attr('utils.nowarning') def test_UtilsNoWarningSilent(self): """Check warning messages""" import sys serr = sys.stderr try: Utils.show_warn = False expected = "" msg = "OK" try: from StringIO import StringIO except ImportError: from io import StringIO err = StringIO() sys.stderr = err Utils.show_warn = False Utils.warn(msg) returned = err.getvalue() self.assertEqual(expected, returned) Utils.show_warn = True finally: sys.stderr = serr class TestBiomajManagerWriter(unittest.TestCase): """Class for testing biomajmanager.writer class""" def setUp(self): """Setup stuff""" self.utils = UtilsForTests() self.utils.copy_templates() # Maker out test global.properties set as env var os.environ['BIOMAJ_CONF'] = self.utils.global_properties def tearDown(self): """Finish""" self.utils.clean() @attr('writer') @attr('writer.init') def test_WriterInitNoArgsThrowsException(self): """Check init throws exception with no args""" with self.assertRaises(SystemExit): Writer() @attr('writer') @attr('writer.init') def test_WriterInitOKWithTemplateDirOK(self): """Check object init is OK""" writer = Writer(template_dir=self.utils.template_dir) self.assertIsNone(writer.output) @attr('writer') @attr('writer.init') def test_WriterInitOKWithTemplateDirNotOK(self): """Check object init with false template_dir throws exception""" with self.assertRaises(SystemExit): Writer(template_dir="/not_found") @attr('writer') @attr('writer.init') def test_WriterInitConfigTemplateDirOK(self): """Check object init with config set correct tempalte_dir""" manager = Manager() writer = Writer(config=manager.config) self.assertEqual(manager.config.get('MANAGER', 'template.dir'), writer.template_dir) @attr('writer') @attr('writer.init') def test_WriterInitOKWithConfigNoSection(self): """Check object with config without section throws exception""" manager = Manager() manager.config.remove_section('MANAGER') with self.assertRaises(SystemExit): Writer(config=manager.config) @attr('writer') @attr('writer.init') def test_WriterInitOKWithConfigNoOption(self): """ Check object with config without section throws exception :return: """ manager = Manager() manager.config.remove_option('MANAGER', 'template.dir') with self.assertRaises(SystemExit): Writer(config=manager.config) @attr('writer') @attr('writer.write') def test_WriterWriteNoFileThrowsException(self): """Check method throws exception if not 'file' args passed""" writer = Writer(template_dir=self.utils.template_dir) with self.assertRaises(SystemExit): writer.write() @attr('writer') 
@attr('writer.write') def test_WriterWriteWrongTemplateFileThrowsException(self): """Check the method throws exception while template file does not exists""" writer = Writer(template_dir=self.utils.template_dir) with self.assertRaises(SystemExit): writer.write(template="doesnotexist.txt") @attr('writer') @attr('writer.write') def test_WriterWriteTemplateFileOKTemplateSyntaxError(self): """Check the method throws exception while template file does not exists""" writer = Writer(template_dir=self.utils.template_dir) with self.assertRaises(SystemExit): writer.write(template="wrong_syntax.txt") @attr('writer') @attr('writer.write') def test_WriterWrtieTemplateFileOKOutputIsNoneOK(self): """Check method prints OK on STDOUT""" writer = Writer(template_dir=self.utils.template_dir) data = {'test': 'working test!'} self.assertTrue(writer.write(template="test.txt", data=data)) @attr('writer') @attr('writer.write') def test_WriterWriteTemplateFileOKContentOK(self): """Check the output file written has right content""" output = os.path.join(self.utils.template_dir, "output.txt") data = {'test': 'working test!'} writer = Writer(template_dir=self.utils.template_dir, output=output) self.assertTrue(writer.write(template="test.txt", data=data)) with open(output, 'r') as of: self.assertEqual("This is just a working test!", of.readline().strip()) @attr('writer') @attr('writer.write') def test_WriterWriteTemplateFileOKOutputThrows(self): """Check the output file is wrong and method throws exception""" output = os.path.join(self.utils.template_dir, "unkown_directory", "output.txt") data = {'test': 'working test!'} writer = Writer(template_dir=self.utils.template_dir, output=output) with self.assertRaises(SystemExit): writer.write(template="test.txt", data=data) class TestBiomajManagerLinks(unittest.TestCase): """Class for testing biomajmanager.links""" def setUp(self): """Setup stuff""" self.utils = UtilsForTests() os.environ['BIOMAJ_CONF'] = self.utils.global_properties # Default switch off simulate and verbose mode for each test Manager.simulate = False Manager.verbose = False # Links need to have a production dir ready, so we do it self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') manager._current_release = '54' manager.bank.bank['production'].append({'release': '54', 'data_dir': self.utils.data_dir, 'prod_dir': 'alu_54'}) self.utils.manager = manager os.makedirs(os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'flat')) os.makedirs(os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'uncompressed')) os.makedirs(os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'golden')) os.makedirs(os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'blast2')) # Create a symlink os.symlink(os.path.join(self.utils.data_dir, 'alu', 'alu_54'), os.path.join(self.utils.data_dir, 'alu', 'current')) self.utils.copy_file(ofile='news1.txt', todir=os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'blast2')) self.utils.copy_file(ofile='news2.txt', todir=os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'golden')) # Default dirs and files to create self.utils.dirs = {'golden': [{'target': 'index/golden'}], 'blast2': [{'target': 'index/blast2'}], 'flat': [{'target': 'ftp'}], 'uncompressed': [{'target': 'release', 'fallback': 'flat'}]} self.utils.files = {'golden': [{'target': 'index/golden'}], 'blast2': [{'target': 'fasta', 'remove_ext': True}, {'target': 'index/blast2'}]} self.utils.clones = {'index': [{'source': 'golden'}, {'source': 'blast2'}]} def tearDown(self): """Clean all""" 
self.utils.clean() # As we created an entry in the database ('alu'), we clean the database self.utils.drop_db() @attr('links') @attr('links.clonestructure') def test_clonsestructre(self): """Checks method build subtree structure correctly""" links = Links(manager=self.utils.manager) links.manager.set_verbose(True) links._clone_structure(source='blast2', target='index') self.assertTrue(os.path.isfile(os.path.join(self.utils.prod_dir, 'index', 'blast2', 'news1.txt'))) @attr('links') @attr('links.clonestructure') def test_cloneStructureWtihSimulateAndVerbose(self): """Check no target are created""" links = Links(manager=self.utils.manager) links.manager.set_simulate(True) links.manager.set_verbose(True) self.assertTrue(links._clone_structure(source='blast2', target='index')) @attr('links') @attr('links.clonestructure') def test_cloneStructureWithRemoveExt(self): """Check the method add some more created links due to remove_ext option""" links = Links(manager=self.utils.manager) links._clone_structure(source='blast2', target='index', remove_ext=True) self.assertEqual(links.created_links, 2) @attr('links') @attr('links.clonestructure') def test_cloneStructureWithRemoveExtVerboseON(self): """Check the method add some more created links due to remove_ext option""" links = Links(manager=self.utils.manager) links.manager.set_verbose(True) links._clone_structure(source='golden', target='index', remove_ext=True) self.assertEqual(links.created_links, 2) @attr('links') @attr('links.clonestructure') def test_cloneStructureThrowsOSError(self): """Check the method throws exception""" links = Links(manager=self.utils.manager) links.manager.set_verbose(False) os.chmod(self.utils.prod_dir, self.utils.no_dir_rights) with self.assertRaises(SystemExit): links._clone_structure(source='golden', target='index', remove_ext=True) os.chmod(self.utils.prod_dir, self.utils.full_dir_rights) @attr('links') @attr('links.clonestructure') def test_cloneStructureNoFiles(self): """Check the method continue when no files in subtree""" links = Links(manager=self.utils.manager) links.manager.set_verbose(True) os.unlink(os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'golden', 'news2.txt')) self.assertTrue(links._clone_structure(source='golden', target='index')) @attr('links') @attr('links.init') def test_LinksInitOK(self): """Check init Links instance is OK""" links = Links(manager=self.utils.manager) self.assertEqual(links.created_links, 0) @attr('links') @attr('links.init') def test_ConstructorThrowsNoCurrentReleaseAvailable(self): """Checks the constructor throws an error if the current release for the bank is not available""" self.utils.manager._current_release = None with self.assertRaises(SystemExit): links = Links(manager=self.utils.manager) @attr('links') @attr('links.init') def test_LinksInitNoManagerThrowsException(self): """Check init instance without manager throws exception""" with self.assertRaises(SystemExit): Links() @attr('links') @attr('links.init') def test_LinksInitWrongManagerInstanceThrows(self): """Check init thorws exception is manager args is not instance of Manager""" with self.assertRaises(SystemExit): Links(manager=self.utils) @attr('links') @attr('links.addlink') def test_LinkAddLinkAddOneOK(self): """Check method increase by 1 ok""" link = Links(manager=self.utils.manager) link.add_link() link.add_link() self.assertEqual(link.add_link(), 3) @attr('links') @attr('links.addlink') def test_LinkAddLinkAddTwoOK(self): """Check method increase correctly with arg""" link = 
Links(manager=self.utils.manager) link.add_link(inc=1) link.add_link(inc=2) link.add_link(inc=3) self.assertEqual(link.created_links, 6) @attr('links') @attr('links.checklinks') def test_LinksCheckLinksSimulateTrueVerboseFalseOK(self): """Check method returns right number of simulated created links""" links = Links(manager=self.utils.manager) Manager.set_simulate(True) Manager.set_verbose(True) # Check setUp, it creates 3 dirs self.assertEqual(links.check_links(clone_dirs=self.utils.clones, dirs=self.utils.dirs, files=self.utils.files), 10) @attr('links') @attr('links.dolinks') def test_LinksDoLinksThrowsWrongUser(self): """Check method throws exception because user not authorized""" links = Links(manager=self.utils.manager) buser = blogname = None if 'USER' in os.environ: buser = os.environ['USER'] os.environ['USER'] = 'fakeuser' if 'LOGNAME' in os.environ: blogname = os.environ['LOGNAME'] os.environ['LOGNAME'] = 'fakeuser' with self.assertRaises(SystemExit): links.do_links() if buser: os.environ['USER'] = buser if blogname: os.environ['LOGNAME'] = blogname @attr('links') @attr('links.dolinks') def test_LinksDoLinksArgsDirsAndFilesNone(self): """Check method with args set to None, creates the right number of links""" links = Links(manager=self.utils.manager) self.assertEqual(links.do_links(dirs=None, files=self.utils.files, clone_dirs=self.utils.clones), 7) @attr('links') @attr('links.dolinks') def test_LinksDoLinksArgsDirsMatchesSetUp(self): """Check method creates the right number of link passing a list of dirs matching setUp""" links = Links(manager=self.utils.manager) exp_dirs = {'flat': [{'target': 'ftp'}], 'uncompressed': [{'target': 'release'}], 'blast2': [{'target': 'index/blast2'}]} self.assertEqual(links.do_links(dirs=exp_dirs, files=None), 6) @attr('links') @attr('links.dolinks') def test_LinksDoLinksArgsFilesMatchesSetUp(self): """Check method creates the right number of link passing a list of dirs matching setUp""" links = Links(manager=self.utils.manager) # We copy 3 files into a source dir to have 3 more created links calling generate_files_link self.utils.copy_file(ofile='news1.txt', todir=os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'blast2')) self.utils.copy_file(ofile='news2.txt', todir=os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'blast2')) self.utils.copy_file(ofile='news3.txt', todir=os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'blast2')) exp_files = {'blast2': [{'target': 'index/blast2'}]} self.assertEqual(links.do_links(dirs=self.utils.dirs, files=exp_files), 8) @attr('links1') @attr('links.dolinks') def test_LinksDoLinksDirsSetNone(self): """Passed method with args dirs set to None""" links = Links(manager=self.utils.manager) self.assertEqual(links.do_links(dirs=None, files=None, clone_dirs=None), 6) @attr('links') @attr('links.preparelinks') def test_LinksPrepareLinksNoArgsThrows(self): """Check method throws if not args source given""" link = Links(manager=self.utils.manager) with self.assertRaises(SystemExit): link._prepare_links() @attr('links') @attr('links.preparelinks') def test_LinksPrepareLinksSourceOKTargetMissingThrows(self): """Check method throws if source given but no other args""" link = Links(manager=self.utils.manager) with self.assertRaises(SystemExit): link._prepare_links(source=self.utils.data_dir) @attr('links') @attr('links.preparelinks') def test_LinksPrepareLinksArgsOKConfigDataDirMissingThrows(self): """Check method throws if source given but no other args""" self.utils.manager.config.remove_option('GENERAL', 'data.dir') 
with self.assertRaises(SystemExit): link = Links(manager=self.utils.manager) @attr('links') @attr('links.preparelinks') def test_LinksPrepareLinksArgsOKConfigProdDirMissingThrows(self): """Check method throws if source given but no other args""" self.utils.manager.config.remove_option('MANAGER', 'production.dir') with self.assertRaises(SystemExit): link = Links(manager=self.utils.manager) @attr('links') @attr('links.preparelinks') def test_LinksPrepareLinksRequiresSetDirMissingReturnFalse(self): """Check the method returns False if require set and required dir not here""" links = Links(manager=self.utils.manager) self.assertFalse(links._prepare_links(source='uncompressed', target='flat_test', requires='not_here')) @attr('links') @attr('links.preparelinks') def test_LinksPrepareLinksArgsOKSourceNotDirReturnsFalse(self): """Check method returns False if data.dir does not exist""" link = Links(manager=self.utils.manager) link.manager.config.set('GENERAL', 'data.dir', '/dir/does_not/') link.manager.set_verbose(True) self.assertFalse(link._prepare_links(source='/exist', target='link_test')) @attr('links') @attr('links.preparelinks') def test_LinksPrepareLinksArgsOKPermissionDenied(self): """Check method throws when permissions denied to create dir""" link = Links(manager=self.utils.manager) link.manager.set_verbose(True) os.chmod(self.utils.prod_dir, self.utils.no_dir_rights) with self.assertRaises(SystemExit): link._prepare_links(source='uncompressed', target='link_test') os.chmod(self.utils.prod_dir, self.utils.full_dir_rights) @attr('links') @attr('links.preparelinks') def test_LinksPrepareLinksWithFallbackOK(self): """Check method passes OK if fallback given""" link = Links(manager=self.utils.manager) # Remove uncompressed directory, and fallback to flat os.removedirs(os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'uncompressed')) link.manager.set_verbose(True) self.assertTrue(link._prepare_links(source='uncompressed', target='flat_test', fallback='flat')) @attr('links') @attr('links.preparelinks') def test_LinksPrepareLinksWithFallbackFalse(self): """Check method returns False if fallback given but does not exist either""" link = Links(manager=self.utils.manager) # Remove uncompressed directory, and fallback to flat os.removedirs(os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'uncompressed')) os.removedirs(os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'flat')) link.manager.set_verbose(True) self.assertFalse(link._prepare_links(source='uncompressed', target='flat_test', fallback='flat')) @attr('links') @attr('links.preparelinks') def test_LinksPrepareLinksWithFallbackUseDeepestOK(self): """Check method passes OK if fallback given""" link = Links(manager=self.utils.manager) # Remove uncompressed directory, and fallback to flat self.assertTrue(link._prepare_links(source='uncompressed', target='flat_test', get_deepest=True)) @attr('links') @attr('links.preparelinks') def test_LinksPrepareLinksWithSimulateModeOK(self): """Check method prints in simulate mode""" link = Links(manager=self.utils.manager) link.manager.set_simulate(True) link.manager.set_verbose(True) # Remove uncompressed directory, and fallback to flat self.assertTrue(link._prepare_links(source='uncompressed', target='flat_test')) @attr('links') @attr('links.preparelinks') def test_LinksPrepareLinksMakeTargetDirThrows(self): """Check method throws when making target dir""" link = Links(manager=self.utils.manager) link.manager.set_simulate(False) link.manager.set_verbose(False) # Remove uncompressed directory, and 
fallback to flat self.assertFalse # with self.assertRaises(SystemExit): # link._prepare_links(source='uncompressed', target='../../../../flat_test') @attr('links') @attr('links.makelinks') def test_LinksMakeLinksNoArgsReturns0(self): """Check the method returns 0 when no 'links' args given""" link = Links(manager=self.utils.manager) self.assertEqual(link._make_links(), 0) @attr('links') @attr('links.makelinks') def test_LinksMakeLinksPathAlreadyExistsReturns0(self): """Check the method returns 0 because source and target already exist""" link = Links(manager=self.utils.manager) source = os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'uncompressed') target = os.path.join(self.utils.prod_dir, 'uncmp_link') os.symlink(os.path.relpath(source, start=target), target) self.assertEqual(0, link._make_links(links=[(source, target)])) os.remove(target) @attr('links') @attr('links.makelinks') def test_LinksMakeLinksPathNotExistsSimulateOnVerboseOnReturns0(self): """Check the method returns 0 because simulate and verbose mode on""" link = Links(manager=self.utils.manager) source = os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'uncompressed') target = os.path.join(self.utils.prod_dir, 'uncmp_link') link.manager.set_simulate(True) link.manager.set_verbose(True) link._prepare_links(source=source, target=target) self.assertEqual(0, link._make_links(links=[(source, target)])) @attr('links') @attr('links.makelinks') def test_LinksMakeLinksPathNotExistsSimulateOnVerboseOffReturns1(self): """Check the method returns 1 because simulate on and verbose off, nothing created but link added as created""" link = Links(manager=self.utils.manager) source = os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'uncompressed') target = os.path.join(self.utils.prod_dir, 'uncmp_link') link.manager.set_simulate(True) link.manager.set_verbose(False) link._prepare_links(source=source, target=target) self.assertEqual(1, link._make_links(links=[(source, target)])) @attr('links') @attr('links.makelinks') def test_LinksMakeLinksPathNotExistsHardTrueThrowsError(self): """Check the method throws an exception (OSError=>SystemExit) with (hard=True)""" link = Links(manager=self.utils.manager) source = os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'uncompressed') target = os.path.join(self.utils.prod_dir, 'uncmp_link') link._prepare_links(source=source, target=target) # We delete the source directory to raise an OSError os.removedirs(target) with self.assertRaises(SystemExit): link._make_links(links=[(source, target)], hard=True) @attr('links') @attr('links.makelinks') def test_LinksMakeLinksPathNotExistsHardFalseThrowsError(self): """Check the method throws an exception (OSError=>SystemExit) with (hard=False)""" link = Links(manager=self.utils.manager) source = os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'uncompressed') target = os.path.join(self.utils.prod_dir, 'uncmp_link') link._prepare_links(source=source, target=target) # We delete the source directory to raise an OSError os.removedirs(target) with self.assertRaises(SystemExit): link._make_links(links=[(source, target)]) @attr('links') @attr('links.generatefileslink') def test_LinksGenerateFilesLink_PrepareLinksReturns0(self): """Check _generate_files_link returns 0 because prepare_links returns > 0""" link = Links(manager=self.utils.manager) source = os.path.join(self.utils.data_dir, 'not_found') target = os.path.join(self.utils.conf_dir, 'not_link') self.assertEqual(0, link._generate_files_link(source=source, target=target)) @attr('links') 
@attr('links.generatefileslink') def test_LinksGenerateFilesLinkNotNoExtCreatedLinksOKVerboseOn(self): """Check method returns correct number of created links (no_ext=False)""" link = Links(manager=self.utils.manager) # Set our manager verbose mode to on link.manager.set_verbose(True) source_dir = os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'flat') target_dir = os.path.join(self.utils.prod_dir, 'flat_symlink') files = ['file1.txt', 'file2.txt'] # Create files to link for ifile in files: open(os.path.join(source_dir, ifile), 'w').close() # We check we've created 2 link, for file1 and file2 self.assertEqual(2, link._generate_files_link(source='flat', target='flat_symlink')) # We can also check link.source and link.target are equal to our source_dir and target_dir self.assertEqual(os.path.join(self.utils.data_dir, 'alu', 'current', 'flat'), link.source) self.assertEqual(target_dir, link.target) @attr('links') @attr('links.generatefileslink') def test_LinksGenerateFilesLinkNotNoExtCreatedLinksOKVerboseOnSmulateOn(self): """Check method returns correct number of created links (no_ext=False)""" link = Links(manager=self.utils.manager) # Set our manager verbose mode to on link.manager.set_verbose(True) link.manager.set_simulate(True) source_dir = os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'flat') target_dir = os.path.join(self.utils.prod_dir, 'flat_symlink') files = ['file1.txt', 'file2.txt'] # Create list of file to link for ifile in files: open(os.path.join(source_dir, ifile), 'w').close() # We check we've created 2 link, for file1 and file2 self.assertEqual(0, link._generate_files_link(source='flat', target='flat_symlink')) # We can also check link.source and link.target are equal to our source_dir and target_dir self.assertEqual(os.path.join(self.utils.data_dir, 'alu', 'current', 'flat'), link.source) self.assertEqual(target_dir, link.target) @attr('links') @attr('links.generatefileslink') def test_LinksGenerateFilesLinkNotNoExtCreatedLinksOKVerboseOnRemoveExtTrue(self): """Check method returns correct number of created links (remove_ext=True)""" link = Links(manager=self.utils.manager) # Set our manager verbose mode to on link.manager.set_verbose(True) source_dir = os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'flat') target_dir = os.path.join(self.utils.prod_dir, 'flat_symlink') files = ['file1.txt', 'file2.txt'] # Create list of file to link for i_file in files: open(os.path.join(source_dir, i_file), 'w').close() # We check we've created 4 link, for file1 and file2 twice (with and without extension) self.assertEqual(4, link._generate_files_link(source='flat', target='flat_symlink', remove_ext=True)) # We check the created links are OK without the extention (.txt) self.assertTrue(os.path.islink(os.path.join(target_dir, 'file1'))) self.assertTrue(os.path.islink(os.path.join(target_dir, 'file2'))) @attr('links') @attr('links.generatedirlink') def test_LinksGenerateDirLink_PrepareLinksReturns0(self): """Check _generate_files_link returns 0 because prepare_links returns > 0""" link = Links(manager=self.utils.manager) source = os.path.join(self.utils.data_dir, 'not_found') target = os.path.join(self.utils.conf_dir, 'not_link') self.assertEqual(0, link._generate_dir_link(source=source, target=target)) @attr('links') @attr('links.generatedirlink') def test_LinksGenerateDirLink_PrepareLinksReturns0SimulateOnVerobseOn(self): """Check _generate_files_link returns 0 because prepare_links returns > 0""" link = Links(manager=self.utils.manager) # Set our manager verbose mode to on 
link.manager.set_verbose(True) link.manager.set_simulate(True) source = os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'blast2') target = os.path.join(self.utils.conf_dir, 'blast2_link') self.assertEqual(0, link._generate_dir_link(source=source, target=target)) class TestBiomajManagerNews(unittest.TestCase): """Class for testing biomajmanager.news class""" def setUp(self): """Setup stuff""" self.utils = UtilsForTests() # Make our test global.properties set as env var os.environ['BIOMAJ_CONF'] = self.utils.global_properties def tearDown(self): """Clean""" self.utils.clean() @attr('manager') @attr('manager.news') @attr('manager.news.news') def test_NewWithMaxNews(self): """Check max_news args is OK""" news = News(max_news=10) self.assertEqual(news.max_news, 10) @attr('manager') @attr('manager.news') @attr('manager.news.news') def test_NewWithConfigOK(self): """Check init set everything from config as arg""" manager = Manager() news_dir = manager.config.get('NEWS', 'news.dir') news = News(config=manager.config) self.assertEqual(news_dir, news.news_dir) @attr('manager') @attr('manager.news') @attr('manager.news.news') def test_NewWithConfigNoSection(self): """Check init throws because config has no section 'NEWS'""" manager = Manager() manager.config.remove_section('NEWS') with self.assertRaises(SystemExit): News(config=manager.config) @attr('manager') @attr('manager.news') @attr('manager.news.news') def test_NewWithConfigNoOption(self): """Check init throws because config has no option 'news.dir""" manager = Manager() manager.config.remove_option('NEWS', 'news.dir') with self.assertRaises(SystemExit): News(config=manager.config) @attr('manager') @attr('manager.news') @attr('manager.news.news') def test_NewsNewsDirOK(self): """Check get_news set correct thing""" self.utils.copy_news_files() news = News() news.get_news(news_dir=self.utils.news_dir) self.assertEqual(news.news_dir, self.utils.news_dir) @attr('manager') @attr('manager.news') @attr('manager.news.news') def test_NewsDirNotADirectory(self): """Check the dir given is not a directory""" with self.assertRaises(SystemExit): News(news_dir="/foobar") @attr('manager') @attr('manager.news') @attr('manager.news.news') def test_NewsGetNewsWrongDirectory(self): """Check method throws exception with wrong dir calling get_news""" news = News() with self.assertRaises(SystemExit): news.get_news(news_dir='/not_found') @attr('manager') @attr('manager.news') @attr('manager.news.news') def test_NewsGetNewsNewsDirNotDefined(self): """Check method throws exception while 'news.dir' not defined""" news = News() with self.assertRaises(SystemExit): news.get_news() @attr('manager') @attr('manager.news') @attr('manager.news.news') def test_FileNewsContentEqual(self): """Check the content of 2 generated news files are identical""" self.utils.copy_news_files() data = [] for i in range(1, 4): data.append({'label': 'type' + str(i), 'date': str(i) + '0/12/2015', 'title': 'News%s Title' % str(i), 'text': 'This is text #%s from news%s\n' % (str(i), str(i)), 'item': i - 1}) news = News(news_dir=self.utils.news_dir) news_data = news.get_news() # Compare data if 'news' in news_data: for new in news_data['news']: dat = data.pop() for k in ['label', 'date', 'title', 'text', 'item']: self.assertEqual(dat[k], new[k]) else: raise unittest.E shutil.rmtree(self.utils.news_dir) class TestBiomajManagerRSS(unittest.TestCase): """Class for testing biomajmanager.news.RSS class""" def setUp(self): """Setup stuff""" self.utils = UtilsForTests() # Make our test global.properties 
set as env var os.environ['BIOMAJ_CONF'] = self.utils.global_properties def tearDown(self): """Clean""" self.utils.clean() @attr('manager') @attr('manager.news') @attr('manager.news.rss') def test_RSSWithMaxNews(self): """Check max_news args is OK""" rss = RSS(max_news=10) self.assertEqual(rss.max_news, 10) @attr('manager') @attr('manager.news') @attr('manager.news.rss') def test_RSSWithrfileArgs(self): """Check 'rfile' arg is parsed ok from __init__""" rfile = os.path.join(self.utils.news_dir, 'rss.xml') rss = RSS(rss_file=rfile) Manager.verbose = True self.assertEqual(rfile, rss.rss_file) @attr('manager') @attr('manager.news') @attr('manager.news.rss') def test_RSSGenerateRssWithrssfileArgs(self): """Check 'rss_file' arg is parsed ok from __init__""" rfile = os.path.join(self.utils.news_dir, 'rss.xml') manager = Manager() rss = RSS(config=manager.config) self.assertTrue(rss.generate_rss(rss_file=rfile)) @attr('manager') @attr('manager.news') @attr('manager.news.rss') def test_RSSGenerateRssWithrssfileArgsMissingOptionRaises(self): """Check method throw exception if option missing""" rfile = os.path.join(self.utils.news_dir, 'rss.xml') manager = Manager() manager.config.remove_option('RSS', 'feed.link') rss = RSS(config=manager.config) with self.assertRaises(SystemExit): rss.generate_rss(rss_file=rfile, data={'news': [{'title': "title", 'text': "Some blah", 'item': 1, 'date': "01/01/2000"}]}) @attr('manager') @attr('manager.news') @attr('manager.news.rss') def test_RSSGenerateRssWithrDataArgs(self): """Check 'data' arg is parsed ok from __init__""" rfile = os.path.join(self.utils.news_dir, 'rss.xml') manager = Manager() rss = RSS(config=manager.config) self.assertTrue(rss.generate_rss(data={'news': [{'title': 't1', 'item': 1, 'text': 'Hello world', 'date': "10/12/2014"}]})) @attr('manager') @attr('manager.news') @attr('manager.news.rss') def test_RSSWithrfileInConfig(self): """Check 'rfile' is taken from config""" rfile = os.path.join(self.utils.news_dir, 'rss.xml') manager = Manager() rss = RSS(config=manager.config) self.assertEqual(rfile, rss.rss_file) @attr('manager') @attr('manager.news') @attr('manager.news.rss') def test_RSSNewsDataEmpty(self): """Check method returns True if news.dir is empty""" empty_dir = '/tmp/empty' os.mkdir(empty_dir) manager = Manager() rss = RSS(config=manager.config, news_dir=empty_dir) self.assertTrue(rss.generate_rss()) shutil.rmtree(empty_dir) @attr('manager') @attr('manager.news') @attr('manager.news.rss') def test_RSSrfileNoneOK(self): """Check we print to STDOUT""" self.utils.copy_news_files() manager = Manager() rss = RSS(config=manager.config) self.assertTrue(rss.generate_rss()) @attr('manager') @attr('manager.news') @attr('manager.news.rss') def test_RSSrfileNonePrintSTDOUT(self): """Check we print to STDOUT has rss.file is not in config""" self.utils.copy_news_files() manager = Manager() # We delete rss.file from section 'RSS' to print to STDOUT manager.config.remove_option('RSS', 'rss.file') rss = RSS(config=manager.config) self.assertTrue(rss.generate_rss()) @attr('manager') @attr('manager.news') @attr('manager.news.rss') def test_RSSrfileArgsThrow(self): """Check method throws when OSError""" self.utils.copy_news_files() manager = Manager() # We delete rss.file from section 'RSS' to print to STDOUT manager.config.set('RSS', 'rss.file', '/no_ok') rss = RSS(config=manager.config) with self.assertRaises(SystemExit): rss.generate_rss() @attr('manager') @attr('manager.news') @attr('manager.news.rss') def test_RSSDataArgsThrow(self): """Check method 
throws when no 'news' key in data""" self.utils.copy_news_files() manager = Manager() # We delete rss.file from section 'RSS' to print to STDOUT rss = RSS(config=manager.config) with self.assertRaises(SystemExit): rss.generate_rss(data={'no_news_key': []}) @attr('manager') @attr('manager.news') @attr('manager.news.rss') def test_RSSWithDataArgsEmptyThrow(self): """Check method returns True when data is empty""" self.utils.copy_news_files() manager = Manager() # We delete rss.file from section 'RSS' to print to STDOUT rss = RSS(config=manager.config) self.assertTrue(rss.generate_rss(data={'news': []})) class TestBioMajManagerDecorators(unittest.TestCase): """Class for testing biomajmanager.decorators""" def setUp(self): """Setup stuff""" self.utils = UtilsForTests() # Make our test global.properties set as env var os.environ['BIOMAJ_CONF'] = self.utils.global_properties def tearDown(self): """Clean""" # self.utils.clean() @attr('decorators') @attr('decorators.bankrequired') def test_DecoratorBankRequiredOK(self): """Test we've got a bank name set""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') sections = manager.get_bank_sections('blast2') expected = {'nuc': {'dbs': ['alunuc'], 'sections': ['alunuc1', 'alunuc2']}, 'pro': {'dbs': ['alupro'], 'sections': ['alupro1', 'alupro2']}} self.assertDictContainsSubset(expected, sections) self.utils.drop_db() @attr('decorators') @attr('decorators.bankrequired') def test_DecoratorBankRequiredNotOK(self): """Test we've got a bank name set""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager() with self.assertRaises(SystemExit): manager.get_bank_sections('blast2') self.utils.drop_db() @attr('decorators') @attr('decorators.usergranted') def test_DecoratorsUserGrantedOK(self): """Test the user is granted""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') manager.save_banks_version(bank_file=self.utils.test_dir + '/saved_versions.txt') self.utils.drop_db() @attr('decorators') @attr('decorators.usergranted') def test_DecoratorsUserGrantedNotOK(self): """Test the user is granted""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') # Just change the env LOGNAME do misfit with db user buser = blogname = None if 'USER' in os.environ: buser = os.environ['USER'] os.environ['USER'] = 'fakeuser' if 'LOGNAME' in os.environ: blogname = os.environ['LOGNAME'] os.environ['LOGNAME'] = 'fakeuser' with self.assertRaises(SystemExit): manager.save_banks_version(bank_file=self.utils.test_dir + '/saved_versions.txt') # Reset to the right user name as previously if buser: os.environ['USER'] = buser if blogname: os.environ['LOGNAME'] = blogname self.utils.drop_db() @attr('decorators') @attr('decorators.usergranted') def test_DecoratorsUserGrantedAdminNotSet(self): """Test the user is granted""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') # Unset admin from config file and owner from the bank just created manager.config.set('GENERAL', 'admin', '') manager.bank.bank['properties']['owner'] = '' with self.assertRaises(SystemExit): manager.save_banks_version(bank_file=self.utils.test_dir + '/saved_versions.txt') self.utils.drop_db() @attr('decorators') @attr('decorators.deprecated') def test_DecoratorsDeprecated(self): """Check the call to deprecated method throws""" from biomajmanager.decorators import deprecated @deprecated def 
def_test(): pass with self.assertRaises(SystemExit): def_test() class TestBioMajManagerManager(unittest.TestCase): """Class for testing biomajmanager.manager class""" def setUp(self): """Setup stuff""" self.utils = UtilsForTests() # Make our test global.properties set as env var os.environ['BIOMAJ_CONF'] = self.utils.global_properties def tearDown(self): """Clean""" self.utils.clean() @attr('manager') @attr('manager.init') def test_ManagerWrongBankThrows(self): """Checks manager throws when bank does not exists""" with self.assertRaises(SystemExit): Manager(bank='DoesNotExist') @attr('manager') @attr('manager.loadconfig') def test_ManagerNoConfigRaisesException(self): """Check an exception is raised while config loading""" with self.assertRaises(SystemExit): Manager(cfg="/no_manager_cfg", global_cfg="/no_global_cfg") @attr('manager') @attr('manager.loadconfig') def test_ManagerGlobalConfigException(self): """Check an exception is raised config loading""" with self.assertRaises(SystemExit): Manager(global_cfg="/no_global_cfg") @attr('manager') @attr('manager.loadconfig') def test_ConfigNoManagerSection(self): """Check we don't have a 'MANAGER' section in our config""" no_sec = 'manager-nomanager-section.properties' self.utils.copy_file(ofile=no_sec, todir=self.utils.conf_dir) cfg = Manager.load_config(cfg=os.path.join(self.utils.conf_dir, no_sec)) self.assertFalse(cfg.has_section('MANAGER')) @attr('manager') @attr('manager.loadconfig') def test_ManagerLoadConfig(self): """Check we can load any configuration file on demand""" for pfile in ['m1.properties', 'm2.properties', 'm3.properties']: self.utils.copy_file(ofile=pfile, todir=self.utils.test_dir) cfg = Manager.load_config(cfg=os.path.join(self.utils.test_dir, pfile)) self.assertTrue(cfg.has_section('MANAGER')) self.assertEqual(cfg.get('MANAGER', 'file.name'), pfile) @attr('manager') @attr('manager.loadconfig') def test_ManagerLoadConfigNOTOK(self): """Check we throw an error when no 'manager.properties' found""" os.remove(os.path.join(self.utils.conf_dir, 'manager.properties')) with self.assertRaises(SystemExit): Manager.load_config(global_cfg=self.utils.global_properties) @attr('manager') @attr('manager.bankinfo') def test_ManagerBankInfo(self): """Check method returns right info about a bank""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() manager = Manager(bank='alu') manager.bank.banks.update({'name': 'alu'}, {'$set': {'current': now, 'last_update_session': now, 'properties.type': ['nucleic', 'protein']}, '$push': {'production': {'session': now, 'release': '54', 'remoterelease': '54', 'prod_dir': "alu"}}, }) manager.bank.bank = manager.bank.banks.find_one({'name': 'alu'}) returned = manager.bank_info() expected = {'info': [["Name", "Type(s)", "Last update status", "Published release"], ["alu", "nucleic,protein", Utils.time2datefmt(now), '54']], 'prod': [["Session", "Remote release", "Release", "Directory", "Freeze"], [Utils.time2datefmt(now), '54', '54', os.path.join(manager.bank.config.get('data.dir'), manager.bank.config.get('dir.version'), "alu"), 'no']], 'pend': []} self.assertDictEqual(returned, expected) self.utils.drop_db() @attr('manager') @attr('manager.bankpublished') def test_ManagerBankPublishedTrue(self): """Check a bank is published or not (True)""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) # at begining, biomaj create an empty bank entry into Mongodb manager = Manager(bank='alu') # If we do update we need to change 'bank_is_published' call find # 
and iterate over the cursor to do the same test manager.bank.bank['current'] = True self.assertTrue(manager.bank_is_published()) self.utils.drop_db() @attr('manager') @attr('manager.bankpublished') def test_ManagerBankPublishedFalse(self): """Check a bank is published or not (False)""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) # at begining, biomaj create an empty bank entry into Mongodb manager = Manager(bank='alu') # If we do update we need to change 'bank_is_published' call find and # iterate over the cursor to do the same test manager.bank.bank['current'] = None self.assertFalse(manager.bank_is_published()) self.utils.drop_db() @attr('manager') @attr('manager.checkprodsize') def test_checkProductionSizeMaxReleaseNoneProductionNotSetThrows(self): """Checks the methods throws when production not found in bank database""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') del manager.bank.bank['production'] with self.assertRaises(SystemExit): manager.check_production_size() self.utils.drop_db() @attr('manager') @attr('manager.checkprodsize') def test_checkProductionSizeMaxReleaseNoneCurrentSetProductionOverLimit(self): """Checks the methods returns the expected values""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() manager = Manager(bank='alu') manager.set_verbose(True) manager.bank.bank['current'] = now manager.bank.bank['production'].append({'session': now, 'release': 1}) manager.bank.bank['production'].append({'session': now + 1, 'release': 2}) manager.bank.bank['production'].append({'session': now + 2, 'release': 3}) manager.bank.bank['production'].append({'session': now + 3, 'release': 4}) expected = ['alu', 3, 1] returned = manager.check_production_size() self.assertEqual(expected, returned) self.utils.drop_db() @attr('manager') @attr('manager.checkprodsize') def test_checkProductionSizeMaxReleaseNoneProductionOK(self): """Checks the methods returns the expected values""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() manager = Manager(bank='alu') manager.set_verbose(True) manager.bank.bank['current'] = now manager.bank.bank['production'].append({'session': now, 'release': 1}) manager.bank.bank['production'].append({'session': now + 1, 'release': 2}) # As we have do not exceed the limit, empty list returned expected = [] returned = manager.check_production_size() self.assertEqual(expected, returned) self.utils.drop_db() @attr('manager') @attr('manager.cleansessions') def test_cleanSessionsNoBankPublishedReturnsFalse(self): """Checks method returns False when no 'current' set (get_bank_data_dir)""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') Manager.set_simulate(False) self.assertFalse(manager.clean_sessions()) self.utils.drop_db() @attr('manager') @attr('manager.cleansessions') def test_cleanSessionsLastSessionsSetContinueSimulateTrueReturnsTrue(self): """Check we read continue with last_update_session and current set to session id, simulate mode on""" current = time.time() last_run = current + 1 self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') Manager.set_simulate(True) manager.bank.bank['last_update_session'] = last_run manager.bank.bank['current'] = current manager.bank.bank['production'] = [{'session': current, 'release': "54", 'data_dir': self.utils.data_dir}] if 'pending' not in manager.bank.bank: 
manager.bank.bank['pending'] = [{'id': current - 1, 'release': "56"}] # Create the sessions (Session 'current' needed by "def current_release") sessions = [{'id': current, 'release': "54", 'dir_version': 'alu'}, {'id': last_run, 'release': "55", 'dir_version': 'alu'}, {'id': current - 1, 'release': "56", 'dir_version': 'alu'}] manager.bank.bank['sessions'] = sessions self.assertTrue(manager.clean_sessions()) self.utils.drop_db() @attr('manager') @attr('manager.cleansessions') def test_cleanSessionsDeletedSessionOnDiskReturnsTrue(self): """Check we have still some sessions on disk but marked as deleted""" # Needed for manager.get_bank_data_dir current = time.time() release = 54 minus = 3 deleted = current - minus deleted_rel = release - minus self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') Manager.set_simulate(False) manager.bank.bank['current'] = current manager.bank.bank['production'].append({'session': current, 'release': str(release), 'data_dir': self.utils.data_dir, 'prod_dir': "_".join(['alu', str(release)])}) # Create the sessions (Session 'current' needed by "def current_release") sessions = [{'id': current, 'release': release, 'dir_version': 'alu'}, {'id': current - 1, 'release': deleted_rel, 'dir_version': 'alu', 'deleted': deleted}] manager.bank.bank['sessions'] = sessions # Create the 'on disk' dir on_disk = manager.get_bank_data_dir() os.makedirs(os.path.join(on_disk, 'alu' + "_" + str(deleted_rel))) self.assertTrue(manager.clean_sessions()) self.utils.drop_db() @attr('manager') @attr('manager.cleansessions') def test_cleanSessionsNoDeletedSessionFoundInProductionReturnsTrue(self): """Check we no sessions marked as deleted not on disk but found in production""" # Needed for manager.get_bank_data_dir current = time.time() release = 54 minus = 3 deleted_rel = release - minus self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') Manager.set_simulate(False) manager.bank.bank['current'] = current manager.bank.bank['production'].append({'session': current, 'release': str(release), 'data_dir': self.utils.data_dir, 'prod_dir': "_".join(['alu', str(release)])}) manager.bank.bank['production'].append({'session': current - 1, 'release': str(deleted_rel), 'data_dir': self.utils.data_dir, 'prod_dir': "_".join(['alu', str(deleted_rel)])}) # Create the sessions (Session 'current' needed by "def current_release") sessions = [{'id': current, 'release': str(release), 'dir_version': 'alu'}, {'id': current - 1, 'release': str(deleted_rel), 'dir_version': 'alu'}] manager.bank.bank['sessions'] = sessions self.assertTrue(manager.clean_sessions()) self.utils.drop_db() @attr('manager') @attr('manager.cleansessions') def test_cleanSessionsDeletedSessionNotOnDiskNotFoundInProductionContinueReturnsTrue(self): """Check we have some sessions marked as deleted not on disk and not found in production, uses continue""" # Needed for manager.get_bank_data_dir current = time.time() release = 54 minus = 3 deleted = current - minus deleted_rel = release - minus self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') Manager.set_simulate(False) manager.bank.bank['current'] = current manager.bank.bank['production'].append({'session': current, 'release': str(release), 'data_dir': self.utils.data_dir, 'prod_dir': "_".join(['alu', str(release)])}) manager.bank.bank['production'].append({'session': current - 1, 'release': str(release - 3), 'data_dir': self.utils.data_dir, 
'prod_dir': "_".join(['alu', str(release - 3)])}) # Create the sessions (Session 'current' needed by "def current_release") sessions = [{'id': current, 'release': str(release), 'dir_version': 'alu'}, {'id': current - 1, 'release': str(deleted_rel), 'dir_version': 'alu', 'deleted': deleted}, {'id': current - 2, 'release': str(deleted_rel), 'dir_version': 'alu', 'deleted': deleted}] manager.bank.bank['sessions'] = sessions self.assertTrue(manager.clean_sessions()) self.utils.drop_db() @attr('manager') @attr('manager.cleansessions') def test_cleanSessionsNoDeletedSessionInProductionWorkflowStatusFalseInPendingReturnsTrue(self): """Check we have some sessions not marked as deleted, session in production, sessions.workflow_status False and found in pending""" # Needed for manager.get_bank_data_dir current = time.time() release = 54 minus = 3 deleted_rel = release - minus self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') Manager.set_simulate(False) manager.bank.bank['current'] = current manager.bank.bank['production'].append({'session': current, 'release': str(release), 'data_dir': self.utils.data_dir, 'prod_dir': "_".join(['alu', str(release)])}) manager.bank.bank['pending'] = [{'id': current - 2, 'release': str(deleted_rel)}] # Create the sessions (Session 'current' needed by "def current_release") sessions = [{'id': current, 'release': str(release), 'dir_version': 'alu'}, {'id': current - 2, 'release': str(deleted_rel), 'dir_version': 'alu', 'workflow_status': False}] manager.bank.bank['sessions'] = sessions self.assertTrue(manager.clean_sessions()) self.utils.drop_db() @attr('manager') @attr('manager.currentrelease') def test_ManagerGetCurrentRelease_CurrentSet(self): """Check correct release is returned""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() release = str(54) data = {'name': 'alu', 'current': now, 'sessions': [{'id': 1, 'remoterelease': 'R1'}, {'id': now, 'remoterelease': release}] } manager = Manager(bank='alu') manager.bank.bank = data manager._current_release = str(54) self.assertEqual(str(54), manager.current_release()) self.utils.drop_db() @attr('manager') @attr('manager.currentrelease') def test_ManagerGetCurrentRelease_CurrentANDSessions(self): """Check we get the right current release""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() release = 'R54' data = {'name': 'alu', 'current': now, 'sessions': [{'id': 1, 'remoterelease': 'R1'}, {'id': now, 'remoterelease': release}] } manager = Manager(bank='alu') manager.bank.bank = data self.assertEqual(release, manager.current_release()) self.utils.drop_db() @attr('manager') @attr('manager.currentlink') def test_ManagerGetCurrentLinkNOTOK(self): """Check get_current_link throws exception""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') cur_link = manager.get_current_link() self.assertNotEqual(cur_link, '/wrong_curent_link') self.utils.drop_db() @attr('manager') @attr('manager.currentlink') def test_ManagerGetCurrentLinkOK(self): """Check get_current_link throws exception""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') cur_link = manager.get_current_link() self.assertEqual(cur_link, os.path.join(self.utils.data_dir, manager.bank.name, 'current')) self.utils.drop_db() @attr('manager') @attr('manager.futurelink') def test_ManagerGetFutureLinkNOTOK(self): """Check get_future_link throws 
exception""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') cur_link = manager.get_future_link() self.assertNotEqual(cur_link, '/wrong_future_link') self.utils.drop_db() @attr('manager') @attr('manager.futurelink') def test_ManagerGetFutureLinkOK(self): """Check get_future_link throws exception""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') cur_link = manager.get_future_link() self.assertEqual(cur_link, os.path.join(self.utils.data_dir, manager.bank.name, 'future_release')) self.utils.drop_db() @attr('manager') @attr('manager.hascurrentlink') def test_ManagerHasCurrentLinkFalse(self): """Check has_current_link returns False""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') self.assertFalse(manager.has_current_link()) self.utils.drop_db() @attr('manager') @attr('manager.hascurrentlink') def test_ManagerHasCurrentLinkIsLinkTrue(self): """Check has_current_link returns True""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') link = os.path.join(self.utils.data_dir) os.symlink(os.path.join(link), 'test_link') self.assertTrue(manager.has_current_link(link='test_link')) os.remove('test_link') self.utils.drop_db() @attr('manager') @attr('manager.hasfuturelink') def test_ManagerHasFutureLinkFalse(self): """Check has_future_link returns False""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') self.assertFalse(manager.has_future_link()) self.utils.drop_db() @attr('manager') @attr('manager.hasfuturelink') def test_ManagerHasFutureLinkIsLinkOK(self): """Check has_future_link returns future link""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') link = os.path.join(self.utils.data_dir) os.symlink(os.path.join(link), 'future_link') self.assertTrue(manager.has_future_link(link='future_link')) os.remove('future_link') self.utils.drop_db() @attr('manager') @attr('manager.lastsessionfailed') def test_ManagerLastSessionFailedNoLastUpdateSession(self): """Check method returns False when no 'last_update_session' field in database""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') self.assertFalse(manager.last_session_failed()) self.utils.drop_db() @attr('manager') @attr('manager.lastsessionfailed') def test_ManagerLastSessionFailedNoSessionsThrows(self): """Check method throws when no 'sessions' in bank""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() manager = Manager(bank='alu') manager.bank.bank['last_update_session'] = now del(manager.bank.bank['sessions']) self.assertTrue(manager.last_session_failed()) @attr('manager') @attr('manager.lastsessionfailed') def test_ManagerLastSessionFailedStatusOverWorkflowStatusTrue(self): """Check method returns False when no 'last_update_session' field in database""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() data = {'name': 'alu', 'sessions': [{'id': 0, 'workflow_status': False}, {'id': now, 'workflow_status': True}], 'last_update_session': now, } manager = Manager(bank='alu') manager.bank.bank = data self.assertFalse(manager.last_session_failed()) self.utils.drop_db() @attr('manager') @attr('manager.lastsessionfailed') def test_ManagerLastSessionFailedNoWorkflowStatusTrue(self): """Check method returns 
True when no 'workflow_status' in 'session'""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() data = {'name': 'alu', 'sessions': [{'id': 0}, {'id': now}], 'last_update_session': now, } manager = Manager(bank='alu') manager.bank.bank = data self.assertTrue(manager.last_session_failed()) self.utils.drop_db() @attr('manager') @attr('manager.lastsessionfailed') def test_ManagerLastSessionFailedWorkflowStatusFalse(self): """Check method returns False when no 'last_update_session' field in database""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() data = {'name': 'alu', 'sessions': [{'id': 0, 'workflow_status': False}, {'id': now, 'workflow_status': False}], 'last_update_session': now, } manager = Manager(bank='alu') manager.bank.bank = data self.assertTrue(manager.last_session_failed()) self.utils.drop_db() @attr('manager') @attr('manager.lastsessionfailed') def test_ManagerLastSessionFailedFalseNoPendingFalse(self): """Check we have a failed session and no pending session(s)""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() data = {'name': 'alu', 'sessions': [{'id': 0, 'workflow_status': True}, {'id': now, 'workflow_status': True}], 'last_update_session': now, } manager = Manager(bank='alu') manager.bank.bank = data self.assertFalse(manager.last_session_failed()) self.utils.drop_db() @attr('manager') @attr('manager.lastsessionfailed') def test_ManagerLastSessionFailedTrueNoPendingTrue(self): """Check we have a failed session and no pending session(s)""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() data = {'name': 'alu', 'sessions': [{'id': 0, 'workflow_status': True}, {'id': now, 'workflow_status': True}], 'last_update_session': now, 'pending': [{'release': '12345', 'id': now}] } manager = Manager(bank='alu') manager.bank.bank = data Utils.show_warn = False Manager.set_verbose(True) self.assertTrue(manager.last_session_failed()) self.utils.drop_db() @attr('manager') @attr('manager.lastsessionfailed') def test_ManagerLastSessionFailedTrueNoPendingFalse(self): """Check we have a failed session and no pending session(s)""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() data = {'name': 'alu', 'sessions': [{'id': 0, 'workflow_status': True}, {'id': now, 'workflow_status': False}], 'last_update_session': now, } manager = Manager(bank='alu') manager.bank.bank = data Utils.show_warn = False self.assertTrue(manager.last_session_failed()) self.utils.drop_db() @attr('manager') @attr('manager.formats') def test_ManagerBankHasFormatNoFormat(self): """Check missing arg raises error""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') with self.assertRaises(SystemExit): manager.has_formats() self.utils.drop_db() @attr('manager') @attr('manager.formats') def test_ManagerBankHasFormatsTrue(self): """Check if the bank has a specific format (True)""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') self.assertTrue(manager.has_formats(fmt='blast')) self.utils.drop_db() @attr('manager') @attr('manager.formats') def test_ManagerBankHasFormatsFalse(self): """Check if the bank has a specific format (False)""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') self.assertFalse(manager.has_formats(fmt='unknown')) self.utils.drop_db() @attr('manager') 
@attr('manager.formats') def test_ManagerBankFormatsFlatFalseOK(self): """Check if the bank has a specific format (True)""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') returned = manager.formats() expected = ['blast@2.2.26', 'fasta@3.6'] self.assertListEqual(returned, expected) self.utils.drop_db() @attr('manager') @attr('manager.formats') def test_ManagerBankFormatsFlatTrueOK(self): """Check if the bank has a specific format (True)""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') returned = manager.formats(flat=True) expected = {'blast': ['2.2.26'], 'fasta': ['3.6']} self.assertDictEqual(returned, expected) self.utils.drop_db() @attr('manager') @attr('manager.formats') def test_ManagerBankFormatsAsStringOK(self): """Check if the bank has a specific format returned as string (True)""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') returned = manager.formats_as_string() expected = {'blast': ['2.2.26'], 'fasta': ['3.6']} self.assertDictEqual(returned, expected) self.utils.drop_db() @attr('manager') @attr('manager.formatsavailable') def test_ManagerFormatsAvailableRetunsOK(self): """Checks the methods returns the correct list of available formats/indexes""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') returned = manager.formats_available() expected = ['blast', 'fasta'] self.assertListEqual(expected, returned) self.utils.drop_db() @attr('manager') @attr('manager.getcurrentuser') def test_ManagerGetCurrentUserTestUSEROK(self): """Check we can get USER from environ with LOGNAME unset""" blogname = None if 'LOGNAME' in os.environ: blogname = os.environ['LOGNAME'] del os.environ['LOGNAME'] user = os.getenv('USER') manager = Manager() self.assertEqual(manager.get_current_user(), user) if 'LOGNAME' not in os.environ: os.environ['LOGNAME'] = blogname # @attr('manager') # @attr('manager.getcurrentuser') # def test_ManagerGetCurrentUserTestUserIsNone(self): # """Check method throws exception when env LOGNAME and USER not found""" # manager = Manager() # backup = os.environ.copy() # os.environ = {} # self.assertIsNone(manager.get_current_user()) # os.environ = backup @attr('manager') @attr('manager.getfailedprocess') def test_ManagerGetFailedProcessNoKeySessionsThrows(self): """Checks the method throws SystemExit when no 'sessions' JSON key found in bank database""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') del(manager.bank.bank['sessions']) with self.assertRaises(SystemExit): manager.get_failed_processes() @attr('manager') @attr('manager.getfailedprocess') def test_ManagerGetFailedProcessWithoutArgsReturnsEmptyList(self): """Check the method returns an empty list when no args passed""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') self.assertListEqual(manager.get_failed_processes(), []) @attr('manager') @attr('manager.getfailedprocess') def test_ManagerGetFailedProcessWithArgSessionIDArgReturnsEmptyList(self): """Checks the method returns an empty list when a session id is passed as arg and not found""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') manager.bank.bank['sessions'].append({'id': 1}) self.assertListEqual(manager.get_failed_processes(session_id=2), []) @attr('manager') 
@attr('manager.getfailedprocess') def test_ManagerGetFailedProcessWithSessionIDArgCheckStatusReturnsList(self): """Checks the method returns an empty list when a session id is passed as arg and found, as sessions.status false""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') manager.bank.bank['sessions'].append({'id': 1, 'workflow_status': True}) manager.bank.bank['sessions'].append({'id': 2, 'workflow_status': False, 'status': {'init': False, 'postprocess': False}, 'release': "51"}) self.assertEqual(len(manager.get_failed_processes(session_id=2)), 1) self.assertEqual(len(manager.get_failed_processes(session_id=2, full=True)), 1) @attr('manager') @attr('manager.getfailedprocess') def test_ManagerGetFailedProcessCheckPostProcessReturnsNonEmptyList(self): """Check the method passes all the 'sessions' conditions in the loop, with and w/o full + session_id""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') sessions = [ {'id': 1, 'process': {'postprocess': {'UNCOMPRESS': {'UNZIP': {'gunzip': True}}}}}, {'id': 2, 'process': {'postprocess': {'UNCOMPRESS': {'UNZIP': {'gunzip': True}}, 'FORMAT': {'F2FN': {'f2fn': False}}}}}, {'id': 3, 'process': {'postprocess': {'UNCOMPRESS': {'UNZIP': {'gunzip': True}}, 'FORMAT': {'F2FN': {'f2fn': False}, 'F2FP': {'f2fp': False}}}}, 'release': "52"} ] manager.bank.bank['sessions'] = sessions self.assertEqual(len(manager.get_failed_processes(session_id=1)), 0) self.assertEqual(len(manager.get_failed_processes(session_id=2)), 1) self.assertEqual(len(manager.get_failed_processes(session_id=3, full=True)), 12) @attr('manager') @attr('manager.getproductiondir') def test_ManagerGetProductionDirThorwsOK(self): """Check the method throws when no 'production.dir' set in config""" manager = Manager() manager.config.remove_option('MANAGER', 'production.dir') with self.assertRaises(SystemExit): manager.get_production_dir() @attr('manager') @attr('manager.getproductiondir') def test_ManagerGetProductionDirOK(self): """Check the method get the right value""" manager = Manager() expected = manager.config.get('MANAGER', 'production.dir') returned = manager.get_production_dir() self.assertEqual(returned, expected) @attr('manager') @attr('manager.getbankremoteinfo') def test_ManagerGetBankRemoteInfoArgsNone(self): """Check method works correctly without error(s)""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') expected = [['db.name', 'alu'], ['protocol', 'ftp'], ['server', 'ftp.wip.ncbi.nlm.nih.gov'], ['remote.dir', '/blast/db/FASTA/'], ['files.num.threads', '2'], ['extract.threads', '2']] self.assertListEqual(manager.get_bank_remote_info(), expected) @attr('manager') @attr('manager.getbankremoteinfo') def test_ManagerGetBankRemoteInfoWarnFieldNotFound(self): """Check the method handle warn no config field found""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') # db.name is added automatically if not found in field list self.assertEqual(len(manager.get_bank_remote_info(fields=['notfound'])), 1) @attr('manager') @attr('manager.getbankremoteinfo') def test_ManagerGetBankRemoteInfoNoneValueHandleProtcolMulti(self): """Check the method handle None value and throws in case of config error""" self.utils.copy_file(ofile='multi.properties', todir=self.utils.conf_dir) manager = Manager(bank='multi') # db.name is added automatically if not found in field list 
self.assertEqual(len(manager.get_bank_remote_info()), 11) @attr('manager') @attr('manager.getsessionfromid') def test_ManagerGetSessionFromIDNotNoneNotNone(self): """Check we retrieve the right session id (Not None)""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) data = {'name': 'alu', 'sessions': [{'id': 1, 'workflow_status': True}, {'id': 2, 'workflow_status': True}]} manager = Manager(bank='alu') manager.bank.bank = data self.assertIsNotNone(manager.get_session_from_id(1)) self.utils.drop_db() @attr('manager') @attr('manager.getsessionfromid') def test_ManagerGetSessionFromIDNotNone(self): """Check we retrieve the right session id (None)""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) data = {'name': 'alu', 'sessions': [{'id': 1, 'workflow_status': True}, {'id': 2, 'workflow_status': True}]} manager = Manager(bank='alu') manager.bank.bank = data self.assertIsNone(manager.get_session_from_id(3)) self.utils.drop_db() @attr('manager') @attr('manager.getsessionfromid') def test_ManagerGetSessionFromIDNone(self): """Check method raises exception""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) data = {'name': 'alu', 'sessions': [{'id': 1, 'workflow_status': True}, {'id': 2, 'workflow_status': True}]} manager = Manager(bank='alu') manager.bank.bank = data with self.assertRaises(SystemExit): manager.get_session_from_id(None) self.utils.drop_db() @attr('manager.getpendingsessions') def test_ManagerGetPendingSessionsOK(self): """Check method returns correct pending session""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() manager = Manager(bank='alu') expected = [{'release': 54, 'id': now}, {'release': 55, 'id': now + 1}] manager.bank.bank['pending'] = expected pendings = manager.get_pending_sessions() self.assertListEqual(expected, pendings) self.utils.drop_db() @attr('manager') @attr('manager.getpublishedrelease') def test_ManagerGetPublishedReleaseNotNone(self): """Check we get a the published release (NotNone)""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() release = 'R54' data = {'name': 'alu', 'current': now, 'sessions': [{'id': 1, 'remoterelease': 'R1'}, {'id': now, 'remoterelease': release}] } manager = Manager(bank='alu') manager.bank.bank = data rel = manager.get_published_release() self.assertIsNotNone(rel) self.utils.drop_db() @attr('manager') @attr('manager.getpublishedrelease') def test_ManagerGetPublishedReleaseNone(self): """Check we get a the published release (None)""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() release = 'R54' data = {'name': 'alu', 'sessions': [{'id': 1, 'remoterelease': 'R1'}, {'id': now, 'remoterelease': release}] } manager = Manager(bank='alu') manager.bank.bank = data rel = manager.get_published_release() self.assertIsNone(rel) self.utils.drop_db() @attr('manager') @attr('manager.getpublishedrelease') def test_ManagerGetPublishedReleaseRaisesOK(self): """Check method raises an exception""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() data = {'name': 'alu', 'current': now, 'sessions': [{'id': 1}, {'id': now}] } manager = Manager(bank='alu') manager.bank.bank = data with self.assertRaises(SystemExit): manager.get_published_release() self.utils.drop_db() @attr('manager') @attr('manager.sections') def test_ManagerGetDictSections(self): """Get sections for a bank""" 
self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') returned = manager.get_bank_sections(tool='blast2') expected = {'pro': {'dbs': ['alupro'], 'sections': ['alupro1', 'alupro2']}, 'nuc': {'dbs': ['alunuc'], 'sections': ['alunuc1', 'alunuc2']}} self.assertDictContainsSubset(expected, returned) self.utils.drop_db() @attr('manager') @attr('manager.sections') def test_ManagerGetDictSectionsOnlySectionsOK(self): """Test we've got only sections not db from bank properties""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') returned = manager.get_bank_sections(tool='golden') expected = {'nuc': {'sections': ['alunuc'], 'dbs': []}, 'pro': {'sections': ['alupro'], 'dbs': []}} self.assertDictContainsSubset(expected, returned) self.utils.drop_db() @attr('manager') @attr('manager.sections') def test_ManagerGetDictSectionsNoTool(self): """Get sections for a bank""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') with self.assertRaises(SystemExit): manager.get_bank_sections() self.utils.drop_db() @attr('manager') @attr('manager.showpendingsessions') def test_ManagerShowPendingSessionsOK(self): """Check method returns correct pending session""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() manager = Manager(bank='alu') expected = [{'release': 54, 'id': now}, {'release': 55, 'id': now + 1}] manager.bank.bank['pending'] = expected pendings = manager.show_pending_sessions() self.assertListEqual(expected, pendings) self.utils.drop_db() @attr('manager') @attr('manager.synchronizedb') def test_ManagerSynchDBMissingConfKeyThrows(self): """Checks the method throws when some config keys are missing""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') manager.config.remove_option('MANAGER', 'synchrodb.delete.dir') with self.assertRaises(SystemExit): manager.synchronize_db() manager.config.set('MANAGER', 'synchrodb.delete.dir', 'auto') manager.config.remove_option('MANAGER', 'synchrodb.set.sessions.deleted') with self.assertRaises(SystemExit): manager.synchronize_db() self.utils.drop_db() @attr('manager') @attr('manager.synchronizedb') def test_ManagerSynchDBWrongConfKeyThrows(self): """Checks the method throws when some config keys are wrong""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') manager.config.set('MANAGER', 'synchrodb.delete.dir', 'unknown') with self.assertRaises(SystemExit): manager.synchronize_db() manager.config.set('MANAGER', 'synchrodb.delete.dir', 'auto') manager.config.set('MANAGER', 'synchrodb.set.sessions.deleted', 'unknown') with self.assertRaises(SystemExit): manager.synchronize_db() self.utils.drop_db() @attr('manager') @attr('manager.synchronizedb') def test_ManagerSynchDBReturnsFalseNoCurrentRelease(self): """Check method returns False as we do not have a 'current' release set (get_bank_data_dir())""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') Manager.set_simulate(True) self.assertFalse(manager.synchronize_db()) # Simulate mode off, delete.dir=manual as set.sessions.deleted Manager.set_simulate(False) manager.config.set('MANAGER', 'synchrodb.delete.dir', 'manual') manager.config.set('MANAGER', 'synchrodb.set.sessions.deleted', 'manual') # Throws as 'synchrodb.set.sessions.deleted'=manual and no args passed to method with 
self.assertRaises(SystemExit): manager.synchronize_db() # Returns False as get_bank_data_dir returns False, no 'current' release set self.assertFalse(manager.synchronize_db(date_deleted="2016/01/01")) self.utils.drop_db() @attr('manager') @attr('manager.synchronizedb') def test_ManagerSynchDBWithCurrentReleaseAndPendingsAndMissingProductionSimulateONReturnsTrue(self): """Do some tests inside loop over productions. Simulate mode ON returns True""" # sid 1: current release, in prod, on disk # sid 2: in prod, on disk, wf_status False, in pending # sid 3: in prod, on disk, wf_status True, deleted # sid 4: in prod, on disk, not in session self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') Manager.set_verbose(True) # Needed for call get_bank_data_dir() # Set production production_data = [{'data_dir': self.utils.data_dir, 'release': "1", 'dir_version': "alu", 'session': 1, 'prod_dir': "alu_1"}, {'data_dir': self.utils.data_dir, 'release': "2", 'dir_version': "alu", 'session': 2, 'prod_dir': "alu_2"}, {'data_dir': self.utils.data_dir, 'release': "3", 'dir_version': "alu", 'session': 3, 'prod_dir': "alu_3"}, {'data_dir': self.utils.data_dir, 'release': "3", 'dir_version': "alu", 'session': 4, 'prod_dir': "alu_4"}, {'data_dir': self.utils.data_dir, 'release': "10", 'dir_version': "alu", 'session': 10, 'prod_dir': "alu_10"}] # Set sessions sessions_data = [{'id': 1, 'workflow_status': True, 'release': "1"}, {'id': 2, 'workflow_status': False, 'release': "2"}, {'id': 3, 'workflow_status': True, 'deleted': 2, 'release': "3"}] # Set pendings pending_data = [{'id': 3, 'release': "3"}, {'id': 2, 'release': "2"}] manager.bank.bank['last_update_session'] = 3 manager.bank.bank['current'] = 1 manager.bank.bank['production'] = production_data manager.bank.bank['sessions'] = sessions_data manager.bank.bank['pending'] = pending_data # Need to create some release directory to be tested, it is the current release (set in db and production) for i in range(1, 6, 1): release_dir = os.path.join(self.utils.data_dir, 'alu', 'alu_' + str(i)) os.makedirs(release_dir) # Create a 'current' dir os.makedirs(os.path.join(self.utils.data_dir, 'alu', 'current')) # Force auto_delete to False Manager.set_simulate(True) self.assertTrue(manager.synchronize_db()) self.utils.drop_db() Manager.set_simulate(False) @attr('manager') @attr('manager.synchronizedb') def test_ManagerSynchDBWithCurrentReleaseAndPendingsAndMissingProductionSimulateOFFReturnsTrue(self): """Do some tests inside loop over productions. 
Simulate mode OFF returns True""" # sid 1: current release, in prod, on disk # sid 2: in prod, on disk, wf_status False, in pending # sid 3: in prod, on disk, wf_status True, deleted # sid 4: in prod, on disk, not in session self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') Manager.set_verbose(True) # Needed for call get_bank_data_dir() # Set production production_data = [{'data_dir': self.utils.data_dir, 'release': "1", 'dir_version': "alu", 'session': 1, 'prod_dir': "alu_1"}, {'data_dir': self.utils.data_dir, 'release': "2", 'dir_version': "alu", 'session': 2, 'prod_dir': "alu_2"}, {'data_dir': self.utils.data_dir, 'release': "3", 'dir_version': "alu", 'session': 3, 'prod_dir': "alu_3"}, {'data_dir': self.utils.data_dir, 'release': "3", 'dir_version': "alu", 'session': 4, 'prod_dir': "alu_4"}, {'data_dir': self.utils.data_dir, 'release': "10", 'dir_version': "alu", 'session': 10, 'prod_dir': "alu_10"}] # Set sessions sessions_data = [{'id': 1, 'workflow_status': True, 'release': "1"}, {'id': 2, 'workflow_status': False, 'release': "2"}, {'id': 3, 'workflow_status': True, 'deleted': 2, 'release': "3"}] # Set pendings pending_data = [{'id': 3, 'release': "3"}, {'id': 2, 'release': "2"}] manager.bank.bank['last_update_session'] = 3 manager.bank.bank['current'] = 1 manager.bank.bank['production'] = production_data manager.bank.bank['sessions'] = sessions_data manager.bank.bank['pending'] = pending_data # Need to create some release directory to be tested, it is the current release (set in db and production) for i in range(1, 6, 1): release_dir = os.path.join(self.utils.data_dir, 'alu', 'alu_' + str(i)) os.makedirs(release_dir) # Create a 'current' dir os.makedirs(os.path.join(self.utils.data_dir, 'alu', 'current')) # Set auto_delete to True Manager.set_simulate(False) self.assertTrue(manager.synchronize_db()) self.utils.drop_db() @attr('manager') @attr('manager.synchronizedb') def test_ManagerSynchDBWithCurrentReleaseAndPendingsAndMissingProductionSimulateOFFPrintsWarningReturnsTrue(self): """Do some tests inside loop over productions. 
Simulate mode OFF, prints [%s] Release %s ok in production and on disk, but sessions.workflow_status is False returns True""" # sid 1: current release, in prod, on disk # sid 2: in prod, on disk, wf_status False, in pending # sid 3: in prod, on disk, wf_status True, deleted # sid 4: in prod, on disk, not in session self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') Manager.set_verbose(True) # Needed for call get_bank_data_dir() # Set production production_data = [{'data_dir': self.utils.data_dir, 'release': "1", 'dir_version': "alu", 'session': 1, 'prod_dir': "alu_1"}, {'data_dir': self.utils.data_dir, 'release': "2", 'dir_version': "alu", 'session': 2, 'prod_dir': "alu_2"}, {'data_dir': self.utils.data_dir, 'release': "3", 'dir_version': "alu", 'session': 3, 'prod_dir': "alu_3"}, {'data_dir': self.utils.data_dir, 'release': "3", 'dir_version': "alu", 'session': 4, 'prod_dir': "alu_4"}, {'data_dir': self.utils.data_dir, 'release': "10", 'dir_version': "alu", 'session': 10, 'prod_dir': "alu_10"}] # Set sessions sessions_data = [{'id': 1, 'workflow_status': True, 'release': "1"}, {'id': 2, 'workflow_status': False, 'release': "2"}, {'id': 3, 'workflow_status': True, 'deleted': 2, 'release': "3"}] # Set pendings pending_data = [{'id': 3, 'release': "3"}, {'id': 1, 'release': "1"}] manager.bank.bank['last_update_session'] = 3 manager.bank.bank['current'] = 1 manager.bank.bank['production'] = production_data manager.bank.bank['sessions'] = sessions_data manager.bank.bank['pending'] = pending_data # Need to create some release directory to be tested, it is the current release (set in db and production) for i in range(1, 6, 1): release_dir = os.path.join(self.utils.data_dir, 'alu', 'alu_' + str(i)) os.makedirs(release_dir) # Create a 'current' dir os.makedirs(os.path.join(self.utils.data_dir, 'alu', 'current')) # Set auto_delete to True Manager.set_simulate(False) self.assertTrue(manager.synchronize_db()) self.utils.drop_db() @attr('manager') @attr('manager.synchronizedb') def test_ManagerSynchDBWithCurrentReleaseAndRunningUpdateAndMissingProductionSimulateONUpdateRunningStatusTrue(self): """Do some tests inside loop over productions. 
Simulate mode ON, update running and known release yet""" # sid 1: current release, in prod, on disk # sid 2: in prod, on disk, wf_status False, in pending # sid 3: in prod, on disk, wf_status not here, status here # sid 4: in prod, on disk, wf_status True, deleted # sid 5: in prod, on disk, not in session self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') Manager.set_verbose(True) # Needed for call get_bank_data_dir() # Set production production_data = [{'data_dir': self.utils.data_dir, 'release': "1", 'dir_version': "alu", 'session': 1, 'prod_dir': "alu_1"}, {'data_dir': self.utils.data_dir, 'release': "2", 'dir_version': "alu", 'session': 2, 'prod_dir': "alu_2"}, {'data_dir': self.utils.data_dir, 'release': "3", 'dir_version': "alu", 'session': 3, 'prod_dir': "alu_3"}, {'data_dir': self.utils.data_dir, 'release': "3", 'dir_version': "alu", 'session': 4, 'prod_dir': "alu_4"}, {'data_dir': self.utils.data_dir, 'release': "10", 'dir_version': "alu", 'session': 10, 'prod_dir': "alu_10"}] # Set sessions sessions_data = [{'id': 1, 'workflow_status': True, 'release': "1"}, {'id': 2, 'workflow_status': False, 'release': "2"}, {'id': 3, 'workflow_status': True, 'deleted': 2, 'release': "3"}] # Set pendings pending_data = [{'id': 3, 'release': "3"}, {'id': 2, 'release': "2"}] # Set status status_data = {'over': {'status': False, 'progress': None}, 'release': {'status': True, 'progress': 10}} manager.bank.bank['last_update_session'] = 3 manager.bank.bank['current'] = 1 manager.bank.bank['status'] = status_data manager.bank.bank['production'] = production_data manager.bank.bank['sessions'] = sessions_data manager.bank.bank['pending'] = pending_data # Need to create some release directory to be tested, it is the current release (set in db and production) for i in range(1, 6, 1): release_dir = os.path.join(self.utils.data_dir, 'alu', 'alu_' + str(i)) os.makedirs(release_dir) # Create a 'current' dir os.makedirs(os.path.join(self.utils.data_dir, 'alu', 'current')) # Force auto_delete to False Manager.set_simulate(True) self.assertTrue(manager.synchronize_db()) self.utils.drop_db() Manager.set_simulate(False) @attr('manager') @attr('manager.synchronizedb') def test_ManagerSynchDBWithCurrentReleaseAndRunningUpdateAndMissingProductionSimulateONUpdateRunningStatusFalse(self): """Do some tests inside loop over productions. 
Simulate mode ON, update is running and release not know yet""" # sid 1: current release, in prod, on disk # sid 2: in prod, on disk, wf_status False, in pending # sid 3: in prod, on disk, wf_status not here, status here # sid 4: in prod, on disk, wf_status True, deleted # sid 5: in prod, on disk, not in session self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') Manager.set_verbose(True) # Needed for call get_bank_data_dir() # Set production production_data = [{'data_dir': self.utils.data_dir, 'release': "1", 'dir_version': "alu", 'session': 1, 'prod_dir': "alu_1"}, {'data_dir': self.utils.data_dir, 'release': "2", 'dir_version': "alu", 'session': 2, 'prod_dir': "alu_2"}, {'data_dir': self.utils.data_dir, 'release': "3", 'dir_version': "alu", 'session': 3, 'prod_dir': "alu_3"}, {'data_dir': self.utils.data_dir, 'release': "3", 'dir_version': "alu", 'session': 4, 'prod_dir': "alu_4"}, {'data_dir': self.utils.data_dir, 'release': "10", 'dir_version': "alu", 'session': 10, 'prod_dir': "alu_10"}] # Set sessions sessions_data = [{'id': 1, 'workflow_status': True, 'release': "1"}, {'id': 2, 'workflow_status': False, 'release': "2"}, {'id': 3, 'workflow_status': True, 'deleted': 2, 'release': "3"}] # Set pendings pending_data = [{'id': 3, 'release': "3"}, {'id': 2, 'release': "2"}] # Set status status_data = {'over': {'status': False, 'progress': None}, 'release': {'status': False, 'progress': 10}} manager.bank.bank['last_update_session'] = 3 manager.bank.bank['current'] = 1 manager.bank.bank['status'] = status_data manager.bank.bank['production'] = production_data manager.bank.bank['sessions'] = sessions_data manager.bank.bank['pending'] = pending_data # Need to create some release directory to be tested, it is the current release (set in db and production) for i in range(1, 6, 1): release_dir = os.path.join(self.utils.data_dir, 'alu', 'alu_' + str(i)) os.makedirs(release_dir) # Create a 'current' dir os.makedirs(os.path.join(self.utils.data_dir, 'alu', 'current')) # Force auto_delete to False Manager.set_simulate(True) self.assertTrue(manager.synchronize_db()) self.utils.drop_db() Manager.set_simulate(False) @attr('manager') @attr('manager.getbankdatadir') def test_ManagerGetBankDataDirReturnsNone(self): """Check method warn "Can't get current production directory: 'current_release' ... and returns None""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') self.assertIsNone(manager.get_bank_data_dir()) @attr('manager') @attr('manager.getbankdatadir') def test_ManagerGetBankDataDirRaisesNoCurrentRelease(self): """Check method raises "Can't get current production directory: 'current_release' ..." 
release ok, prod not ok """ self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() manager = Manager(bank='alu') manager.bank.bank['current'] = now manager.bank.bank['sessions'].append({'id': now, 'release': '54'}) manager.bank.bank['production'] = [] with self.assertRaises(SystemExit): manager.get_bank_data_dir() @attr('manager') @attr('manager.getbankdatadir') def test_ManagerGetBankDataDirOK(self): """Check method returns path to production dir""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() manager = Manager(bank='alu') manager.bank.bank['current'] = now manager.bank.bank['sessions'].append({'id': now, 'release': '54'}) manager.bank.bank['production'].append({'session': now, 'release': '54', 'data_dir': self.utils.data_dir}) returned = manager.get_bank_data_dir() expected = os.path.join(self.utils.data_dir, manager.bank.name) self.assertEqual(expected, returned) @attr('manager') @attr('manager.getbankdatadir') def test_ManagerGetBankDataDirRaisesNoProd(self): """Check method raises "Can't get current production directory, 'prod_dir' or 'data_dir' missing ...""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() prod_dir = 'alu_54' manager = Manager(bank='alu') manager.bank.bank['current'] = now manager.bank.bank['sessions'].append({'id': now, 'release': prod_dir}) manager.bank.bank['production'].append({'session': now, 'release': prod_dir}) with self.assertRaises(SystemExit): manager.get_bank_data_dir() @attr('manager') @attr('manager.currentproddir') def test_ManagerGetCurrentProdDirRaises(self): """Check method raises "Can't get current production directory: 'current_release' ...""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') with self.assertRaises(SystemExit): manager.get_current_proddir() @attr('manager') @attr('manager.currentproddir') def test_ManagerGetCurrentProdDirRaisesNoCurrentRelease(self): """Check method raises "Can't get current production directory: 'current_release' ..." 
release ok, prod not ok """ self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() manager = Manager(bank='alu') manager.bank.bank['current'] = now manager.bank.bank['sessions'].append({'id': now, 'release': '54'}) manager.bank.bank['production'] = [] with self.assertRaises(SystemExit): manager.get_current_proddir() @attr('manager') @attr('manager.currentproddir') def test_ManagerGetCurrentProdDirOK(self): """Check method returns path to production dir""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() prod_dir = 'alu_54' manager = Manager(bank='alu') manager.bank.bank['current'] = now manager.bank.bank['sessions'].append({'id': now, 'release': '54'}) manager.bank.bank['production'].append({'session': now, 'release': '54', 'data_dir': self.utils.data_dir, 'prod_dir': prod_dir}) returned = manager.get_current_proddir() expected = os.path.join(self.utils.data_dir, manager.bank.name, prod_dir) self.assertEqual(expected, returned) @attr('manager') @attr('manager.currentproddir') def test_ManagerGetCurrentProdDirRaisesNoProd(self): """Check method raises "Can't get current production directory, 'prod_dir' or 'data_dir' missing ...""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() prod_dir = 'alu_54' manager = Manager(bank='alu') manager.bank.bank['current'] = now manager.bank.bank['sessions'].append({'id': now, 'release': prod_dir}) manager.bank.bank['production'].append({'session': now, 'release': prod_dir, 'data_dir': self.utils.data_dir}) with self.assertRaises(SystemExit): manager.get_current_proddir() @attr('manager') @attr('manager.getlastproductionok') def test_ManagerGetLastProductionokNoProductionInBankThrows(self): """Check the method throws when no 'production' in bank""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') del manager.bank.bank['production'] with self.assertRaises(SystemExit): manager.get_last_production_ok() self.utils.drop_db() @attr('manager') @attr('manager.getlastproductionok') def test_ManagerGetLastProductionOKProductionsEmptyReturnsNone(self): """Check the method returns None if 'production' is empty""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') self.assertIsNone(manager.get_last_production_ok()) self.utils.drop_db() @attr('manager') @attr('manager.getlastproductionok') def test_ManagerGetLastProductionokNoSessionsThrows(self): """Check the method throws when no 'sessions'""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') manager.bank.bank['production'].append({'fakekey': 'ok'}) with self.assertRaises(SystemExit): manager.get_last_production_ok() self.utils.drop_db() @attr('manager') @attr('manager.getlastproductionok') def test_ManagerGetLastProductionOkRightProductionWithCurrent(self): """Check the method get last 'production' when a 'current' is set""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() manager = Manager(bank='alu') manager.bank.bank['current'] = now manager.bank.bank['production'].append({'session': now, 'remoterelease': '1'}) manager.bank.bank['production'].append({'session': now + 1, 'remoterelease': '2'}) prod = manager.get_last_production_ok() self.assertEqual(prod['remoterelease'], '2') self.utils.drop_db() @attr('manager') @attr('manager.getlastproductionok') def 
test_ManagerGetLastProductionOKRightProductionNoCurrent(self): """Check the method return right production without 'current' set""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() manager = Manager(bank='alu') del manager.bank.bank['current'] manager.bank.bank['production'].append({'session': now, 'remoterelease': '1'}) manager.bank.bank['production'].append({'session': now + 2, 'remoterelease': '3'}) manager.bank.bank['production'].append({'session': now + 1, 'remoterelease': '2'}) prod = manager.get_last_production_ok() self.assertEqual(prod['remoterelease'], '2') self.utils.drop_db() @attr('manager') @attr('manager.getlastproductionok') def test_ManagerGetLastProductionOKLastProductionIsCurrent(self): """Check the method returns None when 'current' is the last 'production'""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() manager = Manager(bank='alu') manager.bank.bank['current'] = now manager.bank.bank['production'].append({'session': now, 'remoterelease': '54'}) prod = manager.get_last_production_ok() self.assertIsNone(prod) self.utils.drop_db() @attr('manager') @attr('manager.getverbose') def test_ManagerGetVerboseTrue(self): """Check manager.get_verbose() get True when Manager.verbose = True""" Manager.verbose = True self.assertTrue(Manager.get_verbose()) @attr('manager') @attr('manager.getverbose') def test_ManagerGetVerboseFalse(self): """Check manager.get_verbose() get False when Manager.verbose = False""" Manager.verbose = False self.assertFalse(Manager.get_verbose()) @attr('manager') @attr('manager.getsimulate') def test_ManagerGetSimulateTrue(self): """Check manager.get_simulate() get True when Manager.simulate = True""" Manager.simulate = True self.assertTrue(Manager.get_simulate()) @attr('manager') @attr('manager.getsimulate') def test_ManagerGetSimulateFalse(self): """Check manager.get_simulate() get False when Manager.simulate = False""" Manager.simulate = False self.assertFalse(Manager.get_simulate()) @attr('manager') @attr('manager.banklist') def test_ManagerGetBankListWrongVisibility(self): """Check bank list throws OK with wrong visibility""" with self.assertRaises(SystemExit): Manager.get_bank_list(visibility="fake") @attr('manager') @attr('manager.banklist') def test_ManagerGetBankListOK(self): """Check bank list works OK""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) self.utils.copy_file(ofile='minium.properties', todir=self.utils.conf_dir) # Create 2 entries into the database Manager(bank='alu') Manager(bank='minium') manual_list = ['alu', 'minium'] bank_list = Manager.get_bank_list() self.assertListEqual(bank_list, manual_list) self.utils.drop_db() @attr('manager') @attr('manager.banklist') def test_ManagerGetBankListBioMAJConfigNOTOK(self): """Check bank list throws SystemExit exception""" from biomaj.mongo_connector import MongoConnector from biomaj_core.config import BiomajConfig # Unset MongoConnector and env BIOMAJ_CONF to force config relaod and Mongo reconnect MongoConnector.db = None BiomajConfig.global_config = None back_cfg = os.environ["BIOMAJ_CONF"] os.environ['BIOMAJ_CONF'] = "/not_found" with self.assertRaises(SystemExit): Manager.get_bank_list() os.environ['BIOMAJ_CONF'] = back_cfg self.utils.drop_db() @attr('manager') @attr('manager.banklist') def test_ManagerGetBankListMongoConnectorNOTOK(self): """Check bank list throws ServerSelectionTimeoutError ConnectionFailure exception""" from biomaj.mongo_connector import MongoConnector from 
biomaj_core.config import BiomajConfig # Unset MongoConnector and env BIOMAJ_CONF to force config relaod and Mongo reconnect config_file = 'global-wrongMongoURL.properties' self.utils.copy_file(ofile=config_file, todir=self.utils.conf_dir) MongoConnector.db = None BiomajConfig.load_config(config_file=os.path.join(self.utils.conf_dir, config_file)) with self.assertRaises(SystemExit): Manager.get_bank_list() MongoConnector.db = None BiomajConfig.global_config = None self.utils.drop_db() @attr('manager') @attr('manager.getconfigregexp') def test_ManagerGetConfigRegExpOKWithValuesTrue(self): """Check method get the right entries from config""" manager = Manager() my_values = manager.get_config_regex(regex='.*\.dir$', with_values=False) expected = [['lock.dir'], ['log.dir'], ['process.dir'], ['data.dir'], ['cache.dir'], ['conf.dir']] self.assertListEqual(my_values, sorted(expected)) self.utils.drop_db() @attr('manager') @attr('manager.getconfigregexp') def test_ManagerGetConfigRegExpOKWithValuesFalse(self): """Check method get the right entries from config""" manager = Manager() my_values = manager.get_config_regex(regex='^db\.', with_values=True) self.assertListEqual(my_values, [['db.name', self.utils.db_test], ['db.url', self.utils.mongo_url]]) self.utils.drop_db() @attr('manager') @attr('manager.getconfigregexp') def test_ManagerGetConfigRegExpNoRegExp(self): """Check method get the right entries from config""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') with self.assertRaises(SystemExit): manager.get_config_regex() @attr('manager') @attr('manager.getbankpackages') def test_ManagerGetBankPackagesOK(self): """Check get_bank_packages() get the right packages list""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') packs = ['pack@blast@2.2.26', 'pack@fasta@3.6'] bank_packs = manager.get_bank_packages() self.assertListEqual(packs, bank_packs) self.utils.drop_db() @attr('manager') @attr('manager.getbankpackages') def test_ManagerGetBankPackagesNoneOK(self): """Check get_bank_packages() returns empty list as bank config file does not have db.packages set""" self.utils.copy_file(ofile='minium.properties', todir=self.utils.conf_dir) manager = Manager(bank='minium') Manager.set_verbose(True) bank_packs = manager.get_bank_packages() self.assertListEqual(bank_packs, []) self.utils.drop_db() @attr('manager') @attr('manager.getformatsforrelease') def test_ManagerGetFormatsForReleaseOK(self): """Check we get the right list for a bank supported formats""" expected = [] for directory in ['flat', 'blast2/2.2.21', 'fasta/3.6', 'golden/3.0']: os.makedirs(os.path.join(self.utils.data_dir, directory)) expected.append('@'.join(['pack'] + directory.split('/'))) returned = Manager.get_formats_for_release(path=self.utils.data_dir) expected.pop(0) self.assertListEqual(expected, returned) @attr('manager') @attr('manager.getformatsforrelease') def test_ManagerGetFormatsForReleaseRaises(self): """Check method throws error""" with self.assertRaises(SystemExit): Manager.get_formats_for_release() @attr('manager') @attr('manager.getformatsforrelease') def test_ManagerGetFormatsForReleasePathNotExistsEmptyList(self): """Check method throws error""" returned = Manager.get_formats_for_release(path="/not_found") self.assertListEqual(returned, []) @attr('manager') @attr('manager.getlastsession') def test_ManagerGetLastSessionOK(self): """Check method returns correct session""" 
self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() manager = Manager(bank='alu') manager.bank.bank['sessions'].append({'id': now, 'name': 'session1'}) manager.bank.bank['sessions'].append({'id': now + 1, 'name': 'session2'}) manager.bank.bank['sessions'].append({'id': now + 2, 'name': 'session3'}) returned = manager._get_last_session() self.assertDictEqual(returned, {'id': now + 2, 'name': 'session3'}) self.utils.drop_db() @attr('manager') @attr('manager.getlastsession') def test_ManagerGetLastSessionThrows(self): """Check method throws exception""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') del manager.bank.bank['sessions'] with self.assertRaises(SystemExit): manager._get_last_session() self.utils.drop_db() @attr('manager') @attr('manager.history') def test_ManagerHistoryNoProductionRaisesError(self): """Check when no 'production' field in bank, history raises exception""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') manager.bank.bank['production'] = None with self.assertRaises(SystemExit): manager.history() self.utils.drop_db() @attr('manager') @attr('manager.history') def test_ManagerHistoryNoSessionsRaisesError(self): """Check when no 'sessions' field in bank, history raises exception""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') manager.bank.bank['production'].append({'session': 100, 'release': 12}) manager.bank.bank['sessions'] = None with self.assertRaises(SystemExit): manager.history() self.utils.drop_db() @attr('manager') @attr('manager.history') def test_ManagerHistoryCheckIDSessionsOK(self): """Check bank has right session id""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') _id = "@".join(['bank', 'alu', '12', Utils.time2datefmt(100)]) manager.bank.bank['production'].append({'session': 100, 'release': 12, 'prod_dir': "/tmp", 'dir_version': "alu", 'remoterelease': 12, 'freeze': False, 'data_dir': "/tmp"}) manager.bank.bank['current'] = 100 manager.bank.bank['sessions'].append({'id': 100, 'remoterelease': 12, 'last_update_time': 100, 'status': {}}) history = manager.history() self.assertEqual(history[0]['_id'], _id) self.utils.drop_db() @attr('manager') @attr('manager.history') def test_ManagerHistoryCheckStatusDeprecatedOK(self): """Check bank has status deprecated""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') manager.bank.bank['production'].append({'session': 100, 'release': 12, 'prod_dir': "/tmp", 'dir_version': "alu", 'remoterelease': 12, 'freeze': False, 'data_dir': "/tmp"}) manager.bank.bank['current'] = 99 manager.bank.bank['sessions'].append({'id': 100, 'remoterelease': 12, 'last_update_time': 100, 'status': {}}) history = manager.history() self.assertEqual(history[0]['status'], 'unpublished') manager.bank.bank['current'] = 101 history = manager.history() self.assertEqual(history[0]['status'], 'deprecated') self.utils.drop_db() @attr('manager') @attr('manager.history') def test_ManagerHistoryStatusUnpublishedOK(self): """Check bank not published yet (first run) has status unpublished""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') manager.bank.bank['production'].append({'session': 100, 'release': 12, 'prod_dir': "/tmp", 'dir_version': "alu", 'remoterelease': 12, 'freeze': False, 'data_dir': 
"/tmp"}) manager.bank.bank['sessions'].append({'id': 100, 'remoterelease': 12, 'last_update_time': 100, 'status': {}}) del manager.bank.bank['current'] history = manager.history() self.assertEqual(history[0]['status'], 'unpublished') self.utils.drop_db() @attr('manager') @attr('manager.history') def test_ManagerHistorySessionsHistoryANDStatusDeletedOK(self): """Check bank has status deprecated, no 'current' set""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') os.makedirs(os.path.join(self.utils.data_dir, 'alu', 'alu_12')) manager.bank.bank['production'].append({'session': 100, 'release': 12, 'prod_dir': "/tmp", 'dir_version': "alu", 'remoterelease': 12, 'freeze': False, 'data_dir': "/tmp"}) manager.bank.bank['sessions'].append({'id': 101, 'data_dir': self.utils.data_dir, 'dir_version': "alu", 'prod_dir': "alu_12", 'remoterelease': 12, 'last_update_time': 100, 'last_modified': 100, 'status': {'remove_release': True}}) history = manager.history() self.assertEqual(history[0]['status'], 'unpublished') self.utils.drop_db() @attr('manager') @attr('manager.history') def test_ManagerHistorySessionsHistoryANDSessionDeletedOK(self): """As we kept sessions history, we check old deleted session have 'deleted' with date in sessions""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') os.makedirs(os.path.join(self.utils.data_dir, 'alu', 'alu_12')) manager.bank.bank['production'].append({'session': 100, 'release': 12, 'prod_dir': "/tmp", 'dir_version': "alu", 'remoterelease': 12, 'freeze': False, 'data_dir': "/tmp"}) manager.bank.bank['sessions'].append({'id': 101, 'data_dir': self.utils.data_dir, 'dir_version': "alu", 'prod_dir': "alu_12", 'remoterelease': 12, 'last_update_time': 100, 'last_modified': 100, 'status': {'remove_release': True}}) manager.bank.bank['sessions'].append({'id': 98, 'data_dir': self.utils.data_dir, 'dir_version': "alu", 'prod_dir': "alu_12", 'remoterelease': 12, 'last_update_time': 98, 'last_modified': 98, 'status': {'remove_release': True}, 'deleted': 0}) history = manager.history() self.assertEqual(history[1]['status'], 'deleted') self.utils.drop_db() @attr('manager') @attr('manager.bankversions') def test_ManagerSaveBankVersionsNotOK(self): """Check method throw exception, can't create directory""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') manager.bank.bank['properties']['owner'] = manager.config.get('GENERAL', 'admin') back_log = os.environ["LOGNAME"] os.environ["LOGNAME"] = manager.config.get('GENERAL', 'admin') with self.assertRaises(SystemExit): manager.save_banks_version(bank_file='/not_found/saved_versions.txt') # Reset to the right user name as previously os.environ["LOGNAME"] = back_log self.utils.drop_db() @attr('manager') @attr('manager.bankversions') def test_ManagerSaveBankVersionsThrowsException(self): """Check method throw exception, can't access file""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') manager.bank.bank['properties']['owner'] = manager.config.get('GENERAL', 'admin') back_log = os.environ["LOGNAME"] outputfile = os.path.join(self.utils.data_dir, 'saved_versions.txt') open(outputfile, 'w').close() os.chmod(outputfile, self.utils.no_dir_rights) os.environ["LOGNAME"] = manager.config.get('GENERAL', 'admin') with self.assertRaises(SystemExit): manager.save_banks_version(bank_file=outputfile) # Reset to the right user name as 
previously os.environ["LOGNAME"] = back_log @attr('manager') @attr('manager.bankversions') def test_ManagerSaveBankVersionsNoFileOK(self): """Check save_banks_version() prints to output in simulate mode when no bank_file is given""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() manager = Manager(bank='alu') manager.set_simulate(True) manager.bank.banks.update({'name': 'alu'}, {'$set': {'current': now}, '$push': { 'production': {'session': now, 'remoterelease': '54', 'size': '100Mo'}}}) # Prints on output using simulate mode self.assertTrue(manager.save_banks_version()) self.utils.drop_db() @attr('manager') @attr('manager.bankversions') def test_ManagerSaveBankVersionsFileContentOK(self): """Check save_banks_version() writes the expected formatted line to the given bank_file""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() output_file = os.path.join(self.utils.data_dir, 'saved_version.txt') manager = Manager(bank='alu') manager.bank.banks.update({'name': 'alu'}, {'$set': {'current': now}, '$push': { 'production': {'session': now, 'remoterelease': '54', 'size': '100Mo'}}}) # Use a simple line pattern so the written content is easy to check back_patt = Manager.SAVE_BANK_LINE_PATTERN Manager.SAVE_BANK_LINE_PATTERN = "%s_%s_%s_%s_%s" manager.save_banks_version(bank_file=output_file) line = Manager.SAVE_BANK_LINE_PATTERN % ('alu', "Release " + '54', Utils.time2datefmt(now), '100Mo', manager.bank.config.get('server')) with open(output_file, 'r') as of: for oline in of: self.assertEqual(line, oline) # Restore default pattern Manager.SAVE_BANK_LINE_PATTERN = back_patt self.utils.drop_db() @attr('manager') @attr('manager.bankversions') def test_ManagerSaveBankVersionsManagerVerboseOK(self): """Check save_banks_version() runs OK in verbose mode""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() manager = Manager(bank='alu') manager.bank.banks.update({'name': 'alu'}, {'$set': {'current': now}, '$push': { 'production': {'session': now, 'remoterelease': '54', 'size': '100Mo'}}}) # Set verbose mode Manager.set_verbose(True) self.assertTrue(manager.save_banks_version()) # Unset verbose mode Manager.set_verbose(False) self.utils.drop_db() @attr('manager') @attr('manager.nextrelease') def test_ManagerNextReleaseAlreadySet(self): """Check we directly return next_release if already set""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') manager._next_release = '55' self.assertEqual(manager.next_release(), '55') self.utils.drop_db() @attr('manager') @attr('manager.nextrelease') def test_ManagerNextReleaseThrowsNoProduction(self): """Check method throws an exception if no production yet""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() manager = Manager(bank='alu') manager.bank.bank['current'] = now with self.assertRaises(SystemExit): manager.next_release() self.utils.drop_db() @attr('manager') @attr('manager.nextrelease') def test_ManagerNextReleaseThrowsNoSessions(self): """Check method throws an exception if no 'sessions'""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() manager = Manager(bank='alu') manager.bank.bank['current'] = now manager.bank.bank['production'].append({'session': now, 'remoterelease': '55'}) manager.bank.bank['production'].append({'session': now + 1, 'remoterelease': '56'}) del manager.bank.bank['sessions'] with self.assertRaises(SystemExit): manager.next_release() self.utils.drop_db() @attr('manager') @attr('manager.nextrelease') def test_ManagerNextReleaseReturnsNone(self):
"""Check method returns a None release because 'workflow_status' is False""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() manager = Manager(bank='alu') manager.bank.bank['current'] = now manager.bank.bank['production'].append({'session': now}) manager.bank.bank['production'].append({'session': now + 1}) manager.bank.bank['sessions'].append({'id': now, 'remoterelease': '54'}) manager.bank.bank['sessions'].append({'id': now + 1, 'remoterelease': '55', 'workflow_status': False}) self.assertIsNone(manager.next_release()) self.utils.drop_db() @attr('manager') @attr('manager.nextrelease') def test_ManagerNextReleasePassesContinue(self): """Check method get the right release (next) using 'remoterelease' from sessions""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() manager = Manager(bank='alu') manager.bank.bank['current'] = now manager.bank.bank['production'].append({'session': now}) manager.bank.bank['production'].append({'session': now + 1}) manager.bank.bank['sessions'].append({'id': now, 'remoterelease': '54'}) manager.bank.bank['sessions'].append({'id': now + 1, 'remoterelease': '55', 'workflow_status': True}) self.assertEqual('55', manager.next_release()) self.utils.drop_db() @attr('manager') @attr('manager.nextrelease') def test_ManagerNextReleasePassesContinue(self): """Check method get the right release (next) using 'release' from sessions""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() manager = Manager(bank='alu') manager.bank.bank['current'] = now manager.bank.bank['production'].append({'session': now}) manager.bank.bank['production'].append({'session': now + 1}) manager.bank.bank['sessions'].append({'id': now, 'release': '54'}) manager.bank.bank['sessions'].append({'id': now + 1, 'release': '55', 'workflow_status': True}) self.assertEqual('55', manager.next_release()) self.utils.drop_db() @attr('manager') @attr('manager.nextrelease') def test_ManagerNextReleaseSessionsMissingReleaseFieldsThorws(self): """Check method throws because nor 'remoterelease' nor 'release' found in sessions""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() manager = Manager(bank='alu') manager.bank.bank['current'] = now manager.bank.bank['production'].append({'session': now}) manager.bank.bank['production'].append({'session': now + 1}) manager.bank.bank['sessions'].append({'id': now}) manager.bank.bank['sessions'].append({'id': now + 1, 'workflow_status': True}) with self.assertRaises(SystemExit): manager.next_release() self.utils.drop_db() @attr('manager') @attr('manager.nextswitch') def test_ManagerNextSwitchDateThrows(self): """Check the method throws when wrong arg passed""" manager = Manager() with self.assertRaises(SystemExit): manager.next_switch_date(week='wrong') @attr('manager') @attr('manager.nextswitch') def test_ManagerNextSwitchDateNoConfigThrows(self): """Check the method throws when wrong arg passed""" manager = Manager() manager.config.remove_option('MANAGER', 'switch.week') with self.assertRaises(SystemExit): manager.next_switch_date() @attr('manager') @attr('manager.nextswitch') def test_ManagerNextSwitchDateConfigThrows(self): """Check the method throws when wrong config""" manager = Manager() manager.config.set('MANAGER', 'switch.week', 'wrong') with self.assertRaises(SystemExit): manager.next_switch_date() @attr('manager') @attr('manager.nextswitch') def test_ManagerNextSwitchWithConfigEachWeek(self): """Check 
the method gives the right next bank switch date. We are expecting the same week as today""" manager = Manager() # Get the current week week_num = datetime.today().isocalendar()[1] manager.config.set('MANAGER', 'switch.week', 'each') returned = manager.next_switch_date() self.assertEqual(week_num, returned.isocalendar()[1]) @attr('manager') @attr('manager.nextswitch') def test_ManagerNextSwitchWithConfigNextWeek(self): """Check the method gives the right next bank switch date. We are expecting next week""" manager = Manager() # Get the current week week_num = datetime.today().isocalendar()[1] # We are setting config value to get value for next week if not week_num % 2: manager.config.set('MANAGER', 'switch.week', 'odd') else: manager.config.set('MANAGER', 'switch.week', 'even') returned = manager.next_switch_date() # As the expected week must be next week, it is current week number + 1 self.assertEqual(week_num + 1, returned.isocalendar()[1]) @attr('manager') @attr('manager.nextswitch') def test_ManagerNextSwitchWithConfigThisWeek(self): """Check the method gives the right next bank switch date. We are expecting the current week""" manager = Manager() # Get the current week week_num = datetime.today().isocalendar()[1] # We are setting config value to get value for this week if not week_num % 2: manager.config.set('MANAGER', 'switch.week', 'even') else: manager.config.set('MANAGER', 'switch.week', 'odd') returned = manager.next_switch_date() self.assertEqual(week_num, returned.isocalendar()[1]) @attr('manager') @attr('manager.setbank') def test_ManagerSetBankOK(self): """Check set_bank() accepts a valid Bank instance""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager() from biomaj.bank import Bank b = Bank('alu', no_log=True) self.assertTrue(manager.set_bank(bank=b)) self.utils.drop_db() @attr('manager') @attr('manager.setbank') def test_ManagerSetBankNOTOK(self): """Check set_bank() returns False when no bank is given""" manager = Manager() self.assertFalse(manager.set_bank()) @attr('manager') @attr('manager.setbank') def test_ManagerSetBankWrongInstanceOK(self): """Check set_bank() returns False when the argument is not a Bank instance""" manager = Manager() self.assertFalse(manager.set_bank(bank=Manager())) @attr('manager') @attr('manager.setbank') def test_ManagerSetBankFromNameFalse(self): """Check set_bank_from_name() returns False with an empty bank name""" manager = Manager() self.assertFalse(manager.set_bank_from_name("")) @attr('manager') @attr('manager.setbank') def test_ManagerSetBankFromNameThrowsWrongBankName(self): """Check method throws exception with wrong bank name""" manager = Manager() with self.assertRaises(SystemExit): manager.set_bank_from_name("no_bank") @attr('manager') @attr('manager.setbank') def test_ManagerSetBankFromNameOK(self): """Check set_bank_from_name() works with a valid bank name""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager() self.assertTrue(manager.set_bank_from_name("alu")) self.utils.drop_db() @attr('manager') @attr('decorators.deprecated') @attr('manager.setsequencecount') def test_ManagerSetSequenceCountSeqFileThrows(self): """Check missing arg seq_file throws""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') with self.assertRaises(SystemExit): manager.set_sequence_count(seq_count=1, release="54") self.utils.drop_db() @attr('manager') @attr('decorators.deprecated') @attr('manager.setsequencecount') def test_ManagerSetSequenceCountSeqFileNotHereThrows(self): """Check a seq_file that does not exist throws""" self.utils.copy_file(ofile='alu.properties', 
todir=self.utils.conf_dir) manager = Manager(bank='alu') with self.assertRaises(SystemExit): manager.set_sequence_count(seq_file="/not_found/file.fa", seq_count=1, release="54") self.utils.drop_db() @attr('manager') @attr('decorators.deprecated') @attr('manager.setsequencecount') def test_ManagerSetSequenceCountSeqCountThrows(self): """Check missing args throws""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') os.makedirs(os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'blast2')) open(os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'blast2', 'news1.txt'), 'w').close() with self.assertRaises(SystemExit): manager.set_sequence_count(seq_file=os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'blast2', 'news1.txt'), release="54") self.utils.drop_db() @attr('manager') @attr('decorators.deprecated') @attr('manager.setsequencecount') def test_ManagerSetSequenceCountReleaseThrows(self): """Check missing args throws""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') os.makedirs(os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'blast2')) open(os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'blast2', 'news1.txt'), 'w').close() with self.assertRaises(SystemExit): manager.set_sequence_count(seq_file=os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'blast2', 'news1.txt'), seq_count=10) self.utils.drop_db() @attr('manager') @attr('decorators.deprecated') @attr('manager.setsequencecount') def test_ManagerSetSequenceCountReturnsTrue(self): """Check method returns True""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') manager.bank.banks.update({'name': 'alu'}, {'$set': {'production.0.release': "54"}}) os.makedirs(os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'blast2')) open(os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'blast2', 'news1.txt'), 'w').close() # self.assertTrue(manager.set_sequence_count(seq_file=os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'blast2', # 'news1.txt'), # seq_count=10, release="54")) with self.assertRaises(SystemExit): self.assertTrue(manager.set_sequence_count(seq_file=os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'blast2', 'news1.txt'), seq_count=10, release="54")) self.utils.drop_db() @attr('manager') @attr('decorators.deprecated') @attr('manager.setsequencecount') def test_ManagerSetSequenceCountUpdateOKReturnsTrue(self): """Check method update db and returns True""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') Manager.set_verbose(True) manager.bank.banks.update({'name': 'alu'}, {'$set': {'production.0.release': "54"}}) manager.bank.banks.update({'name': 'alu', 'production.release': "54"}, {'$push': {'production.$.files_info': {'name': os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'blast2', 'news1.txt')}}}) os.makedirs(os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'blast2')) open(os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'blast2', 'news1.txt'), 'w').close() # self.assertTrue(manager.set_sequence_count(seq_file=os.path.join(self.utils.data_dir, # 'alu', # 'alu_54', # 'blast2', # 'news1.txt'), # seq_count=10, release="54")) with self.assertRaises(SystemExit): self.assertTrue(manager.set_sequence_count(seq_file=os.path.join(self.utils.data_dir, 'alu', 'alu_54', 'blast2', 'news1.txt'), seq_count=10, release="54")) self.utils.drop_db() @attr('manager') @attr('manager.setverbose') def 
test_ManagerSetVerboseReturnsTrue(self): """Check set verbose set the correct boolean""" self.assertTrue(Manager.set_verbose("OK")) @attr('manager') @attr('manager.setverbose') def test_ManagerSetVerboseReturnsFalse(self): """Check set verbose set the correct boolean""" self.assertFalse(Manager.set_verbose("")) @attr('manager') @attr('manager.setsimulate') def test_ManagerSetSimulateReturnsTrue(self): """Check set simulate set the correct boolean""" self.assertTrue(Manager.set_simulate("OK")) @attr('manager') @attr('manager.setsimulate') def test_ManagerSetSimulateReturnsFalse(self): """Check set simulate set the correct boolean""" self.assertFalse(Manager.set_simulate(False)) @attr('manager') @attr('manager.switch') def test_ManagerBankSwitchBankIsLocked(self): """Check manager.can_switch returns False because bank is locked""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') Manager.set_verbose(True) lock_file = os.path.join(manager.bank.config.get('lock.dir'), manager.bank.name + '.lock') with open(lock_file, 'a'): self.assertFalse(manager.can_switch()) os.remove(lock_file) self.utils.drop_db() @attr('manager') @attr('manager.showneedupdate') def test_ManagerShowNeedUpdateCannotSwitch(self): """Check method returns empty dict because bank cannot switch""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') # setting current to None means no current bank published. manager.bank.bank['current'] = None returned = manager.show_need_update() self.assertListEqual(returned, []) @attr('manager') @attr('manager.showneedupdate') def test_ManagerShowNeedUpdateCanSwitchOneBank(self): """Check method returns dict because bank can switch""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) # We created these 2 managers to set 2 banks in db alu = Manager(bank='alu') # We set 'current' field to avoid to return False with 'bank_is_published' now = time.time() # setting current to None means no current bank published. 
alu.bank.bank['current'] = now alu.bank.bank['last_update_session'] = now + 1 alu.bank.bank['production'].append({'session': now}) alu.bank.bank['production'].append({'session': now + 1}) alu.bank.bank['sessions'].append({'id': now, 'remoterelease': '54'}) alu.bank.bank['sessions'].append({'id': now + 1, 'remoterelease': '55', 'workflow_status': True}) returned = alu.show_need_update() self.assertListEqual(returned, [{'name': 'alu', 'current_release': '54', 'next_release': '55'}]) @attr('manager') @attr('manager.showneedupdate') def test_ManagerShowNeedUpdateCanSwitchTwoBank(self): """Check method returns dict because bank can switch""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) self.utils.copy_file(ofile='minium.properties', todir=self.utils.conf_dir) now = time.time() # We created these 2 managers to set 2 banks in db alu = Manager(bank='alu') minium = Manager(bank='minium') # We update the bank in db to mimic bank ready to switch alu.bank.banks.update({'name': 'alu'}, {'$set': {'current': now, 'production': [{'session': now + 1}]}}) alu.bank.banks.update({'name': 'alu'}, {'$set': {'last_update_session': now + 1}}) alu.bank.banks.update({'name': 'alu'}, {'$set': {'sessions': [{'id': now + 1, 'workflow_status': True, 'remoterelease': '54'}]}}) minium.bank.banks.update({'name': 'minium'}, {'$set': {'current': now, 'production': [{'session': now + 1}]}}) minium.bank.banks.update({'name': 'minium'}, {'$set': {'last_update_session': now + 1}}) minium.bank.banks.update({'name': 'minium'}, {'$set': {'sessions': [{'id': now + 1, 'workflow_status': True, 'remoterelease': '55'}]}}) # We reload the banks manager = Manager() returned = manager.show_need_update() self.assertEqual(len(returned), 2) self.utils.drop_db() @attr('manager') @attr('manager.switch') def test_ManagerBankSwitchBankNotPublished(self): """Check manager.can_switch returns False because bank not published yet""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') manager.set_verbose(True) # To be sure we set 'current' from MongoDB to null manager.bank.bank['current'] = None self.assertFalse(manager.can_switch()) self.utils.drop_db() @attr('manager') @attr('manager.switch') def test_ManagerBankSwitchBankUpdateNotReady(self): """Check manager.can_switch returns False because update not ready""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') # We set 'current' field to avoid to return False with 'bank_is_published' now = time.time() manager.set_verbose(True) # To be sure we set 'current' from MongoDB to null manager.bank.bank['current'] = now manager.bank.bank['last_update_session'] = now manager.bank.bank['sessions'].append({'id': now, 'workflow_status': True}) self.assertFalse(manager.can_switch()) self.utils.drop_db() @attr('manager') @attr('manager.switch') def test_ManagerBankSwitchBankLastSessionFailed(self): """Check manager.can_switch returns False because last session failed""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') # We set 'current' field to avoid to return False with 'bank_is_published' now = time.time() manager.set_verbose(True) manager.bank.bank['current'] = now manager.bank.bank['last_update_session'] = now manager.bank.bank['sessions'].append({'id': now, 'workflow_status': False}) self.assertFalse(manager.can_switch()) self.utils.drop_db() @attr('manager') @attr('manager.switch') def test_ManagerBankSwitch_SwitchTrue(self): 
"""Check manager.can_switch returns True""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') # We set 'current' field to avoid to return False with 'bank_is_published' now = time.time() manager.bank.bank['current'] = now # To be sure we set 'current' from MongoDB to null manager.bank.bank['last_update_session'] = now + 1 manager.bank.bank['production'].append({'session': now + 1}) manager.bank.bank['sessions'].append({'id': now + 1, 'workflow_status': True}) self.assertTrue(manager.can_switch()) self.utils.drop_db() @attr('manager') @attr('manager.updateready') def test_ManagerBankUpdateReadyRaisesErrorOK(self): """Check the method raises exception""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) manager = Manager(bank='alu') Manager.set_verbose(True) self.assertFalse(manager.update_ready()) self.utils.drop_db() @attr('manager') @attr('manager.updateready') def test_ManagerBankUpdateReadyLastProductionOKReturnsNone(self): """Check method returns None when 'production' is empty through (get_last_production_ok)""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() manager = Manager(bank='alu') manager.bank.bank['last_update_session'] = now manager.bank.bank['production'] = [] self.assertFalse(manager.update_ready()) @attr('manager') @attr('manager.updateready') def test_ManagerBankUpdateReadyWithCurrentTrue(self): """Check the method returns True, current != last_update_session and production + sessions""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() manager = Manager(bank='alu') manager.bank.bank['current'] = now manager.bank.bank['last_update_session'] = now + 1 manager.bank.bank['production'].append({'session': now + 1}) manager.bank.bank['sessions'].append({'id': now + 1, 'workflow_status': True}) self.assertTrue(manager.update_ready()) self.utils.drop_db() @attr('manager') @attr('manager.updateready') def test_ManagerBankUpdateReadyWithCurrentFalse(self): """Check the method returns False""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() manager = Manager(bank='alu') manager.set_verbose(True) manager.bank.bank['current'] = now manager.bank.bank['last_update_session'] = now self.assertFalse(manager.update_ready()) self.utils.drop_db() @attr('manager') @attr('manager.updateready') def test_ManagerBankUpdateReadyWithProductionAndContinueFalse(self): """Check the method returns False and current set and session has its id (pass through continue statement)""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() manager = Manager(bank='alu') manager.set_verbose(True) manager.bank.bank['current'] = now manager.bank.bank['last_update_session'] = now + 1 manager.bank.bank['production'].append({'session': now}) manager.bank.bank['production'].append({'session': now + 1}) manager.bank.bank['sessions'].append({'id': now + 1, 'workflow_status': False}) self.assertFalse(manager.update_ready()) self.utils.drop_db() @attr('manager') @attr('manager.updateready') def test_ManagerBankUpdateReadyNoSessionFalse(self): """Check the method returns False because no 'session'""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() manager = Manager(bank='alu') manager.set_verbose(True) del manager.bank.bank['current'] manager.bank.bank['last_update_session'] = now manager.bank.bank['production'].append({'session': now}) 
self.assertFalse(manager.update_ready()) self.utils.drop_db() @attr('manager') @attr('manager.updateready') def test_ManagerBankUpdateReadyWithNoProductionThrows(self): """Check the method returns throws because no 'production'""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() manager = Manager(bank='alu') manager.set_verbose(True) del manager.bank.bank['current'] del manager.bank.bank['production'] manager.bank.bank['last_update_session'] = now with self.assertRaises(SystemExit): self.assertFalse(manager.update_ready()) self.utils.drop_db() @attr('manager') @attr('manager.updateready') def test_ManagerBankUpdateReadyWithSessionsFalse(self): """Check the method returns using 'sessions'""" self.utils.copy_file(ofile='alu.properties', todir=self.utils.conf_dir) now = time.time() manager = Manager(bank='alu') del manager.bank.bank['current'] manager.bank.bank['last_update_session'] = now manager.bank.bank['production'].append({'session': now + 1}) manager.bank.bank['sessions'].append({'id': now, 'remoterelease': '54'}) manager.bank.bank['sessions'].append({'id': now + 1, 'remoterelease': '55', 'workflow_status': False}) self.assertFalse(manager.update_ready()) self.utils.drop_db() @attr('manager') @attr('manager.command') def test_ManagerCommandCheckConfigThrows(self): """Check method that check config for jobs throws ok""" manager = Manager() manager.config.remove_section('JOBS') with self.assertRaises(SystemExit): manager._check_config_jobs('restart.stopped.jobs') @attr('manager') @attr('manager.command') def test_ManagerCommandCheckConfigStop(self): """Check removing info from config file returns False""" manager = Manager() manager.config.remove_option('JOBS', 'stop.running.jobs.exe') # Grant usage for current user if 'LOGNAME' in os.environ: back_log = os.environ["LOGNAME"] else: back_log = Utils.user() os.environ['LOGNAME'] = manager.config.get('GENERAL', 'admin') self.assertFalse(manager.stop_running_jobs()) os.environ["LOGNAME"] = back_log @attr('manager') @attr('manager.command') def test_ManagerCommandCheckConfigRestart(self): """Check removing info from config file returns False""" manager = Manager() manager.config.remove_option('JOBS', 'restart.stopped.jobs.exe') # Grant usage for current user if 'LOGNAME' in os.environ: back_log = os.environ["LOGNAME"] else: back_log = Utils.user() os.environ['LOGNAME'] = manager.config.get('GENERAL', 'admin') self.assertFalse(manager.restart_stopped_jobs()) os.environ["LOGNAME"] = back_log @attr('manager') @attr('manager.command') def test_ManagerCommandRestartJobsScriptOK(self): """Check restart jobs runs OK""" manager = Manager() # Grant usage for current user if 'LOGNAME' in os.environ: back_log = os.environ["LOGNAME"] else: back_log = Utils.user() os.environ['LOGNAME'] = manager.config.get('GENERAL', 'admin') self.assertTrue(manager.restart_stopped_jobs()) os.environ["LOGNAME"] = back_log @attr('manager') @attr('manager.command') def test_ManagerCommandRestartJobsScriptDoesNotExists(self): """Check restart jobs runs OK""" manager = Manager() # Grant usage for current user if 'LOGNAME' in os.environ: back_log = os.environ["LOGNAME"] else: back_log = Utils.user() os.environ['LOGNAME'] = manager.config.get('GENERAL', 'admin') manager.config.set('JOBS', 'restart.stopped.jobs.exe', '/nobin/cmd') with self.assertRaises(SystemExit): manager.restart_stopped_jobs() os.environ["LOGNAME"] = back_log @attr('manager') @attr('manager.command') def test_ManagerCommandStopJobsScriptOK(self): """Check restart jobs 
runs OK""" manager = Manager() # Grant usage for current user if 'LOGNAME' in os.environ: back_log = os.environ["LOGNAME"] else: back_log = Utils.user() os.environ['LOGNAME'] = manager.config.get('GENERAL', 'admin') self.assertTrue(manager.stop_running_jobs()) os.environ["LOGNAME"] = back_log @attr('manager') @attr('manager.command') def test_ManagerCommandStopScriptDoesNotExists(self): """Check restart jobs runs OK""" manager = Manager() # Grant usage for current user if 'LOGNAME' in os.environ: back_log = os.environ["LOGNAME"] else: back_log = Utils.user() os.environ['LOGNAME'] = manager.config.get('GENERAL', 'admin') manager.config.set('JOBS', 'stop.running.jobs.exe', '/nobin/cmd') with self.assertRaises(SystemExit): manager.stop_running_jobs() os.environ["LOGNAME"] = back_log @attr('manager') @attr('manager.command') def test_ManagerRunCommandWithExtraArgsOK(self): """Check the addition of extra args onto the command line is OK""" manager = Manager() # Grans usage for current user if 'LOGNAME' in os.environ: back_log = os.environ["LOGNAME"] else: back_log = Utils.user() os.environ['LOGNAME'] = manager.config.get('GENERAL', 'admin') self.assertTrue(manager.stop_running_jobs(args=['EXTRA ARGS'])) os.environ["LOGNAME"] = back_log @attr('manager') @attr('manager.command') def test_ManagerRunCommandWithExtraArgsNotListThrows(self): """Check the method throws exception when extra Args for command line is not a List""" manager = Manager() # Grant usage for current user if 'LOGNAME' in os.environ: back_log = os.environ["LOGNAME"] else: back_log = Utils.user() os.environ["LOGNAME"] = manager.config.get('GENERAL', 'admin') with self.assertRaises(SystemExit): manager.stop_running_jobs(args="NOT A LIST AS ARGS") os.environ["LOGNAME"] = back_log @attr('manager') @attr('manager.command') def test_ManagerLaunchCommandOK(self): """Check a command started is OK""" manager = Manager() self.assertTrue(manager._run_command(exe='ls', args=['/tmp'], quiet=True)) @attr('manager') @attr('manager.command') def test_ManagerLaunchCommandError(self): """Check a wrong return launched command""" manager = Manager() with self.assertRaises(SystemExit): manager._run_command(exe='ls', args=['/notfound'], quiet=True) @attr('manager') @attr('manager.command') def test_ManagerRunCommandErrorNoExe(self): """Check method throws error when no 'exe'""" manager = Manager() with self.assertRaises(SystemExit): manager._run_command(args=['foobar'], quiet=True) @attr('manager') @attr('manager.command') def test_ManagerRunCommandErrorNoRights(self): """Check method throws error we can run command, no rights""" manager = Manager() with self.assertRaises(SystemExit): manager._run_command(exe='chmod', args=['-x', '/bin/ls'], quiet=False) @attr('manager') @attr('manager.command') def test_ManagerRunCommandErrorCantRunCommand(self): """Check method throws error command does not exist""" manager = Manager() with self.assertRaises(SystemExit): manager._run_command(exe='/bin/fakebin', args=['/usr/local'], quiet=True) class TestBiomajManagerPlugins(unittest.TestCase): """Class for testing biomajmanager.plugins class""" def setUp(self): """Setup stuff""" self.utils = UtilsForTests() self.utils.copy_plugins() # Make our test global.properties set as env var os.environ['BIOMAJ_CONF'] = self.utils.global_properties def tearDown(self): """Clean""" self.utils.clean() @attr('plugins') def test_PluginLoadErrorNoManager(self): """Check we've got an exception thrown when Plugin Object is build without manager as args""" with 
self.assertRaises(SystemExit): Plugins() @attr('plugins') def test_PluginsLoadedOK_AsStandAlone(self): """Check the Plugins Object can be build as a standalone object""" manager = Manager() plugins = Plugins(manager=manager) self.assertIsInstance(plugins, Plugins) @attr('plugins') @attr('plugins.loading') def test_PluginsLoaded(self): """Check a list of plugins are well loaded""" manager = Manager() manager.load_plugins() self.assertEqual(manager.plugins.myplugin.get_name(), 'myplugin') self.assertEqual(manager.plugins.anotherplugin.get_name(), 'anotherplugin') @attr('plugins') @attr('plugins.listplugins') def test_PluginsListPlugins(self): """Check method returns right list of configured plugins from file""" manager = Manager() returned = manager.list_plugins() expected = ['myplugin', 'anotherplugin'] self.assertListEqual(expected, returned) @attr('plugins') @attr('plugins.listplugins') def test_PluginsListPluginsWithEmptyValue(self): """Check method returns right list of configured plugins from file, we introduced some empty lines""" manager = Manager() manager.config.set('PLUGINS', 'plugins.list', 'myplugin,,,anotherplugin') returned = manager.list_plugins() expected = ['myplugin', 'anotherplugin'] self.assertListEqual(expected, returned) @attr('plugins') @attr('plugins.loading') def test_PluginsLoadingNoSection(self): """Check the availability of section 'PLUGINS' is correctly checked""" manager = Manager() manager.config.remove_section('PLUGINS') with self.assertRaises(SystemExit): manager.load_plugins() @attr('plugins') @attr('plugins.loading') def test_PluginsLoadingNoPLuginsDir(self): """Check the plugins.dir value is correctly checked""" manager = Manager() manager.config.remove_option('MANAGER', 'plugins.dir') with self.assertRaises(SystemExit): manager.load_plugins() @attr('plugins') @attr('plugins.loading') def test_PluginsLoadingNoPLuginsList(self): """Check the plugins.dir value is correctly checked""" manager = Manager() manager.config.remove_option('PLUGINS', 'plugins.list') with self.assertRaises(SystemExit): manager.load_plugins() @attr('plugins') @attr('plugins.loading') def test_PluginsLoadingNoPluginsDirExists(self): """Check the plugins.dir path is correctly checked""" manager = Manager() manager.config.set('MANAGER', 'plugins.dir', '/notfound') with self.assertRaises(SystemExit): manager.load_plugins() @attr('plugins') @attr('plugins.loading') def test_PluginsLoadingNoConfig(self): """Check config instance is OK""" manager = Manager() manager.load_plugins() try: from ConfigParser import RawConfigParser except ImportError: from configparser import RawConfigParser self.assertIsInstance(manager.plugins.myplugin.get_config(), RawConfigParser) @attr('plugins') def test_PluginsLoadingNoManager(self): """Check manager instance is OK""" manager = Manager() manager.load_plugins() self.assertIsInstance(manager.plugins.myplugin.get_manager(), Manager) @attr('plugins') def test_PluginsCheckConfigValues(self): """Check the plugins config values""" manager = Manager() manager.load_plugins() self.assertEqual(manager.plugins.myplugin.get_cfg_name(), 'myplugin') self.assertEqual(manager.plugins.myplugin.get_cfg_value(), '1') self.assertEqual(manager.plugins.anotherplugin.get_cfg_name(), 'anotherplugin') self.assertEqual(manager.plugins.anotherplugin.get_cfg_value(), '2') @attr('plugins') def test_PluginsCheckMethodValue(self): """Check the value returned by method is OK""" manager = Manager() manager.load_plugins() self.assertEqual(manager.plugins.myplugin.get_value(), 1) 
self.assertEqual(manager.plugins.myplugin.get_string(), 'test') self.assertEqual(manager.plugins.anotherplugin.get_value(), 1) self.assertEqual(manager.plugins.anotherplugin.get_string(), 'test') @attr('plugins') def test_PluginsCheckTrue(self): """Check boolean returned by method""" manager = Manager() manager.load_plugins() self.assertTrue(manager.plugins.myplugin.get_true()) self.assertTrue(manager.plugins.anotherplugin.get_true()) @attr('plugins') def test_PluginsCheckFalse(self): """Check boolean returned by method""" manager = Manager() manager.load_plugins() self.assertFalse(manager.plugins.myplugin.get_false()) self.assertFalse(manager.plugins.anotherplugin.get_false()) @attr('plugins') def test_PluginsCheckNone(self): """Check None returned by method""" manager = Manager() manager.load_plugins() self.assertIsNone(manager.plugins.myplugin.get_none()) self.assertIsNone(manager.plugins.anotherplugin.get_none()) @attr('plugins') def test_PluginsCheckException(self): """Check exception returned by method""" manager = Manager() manager.load_plugins() self.assertRaises(Exception, manager.plugins.myplugin.get_exception()) self.assertRaises(Exception, manager.plugins.anotherplugin.get_exception())
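# --- Illustrative note, not part of the original test suite ---
# A minimal sketch of how a suite like this is typically run, assuming the
# @attr decorators used throughout come from nose.plugins.attrib (the import
# sits outside this excerpt, so that is an assumption). With nose, the
# attributes select subsets of tests, for example:
#   nosetests -a manager                # every test tagged @attr('manager')
#   nosetests -a manager.nextrelease    # only the next_release() tests
# The fallback below simply runs everything with the standard unittest runner.
if __name__ == "__main__":
    import unittest
    unittest.main()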
horkko/biomaj-manager
tests/biomaj-manager_tests.py
Python
gpl-2.0
190,492
[ "BLAST" ]
e387f47f36ece342ab1b86c4f480c92f4bc7b50bc31e562eb195937ffc826d63
""" CloudEndpoint is a base class for the clients used to connect to different cloud providers """ from __future__ import print_function from __future__ import division from __future__ import absolute_import __RCSID__ = "$Id$" import os import ssl import time from libcloud import security from libcloud.compute.types import Provider from libcloud.compute.providers import get_driver from libcloud.common.exceptions import BaseHTTPError # DIRAC from DIRAC import gLogger, S_OK, S_ERROR from DIRAC.Core.Utilities.File import makeGuid from DIRAC.Resources.Cloud.Endpoint import Endpoint from DIRAC.Resources.Cloud.Utilities import STATE_MAP class CloudEndpoint(Endpoint): """CloudEndpoint base class""" def __init__(self, parameters=None): super(CloudEndpoint, self).__init__(parameters=parameters) # logger self.log = gLogger.getSubLogger("CloudEndpoint") self.valid = False result = self.initialize() if result["OK"]: self.log.debug("CloudEndpoint created and validated") self.valid = True def initialize(self): # Relax security security.SSL_VERSION = ssl.PROTOCOL_SSLv23 security.VERIFY_SSL_CERT = False # Variables needed to contact the service connDict = {} for var in [ "ex_domain_name", "ex_force_auth_url", "ex_force_service_region", "ex_force_auth_version", "ex_tenant_name", "ex_keyname", "ex_voms_proxy", ]: if var in self.parameters: connDict[var] = self.parameters[var] username = self.parameters.get("User") password = self.parameters.get("Password") for key in connDict: self.log.info("%s: %s" % (key, connDict[key])) # get cloud driver providerName = self.parameters.get("Provider", "OPENSTACK").upper() providerCode = getattr(Provider, providerName) self.driverClass = get_driver(providerCode) self.__driver = self.driverClass(username, password, **connDict) return self.__checkConnection() def __checkConnection(self): """ Checks connection status by trying to list the images. :return: S_OK | S_ERROR """ try: _result = self.__driver.list_images() except Exception as errmsg: return S_ERROR(errmsg) return S_OK() def __getImageByName(self, imageName): """ Given the imageName, returns the current image object from the server. :Parameters: **imageName** - `string` imageName as stored on the OpenStack image repository ( glance ) :return: S_OK( image ) | S_ERROR """ try: images = self.__driver.list_images() except Exception as errmsg: return S_ERROR(errmsg) image = None for im in images: if im.name == imageName: image = im break if image is None: return S_ERROR("Image %s not found" % imageName) return S_OK(image) def __getFlavorByName(self, flavorName): """ Given the flavorName, returns the current flavor object from the server. :Parameters: **flavorName** - `string` flavorName as stored on the OpenStack service :return: S_OK( flavor ) | S_ERROR """ try: flavors = self.__driver.list_sizes() except Exception as errmsg: return S_ERROR(errmsg) flavor = None for fl in flavors: if fl.name == flavorName: flavor = fl if flavor is None: return S_ERROR("Flavor %s not found" % flavorName) return S_OK(flavor) def __getSecurityGroups(self, securityGroupNames=None): """ Given the securityGroupName, returns the current security group object from the server. 
:Parameters: **securityGroupName** - `string` securityGroupName as stored on the OpenStack service :return: S_OK( securityGroup ) | S_ERROR """ if not securityGroupNames: securityGroupNames = [] elif not isinstance(securityGroupNames, list): securityGroupNames = [securityGroupNames] if "default" not in securityGroupNames: securityGroupNames.append("default") try: secGroups = self.__driver.ex_list_security_groups() except Exception as errmsg: return S_ERROR(errmsg) return S_OK([secGroup for secGroup in secGroups if secGroup.name in securityGroupNames]) def createInstances(self, vmsToSubmit): outputDict = {} for nvm in range(vmsToSubmit): instanceID = makeGuid()[:8] createPublicIP = "ipPool" in self.parameters result = self.createInstance(instanceID, createPublicIP) if result["OK"]: node, publicIP = result["Value"] self.log.debug("Created VM instance %s/%s with publicIP %s" % (node.id, instanceID, publicIP)) nodeDict = {} nodeDict["PublicIP"] = publicIP nodeDict["InstanceID"] = instanceID nodeDict["NumberOfProcessors"] = self.flavor.vcpus nodeDict["RAM"] = self.flavor.ram nodeDict["DiskSize"] = self.flavor.disk nodeDict["Price"] = self.flavor.price outputDict[node.id] = nodeDict else: break if not outputDict: # Submission failed return result return S_OK(outputDict) def createInstance(self, instanceID="", createPublicIP=True): """ This creates a VM instance for the given boot image and creates a context script, taken the given parameters. Successful creation returns instance VM Boots a new node on the OpenStack server defined by self.endpointConfig. The 'personality' of the node is done by self.imageConfig. Both variables are defined on initialization phase. The node name has the following format: <bootImageName><contextMethod><time> It boots the node. If IPpool is defined on the imageConfiguration, a floating IP is created and assigned to the node. 
:return: S_OK( ( nodeID, publicIP ) ) | S_ERROR """ if not instanceID: instanceID = makeGuid()[:8] self.parameters["VMUUID"] = instanceID self.parameters["VMType"] = self.parameters.get("CEType", "OpenStack") createNodeDict = {} # Get the image object if "ImageID" in self.parameters: try: image = self.__driver.get_image(self.parameters["ImageID"]) except BaseHTTPError as err: if err.code == 404: # Image not found return S_ERROR("Image with ID %s not found" % self.parameters["ImageID"]) return S_ERROR("Failed to get image for ID %s (%s)" % (self.parameters["ImageID"], str(err))) elif "ImageName" in self.parameters: result = self.__getImageByName(self.parameters["ImageName"]) if not result["OK"]: return result image = result["Value"] else: return S_ERROR("No image specified") createNodeDict["image"] = image # Get the flavor object if "FlavorName" in self.parameters: result = self.__getFlavorByName(self.parameters["FlavorName"]) if not result["OK"]: return result flavor = result["Value"] elif "FlavorID" in self.parameters: flavor = self.__driver.ex_get_size(self.parameters["FlavorID"]) else: return S_ERROR("No flavor specified") self.flavor = flavor createNodeDict["size"] = flavor # Get security groups # if 'ex_security_groups' in self.parameters: # result = self.__getSecurityGroups( self.parameters['ex_security_groups'] ) # if not result[ 'OK' ]: # self.log.error( result[ 'Message' ] ) # return result # self.parameters['ex_security_groups'] = result[ 'Value' ] result = self._createUserDataScript() if not result["OK"]: return result createNodeDict["ex_userdata"] = result["Value"] # Optional node contextualization parameters for param in ["ex_metadata", "ex_pubkey_path", "ex_keyname", "ex_config_drive"]: if param in self.parameters: createNodeDict[param] = self.parameters[param] createNodeDict["name"] = "DIRAC_%s" % instanceID # createNodeDict['ex_config_drive'] = True self.log.verbose("Creating node:") for key, value in createNodeDict.items(): self.log.verbose("%s: %s" % (key, value)) if "networks" in self.parameters: result = self.getVMNetwork() if not result["OK"]: return result createNodeDict["networks"] = result["Value"] if "keyname" in self.parameters: createNodeDict["ex_keyname"] = self.parameters["keyname"] if "availability_zone" in self.parameters: createNodeDict["ex_availability_zone"] = self.parameters["availability_zone"] # Create the VM instance now try: vmNode = self.__driver.create_node(**createNodeDict) except Exception as errmsg: self.log.error("Exception in driver.create_node", errmsg) return S_ERROR(errmsg) publicIP = None if createPublicIP: # Wait until the node is running, otherwise getting public IP fails try: self.__driver.wait_until_running([vmNode], timeout=600) result = self.assignFloatingIP(vmNode) if result["OK"]: publicIP = result["Value"] else: vmNode.destroy() return result except Exception as exc: self.log.debug("Failed to wait node running %s" % str(exc)) vmNode.destroy() return S_ERROR("Failed to wait until the node is Running") return S_OK((vmNode, publicIP)) def getVMNodes(self): """Get all the nodes on the endpoint :return: S_OK(list of Node) / S_ERROR """ try: nodes = self.__driver.list_nodes() except Exception as errmsg: return S_ERROR(errmsg) return S_OK(nodes) def getVMNode(self, nodeID): """ Given a Node ID, returns all its configuration details on a libcloud.compute.base.Node object. :Parameters: **nodeID** - `string` openstack node id ( not uuid ! 
) :return: S_OK( Node ) | S_ERROR """ try: node = self.__driver.ex_get_node_details(nodeID) except Exception as errmsg: # Let's see if the node is in the list of available nodes result = self.getVMNodes() if not result["OK"]: return S_ERROR("Failed to get nodes") nodeList = result["Value"] for nd in nodeList: if nd.id == nodeID: # Let's try again try: node = self.__driver.ex_get_node_details(nodeID) break except Exception as exc: return S_ERROR("Failed to get node details %s" % str(exc)) node = None return S_OK(node) def getVMStatus(self, nodeID): """ Get the status for a given node ID. libcloud translates the status into a digit from 0 to 4 using a many-to-one relation ( ACTIVE and RUNNING -> 0 ), which means we cannot undo that translation. It uses an intermediate state mapping dictionary, STATE_MAP, which we use here inverted to return the status as a meaningful string. The five possible states are ( ordered from 0 to 4 ): RUNNING, REBOOTING, TERMINATED, PENDING & UNKNOWN. :Parameters: **uniqueId** - `string` openstack node id ( not uuid ! ) :return: S_OK( status ) | S_ERROR """ result = self.getVMNode(nodeID) if not result["OK"]: return result state = result["Value"].state if state not in STATE_MAP: return S_ERROR("State %s not in STATE_MAP" % state) return S_OK(STATE_MAP[state]) def getVMNetwork(self, networkNames=None): """Get the network objects corresponding to the given network names :param networkNames: list of network names; defaults to the 'networks' parameter :return: S_OK( list of network objects ) | S_ERROR """ if not networkNames: nameList = [] else: nameList = list(networkNames) resultList = [] if not nameList: nameList = self.parameters.get("networks") if not nameList: return S_ERROR("Network names are not specified") else: nameList = nameList.split(",") result = self.__driver.ex_list_networks() for oNetwork in result: if oNetwork.name in nameList: resultList.append(oNetwork) return S_OK(resultList) def stopVM(self, nodeID, publicIP=""): """ Given the node ID it gets the node details, which are used to destroy the node making use of the libcloud.openstack driver. If there is any public IP ( floating IP ) assigned, it is freed as well. :Parameters: **uniqueId** - `string` openstack node id ( not uuid ! ) **public_ip** - `string` public IP assigned to the node if any :return: S_OK | S_ERROR """ # Get Node object with node details result = self.getVMNode(nodeID) if not result["OK"]: return result node = result["Value"] if node is None: # Node does not exist return S_OK() nodeIP = node.public_ips[0] if node.public_ips else None if not publicIP and nodeIP is not None: publicIP = nodeIP # Delete floating IP if any if publicIP: result = self.deleteFloatingIP(publicIP, node) if not result["OK"]: self.log.error("Failed in deleteFloatingIP:", result["Message"]) # Destroy the VM instance if node is not None: try: result = self.__driver.destroy_node(node) if not result: return S_ERROR("Failed to destroy node: %s" % node.id) except Exception as errmsg: return S_ERROR(errmsg) return S_OK() def getVMPool(self, poolName): try: poolList = self.__driver.ex_list_floating_ip_pools() for pool in poolList: if pool.name == poolName: return S_OK(pool) except Exception as errmsg: return S_ERROR(errmsg) return S_ERROR("IP Pool with the name %s not found" % poolName) def assignFloatingIP(self, node): """ Given a node, assign a floating IP from the ipPool defined on the imageConfiguration on the CS.
:Parameters: **node** - `libcloud.compute.base.Node` node object with the vm details :return: S_OK( public_ip ) | S_ERROR """ ipPool = self.parameters.get("ipPool") if ipPool: result = self.getVMPool(ipPool) if not result["OK"]: return result pool = result["Value"] try: floatingIP = pool.create_floating_ip() # Add sleep between creation and assignment time.sleep(60) self.__driver.ex_attach_floating_ip_to_node(node, floatingIP) publicIP = floatingIP.ip_address return S_OK(publicIP) except Exception as errmsg: return S_ERROR(errmsg) else: return S_ERROR("No IP pool specified") def getVMFloatingIP(self, publicIP): # We are still with IPv4 publicIP = publicIP.replace("::ffff:", "") ipPool = self.parameters.get("ipPool") if ipPool: try: floatingIP = None poolList = self.__driver.ex_list_floating_ip_pools() for pool in poolList: if pool.name == ipPool: ipList = pool.list_floating_ips() for ip in ipList: if ip.ip_address == publicIP: floatingIP = ip break break return S_OK(floatingIP) except Exception as errmsg: return S_ERROR(errmsg) else: return S_ERROR("No IP pool specified") def deleteFloatingIP(self, publicIP, node): """ Deletes a floating IP <public_ip> from the server. :param str publicIP: public IP to be deleted :param object node: node to which IP is attached :return: S_OK | S_ERROR """ # We are still with IPv4 publicIP = publicIP.replace("::ffff:", "") result = self.getVMFloatingIP(publicIP) if not result["OK"]: return result floatingIP = result["Value"] if floatingIP is None: return S_OK() try: if node is not None: self.__driver.ex_detach_floating_ip_from_node(node, floatingIP) floatingIP.delete() return S_OK() except Exception as errmsg: return S_ERROR(errmsg)
ic-hep/DIRAC
src/DIRAC/Resources/Cloud/CloudEndpoint.py
Python
gpl-3.0
18,218
[ "DIRAC" ]
2f74e503640f7951cb575d9ac715300f3173c102e0d3440195f7a5121761f06d
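The endpoint methods above all report results through DIRAC's S_OK / S_ERROR convention, which the calling code unpacks via result["OK"], result["Value"] and result["Message"]. Below is a minimal standalone sketch of that convention, with stand-in S_OK / S_ERROR helpers and a hypothetical getFlavor lookup mirroring the FlavorName / FlavorID branching in createInstance; the flavor catalogue and the function name are invented for illustration and are not part of CloudEndpoint.py.

# Sketch of the S_OK / S_ERROR result convention used by the methods above.
# The real helpers live in DIRAC core; these stand-ins only mimic the keys
# that the calling code reads.
def S_OK(value=None):
    """Success wrapper carrying the payload under 'Value'."""
    return {"OK": True, "Value": value}

def S_ERROR(message=""):
    """Failure wrapper carrying a human-readable 'Message'."""
    return {"OK": False, "Message": message}

def getFlavor(parameters):
    """Hypothetical lookup mirroring the FlavorName / FlavorID branching above."""
    flavors = {"m1.small": {"vcpus": 1, "ram": 2048}}  # invented stand-in catalogue
    if "FlavorName" in parameters:
        name = parameters["FlavorName"]
        if name not in flavors:
            return S_ERROR("Flavor %s not found" % name)
        return S_OK(flavors[name])
    if "FlavorID" in parameters:
        return S_OK({"id": parameters["FlavorID"]})
    return S_ERROR("No flavor specified")

result = getFlavor({"FlavorName": "m1.small"})
if result["OK"]:
    print("Flavor:", result["Value"])
else:
    print("Failed:", result["Message"])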
import unittest from pymatgen.core.lattice import Lattice from pymatgen.core.structure import Molecule from pymatgen.analysis.adsorption import * from pymatgen.symmetry.analyzer import SpacegroupAnalyzer from pymatgen.util.coord import in_coord_list from pymatgen.util.testing import PymatgenTest class AdsorbateSiteFinderTest(PymatgenTest): def setUp(self): self.structure = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3.5), ["Ni"], [[0, 0, 0]]) lattice = Lattice.cubic(3.010) frac_coords = [ [0.00000, 0.00000, 0.00000], [0.00000, 0.50000, 0.50000], [0.50000, 0.00000, 0.50000], [0.50000, 0.50000, 0.00000], [0.50000, 0.00000, 0.00000], [0.50000, 0.50000, 0.50000], [0.00000, 0.00000, 0.50000], [0.00000, 0.50000, 0.00000], ] species = ["Mg", "Mg", "Mg", "Mg", "O", "O", "O", "O"] self.MgO = Structure(lattice, species, frac_coords) slabs = generate_all_slabs( self.structure, max_index=2, min_slab_size=6.0, min_vacuum_size=15.0, max_normal_search=1, center_slab=True, ) self.slab_dict = {"".join([str(i) for i in slab.miller_index]): slab for slab in slabs} self.asf_211 = AdsorbateSiteFinder(self.slab_dict["211"]) self.asf_100 = AdsorbateSiteFinder(self.slab_dict["100"]) self.asf_111 = AdsorbateSiteFinder(self.slab_dict["111"]) self.asf_110 = AdsorbateSiteFinder(self.slab_dict["110"]) self.asf_struct = AdsorbateSiteFinder(Structure.from_sites(self.slab_dict["111"].sites)) def test_init(self): asf_100 = AdsorbateSiteFinder(self.slab_dict["100"]) asf_111 = AdsorbateSiteFinder(self.slab_dict["111"]) def test_from_bulk_and_miller(self): # Standard site finding asf = AdsorbateSiteFinder.from_bulk_and_miller(self.structure, (1, 1, 1)) sites = asf.find_adsorption_sites() self.assertEqual(len(sites["hollow"]), 2) self.assertEqual(len(sites["bridge"]), 1) self.assertEqual(len(sites["ontop"]), 1) self.assertEqual(len(sites["all"]), 4) asf = AdsorbateSiteFinder.from_bulk_and_miller(self.structure, (1, 0, 0)) sites = asf.find_adsorption_sites() self.assertEqual(len(sites["all"]), 3) self.assertEqual(len(sites["bridge"]), 2) asf = AdsorbateSiteFinder.from_bulk_and_miller(self.structure, (1, 1, 0), undercoord_threshold=0.1) self.assertEqual(len(asf.surface_sites), 1) # Subsurface site finding asf = AdsorbateSiteFinder.from_bulk_and_miller(self.structure, (1, 1, 1)) sites = asf.find_adsorption_sites(positions=["ontop", "subsurface", "bridge"]) self.assertEqual(len(sites["all"]), 5) self.assertEqual(len(sites["subsurface"]), 3) def test_find_adsorption_sites(self): sites = self.asf_100.find_adsorption_sites() self.assertEqual(len(sites["all"]), 3) self.assertEqual(len(sites["hollow"]), 0) self.assertEqual(len(sites["bridge"]), 2) self.assertEqual(len(sites["ontop"]), 1) sites = self.asf_111.find_adsorption_sites() self.assertEqual(len(sites["all"]), 4) sites = self.asf_110.find_adsorption_sites() self.assertEqual(len(sites["all"]), 4) sites = self.asf_211.find_adsorption_sites() # Test on structure sites = self.asf_struct.find_adsorption_sites() def test_generate_adsorption_structures(self): co = Molecule("CO", [[0, 0, 0], [0, 0, 1.23]]) structures = self.asf_111.generate_adsorption_structures(co, repeat=[2, 2, 1]) self.assertEqual(len(structures), 4) sites = self.asf_111.find_adsorption_sites() # Check repeat functionality self.assertEqual( len([site for site in structures[0] if site.properties["surface_properties"] != "adsorbate"]), 4 * len(self.asf_111.slab), ) for n, structure in enumerate(structures): self.assertArrayAlmostEqual(structure[-2].coords, sites["all"][n]) find_args = {"positions": ["hollow"]} 
structures_hollow = self.asf_111.generate_adsorption_structures(co, find_args=find_args) self.assertEqual(len(structures_hollow), len(sites["hollow"])) for n, structure in enumerate(structures_hollow): self.assertTrue(in_coord_list(sites["hollow"], structure[-2].coords, 1e-4)) # Check molecule not changed after rotation when added to surface co = Molecule("CO", [[1.0, -0.5, 3], [0.8, 0.46, 3.75]]) structures = self.asf_211.generate_adsorption_structures(co) self.assertEqual(co, Molecule("CO", [[1.0, -0.5, 3], [0.8, 0.46, 3.75]])) # Check translation sites = self.asf_211.find_adsorption_sites() ads_site_coords = sites["all"][0] c_site = structures[0].sites[-2] self.assertEqual(str(c_site.specie), "C") self.assertArrayAlmostEqual(c_site.coords, sites["all"][0]) # Check no translation structures = self.asf_111.generate_adsorption_structures(co, translate=False) self.assertEqual(co, Molecule("CO", [[1.0, -0.5, 3], [0.8, 0.46, 3.75]])) sites = self.asf_111.find_adsorption_sites() ads_site_coords = sites["all"][0] c_site = structures[0].sites[-2] self.assertArrayAlmostEqual(c_site.coords, ads_site_coords + np.array([1.0, -0.5, 3])) def test_adsorb_both_surfaces(self): # Test out for monatomic adsorption o = Molecule("O", [[0, 0, 0]]) adslabs = self.asf_100.adsorb_both_surfaces(o) adslabs_one = self.asf_100.generate_adsorption_structures(o) self.assertEqual(len(adslabs), len(adslabs_one)) for adslab in adslabs: sg = SpacegroupAnalyzer(adslab) sites = sorted(adslab, key=lambda site: site.frac_coords[2]) self.assertTrue(sites[0].species_string == "O") self.assertTrue(sites[-1].species_string == "O") self.assertTrue(sg.is_laue()) # Test out for molecular adsorption oh = Molecule(["O", "H"], [[0, 0, 0], [0, 0, 1]]) adslabs = self.asf_100.adsorb_both_surfaces(oh) adslabs_one = self.asf_100.generate_adsorption_structures(oh) self.assertEqual(len(adslabs), len(adslabs_one)) for adslab in adslabs: sg = SpacegroupAnalyzer(adslab) sites = sorted(adslab, key=lambda site: site.frac_coords[2]) self.assertTrue(sites[0].species_string in ["O", "H"]) self.assertTrue(sites[-1].species_string in ["O", "H"]) self.assertTrue(sg.is_laue()) def test_generate_substitution_structures(self): # Test this for a low miller index halite structure slabs = generate_all_slabs(self.MgO, 1, 10, 10, center_slab=True, max_normal_search=1) for slab in slabs: adsgen = AdsorbateSiteFinder(slab) adslabs = adsgen.generate_substitution_structures("Ni") # There should be 2 configs (sub O and sub # Mg) for (110) and (100), 1 for (111) if tuple(slab.miller_index) != (1, 1, 1): self.assertEqual(len(adslabs), 2) else: self.assertEqual(len(adslabs), 1) # Test out whether it can correctly dope both # sides. 
Avoid (111) because it is not symmetric if tuple(slab.miller_index) != (1, 1, 1): adslabs = adsgen.generate_substitution_structures("Ni", sub_both_sides=True, target_species=["Mg"]) # Test if default parameters dope the surface site for i, site in enumerate(adslabs[0]): if adsgen.slab[i].surface_properties == "surface" and site.species_string == "Mg": print( adslabs[0][i].surface_properties, adsgen.slab[i].surface_properties, ) self.assertTrue(adslabs[0][i].surface_properties == "substitute") self.assertTrue(adslabs[0].is_symmetric()) # Correctly dope the target species self.assertEqual( adslabs[0].composition.as_dict()["Mg"], slab.composition.as_dict()["Mg"] - 2, ) # There should be one config (sub Mg) self.assertEqual(len(adslabs), 1) def test_functions(self): slab = self.slab_dict["111"] rot = get_rot(slab) reoriented = reorient_z(slab) if __name__ == "__main__": unittest.main()
davidwaroquiers/pymatgen
pymatgen/analysis/tests/test_adsorption.py
Python
mit
8,755
[ "pymatgen" ]
4f5ecac52faa0b5fb5fdb29a003fb0d6d6e7f5054cbb64b75574cb00b9d0a263
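The tests above walk through pymatgen's adsorption workflow: build a slab from a bulk structure and a Miller index, enumerate ontop/bridge/hollow sites, then decorate the sites with an adsorbate. A condensed usage sketch of the same calls follows; it assumes a working pymatgen installation, and the Ni lattice constant and CO geometry are simply copied from the test fixtures.

# Condensed usage sketch of the API exercised by the tests above (requires pymatgen).
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Molecule, Structure
from pymatgen.analysis.adsorption import AdsorbateSiteFinder

# Bulk fcc Ni, as in the test setUp()
bulk = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3.5), ["Ni"], [[0, 0, 0]])

# Build a (111) slab and enumerate adsorption sites
asf = AdsorbateSiteFinder.from_bulk_and_miller(bulk, (1, 1, 1))
sites = asf.find_adsorption_sites()
print({kind: len(coords) for kind, coords in sites.items()})  # ontop / bridge / hollow / all

# Place a CO molecule on every site, repeating the slab laterally first
co = Molecule("CO", [[0, 0, 0], [0, 0, 1.23]])
ads_structures = asf.generate_adsorption_structures(co, repeat=[2, 2, 1])
print(len(ads_structures), "adsorbate-decorated slabs")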
#!/usr/bin/env python ######################################## # to modify the NetCDF files ######################################## #First import the netcdf4 library from netCDF4 import Dataset # http://code.google.com/p/netcdf4-python/ import numpy as np import sys,getopt import math import datetime as DT import netcdftime from netcdftime import utime from datetime import datetime from matplotlib.dates import DayLocator, HourLocator, DateFormatter, drange, date2num, num2date from dateutil.relativedelta import relativedelta from numpy import arange import numpy as np import pylab as pl import parser import pandas as pd from pandas import * import os from datetime import timedelta import Scientific.IO.NetCDF as IO import matplotlib as mpl import matplotlib.pyplot as plt import matplotlib.ticker as mtick import matplotlib.lines as lines import matplotlib.dates as dates from matplotlib.dates import YEARLY, DateFormatter, rrulewrapper, RRuleLocator, drange from mpl_toolkits.basemap import Basemap , addcyclic from matplotlib.colors import LinearSegmentedColormap import textwrap pl.close('all') #=================================================== get opts input file def main(argv): inputfile = '' outputfile = '' try: opts, args = getopt.getopt(argv,"hi:o:",["ifile=","ofile="]) except getopt.GetoptError: print 'test.py -i <inputfile> -o <outputfile>' sys.exit(2) for opt, arg in opts: if opt == '-h': print 'test.py -i <inputfile> -o <outputfile>' sys.exit() elif opt in ("-i", "--ifile"): inputfile = arg elif opt in ("-o," "--ofile"): outputfile = arg print 'INputfile:', inputfile print 'Outputfile:', outputfile if __name__ == "__main__": main(sys.argv[1:]) #=================================================== GCMvar='psl' RELYvar='msl' GCMinputf='psl_6hrPlev_HadGEM2-ES_historical_r1i1p1_198412010600-198512010000.nc' #GCMinputf='psl_6hrPlev_HadGEM2-ES_historical_r1i1p1_198412010600-198512010000.standard.nc' RELYinputf='msl_EIN75.198412010000-198512010000.nc.remap.nc.360.nc' #RELYinputf='msl_EIN75.198412010000-198512010000.nc.remap.nc' #=================================================== ########################### units of time #=================================================== #=================================================== to read # Read en existing NetCDF file and create a new one # f is going to be the existing NetCDF file from where we want to import data GCMf=Dataset(GCMinputf,'r+') # r is for read only RELYf=Dataset(RELYinputf,'r') # r is for read only # Extract data from NetCDF file print GCMf.variables.keys() print GCMf.dimensions.keys() GCMvar3D=GCMf.variables[GCMvar][:,:,:] RELYvar3D=RELYf.variables[RELYvar][:,:,:] LATITUDE=len(GCMvar3D[0,:,0]) LONGITUDE=len(GCMvar3D[0,0,:]) TIME=len(GCMvar3D[:,0,0]) TIME2=len(RELYvar3D[:,0,0]) #print Latitude,Longitude,Timesize #=================================================== set up variables to use GCMvar2D=GCMvar3D.reshape(TIME,-1) RELYvar2D=RELYvar3D.reshape(TIME2,-1) # create a 3D variable to hold the Mean bias as GCMvar3D in size. #MeanBias=GCMvar3D # NOTE: this method leading to error: when create the second GCMdf in the loop # (t=2) GCMvar3D changes their value of first month to that of MonthlyMeanBias # really bizarre. 
So, create it as 3D zeros array and then reshape it MeanBias=np.zeros(TIME*LATITUDE*LONGITUDE).reshape(TIME,LATITUDE,LONGITUDE) print MeanBias.shape #--------------------------------------------------- # to test the reshap is working well or not print '======== 3D :=======' print RELYvar3D print '======== 2D :=======' print RELYvar2D print '======== 2D reshape:=======' RELYvar2DT=RELYvar2D.reshape(TIME2,LATITUDE,LONGITUDE) print RELYvar2DT if (RELYvar3D.all()==RELYvar2DT.all()): print 'OKOKOKOK' #quit() #--------------------------------------------------- #quit() #=================================================== to datetime GCMtime=netcdftime.num2date(GCMf.variables['time'][:],GCMf.variables['time'].units,calendar='360_day') #GCMtime=netcdftime.num2date(GCMf.variables['time'][:],GCMf.variables['time'].units) #print GCMtime[9].year print type(GCMtime) #print [str(i) for i in GCMtime[:]] #GCMindex=[DT.datetime.strptime(t,'%Y-%m-%d %H:%M:%S') for t in [str(i) for i in GCMtime[:]]] #print GCMindex #print DT.datetime.strptime('2002-02-30 4:00:09','%Y-%m-%d %H:%M:%S') # NOTE: this day donot exits in Python #=================================================== to datetime # NOTE: when I use the kew word 'calendar='360_day', it gives # wrong value for ONLY this netcdf file, GCMtime is quite OK. #cdftime = utime(RELYf.variables['time'].units,calendar='360_day') #cdftime = utime(RELYf.variables['time'].units) #RELYtime=[cdftime.num2date(t) for t in RELYf.variables['time'][:]] RELYtime=netcdftime.num2date(RELYf.variables['time'][:],RELYf.variables['time'].units,calendar='360_day') #RELYtime=netcdftime.num2date(RELYf.variables['time'][:],RELYf.variables['time'].units) #print type(RELYtime) #RELYindex=[DT.datetime.strptime(t,'%Y-%m-%d %H:%M:%S') for t in [str(i) for i in RELYtime[:]]] #print type(RELYindex) #d={'gcm':pd.Series(GCMvar2D,index=GCMtime),'rely':pd.Series(RELYvar2D,index=RELYtime)} #ddf=pd.DataFrame(d) # Series should be one dimension #quit() #for j in range(10,len(GCMvar3D[0,:,0])): #=================================================== to DataFrame #GCMdf=pd.DataFrame({'year':[t.year for t in GCMtime], #'month':[t.month for t in GCMtime], #'day':[t.day for t in GCMtime], #'hour':[t.hour for t in GCMtime], #'sdfj':GCMf.variables[GCMvar][:,j,:]}) # NOTE: this method is too time cosuming, about 7 hours to finish this code #GCMdf=pd.DataFrame(GCMf.variables[GCMvar][:,0,0],GCMindex) # NOTE: cannot convert 360_day np.arrary objects read from netcdf # to datetime objects #quit() #--------------------------------------------------- GCMdf=pd.DataFrame(GCMvar2D) GCMdf['year']=[t.year for t in GCMtime] GCMdf['month']=[t.month for t in GCMtime] GCMdf['day']=[t.day for t in GCMtime] GCMdf['hour']=[t.hour for t in GCMtime] #print GCMdf.dtypes #print GCMdf.loc[0:9,['year','month','day','hour']] #print 'GCMdf' #print GCMdf.iloc[0:60,:] #quit() #=================================================== to DataFrame #RELYdf=pd.DataFrame({'year':[t.year for t in RELYtime], #'month':[t.month for t in RELYtime], #'day':[t.day for t in RELYtime], #'hour':[t.hour for t in RELYtime], #RELYvar:RELYf.variables[RELYvar][:,j,:]}) # NOTE: this method is too time cosuming, about 7 hours to finish this code #RELYdf=pd.DataFrame(RELYf.variables[RELYvar][:,0,0],RELYindex) # NOTE: cannot convert 360_day np.arrary objects read from netcdf # to datetime objects RELYdf=pd.DataFrame(RELYvar2D,dtype='float32') RELYdf['year']=[t.year for t in RELYtime] RELYdf['month']=[t.month for t in RELYtime] RELYdf['day']=[t.day for t in 
RELYtime] RELYdf['hour']=[t.hour for t in RELYtime] #print 'RELYdf' #print RELYdf.iloc[2,:] #print GCMdf.loc[0:9,['year','month','day','hour']] #quit() #=================================================== calculate #print GCMdf.stack(0) #print RELYdf.asfreq('6H',method='pad',calendar='360_day') # NOTE: asfreq and stack are not satisfactory to this task. # for the fromer is because of 360_day calendar. print "---------" ##=================================================== for test calculation #print RELYdf.loc[0] ## get monthly msl value #print RELYdf.loc[0][:] ## get value of psl in the same year & month #print GCMdf[(GCMdf['year'] == RELYdf['year'][0]) & (GCMdf['month'] == RELYdf['month'][0])][:] ##quit() ## values = value #print GCMdf.dtypes #print RELYdf.dtypes #print RELYdf.iloc[0,:] #print RELYdf.iloc[0,0:LONGITUDE*LATITUDE].shape #196 ##quit() #print np.array(GCMdf[(GCMdf['year'] == RELYdf['year'][0]) #& (GCMdf['month'] == RELYdf['month'][0])]) #print np.array(GCMdf[(GCMdf['year'] == RELYdf['year'][0]) #& (GCMdf['month'] == RELYdf['month'][0])])[:,0:LONGITUDE*LATITUDE].shape # 119 ##quit() #--------------------------------------------------- ##print [t for t in np.array(GCMdf[(GCMdf['year'] == RELYdf['year'][0]) ##& (GCMdf['month'] == RELYdf['month'][0])][:])] #print np.array([np.subtract(t,RELYdf.iloc[0,0:LONGITUDE*LATITUDE]) #for t in np.array(GCMdf[(GCMdf['year'] == RELYdf['year'][0]) #& (GCMdf['month'] == RELYdf['month'][0])])[:,0:LONGITUDE*LATITUDE]]) #print np.array([np.subtract(t,RELYdf.iloc[0,0:LONGITUDE*LATITUDE]) #for t in np.array(GCMdf[(GCMdf['year'] == RELYdf['year'][0]) #& (GCMdf['month'] == RELYdf['month'][0])])[:,0:LONGITUDE*LATITUDE]]).shape #--------------------------------------------------- #print RELYdf.iloc[1,:LONGITUDE*LATITUDE] #print GCMdf.iloc[1,:LONGITUDE*LATITUDE] #quit() #=================================================== loop in time series: K=0 for t in RELYdf.index: #for t in [1,2]: #print RELYdf.index MonthlyMeanBias=np.array([np.subtract(x,RELYdf.iloc[t,0:LONGITUDE*LATITUDE]) for x in np.array(GCMdf[ (GCMdf['year'] == RELYdf['year'][t]) & (GCMdf['month'] == RELYdf['month'][t]) & (GCMdf['hour'] == RELYdf['hour'][t]) ])[:,0:LONGITUDE*LATITUDE]]) #--------------------------------------------------- #print "GCMvar3D2:" #print [x for x in GCMvar3D[0:30,:]] # right #print "GCMdf:wrong" #print GCMdf.iloc[0:60,:] # the first month is wrong #print GCMdf.values #--------------------------------------------------- petit test: #print " GCM values in this month =======121" #print np.array([x for x in np.array(GCMdf[ #(GCMdf['year'] == RELYdf['year'][t]) & #(GCMdf['month'] == RELYdf['month'][t]) & #(GCMdf['hour'] == RELYdf['hour'][t]) #])]).shape #print np.array([x for x in np.array(GCMdf[ #(GCMdf['year'] == RELYdf['year'][t]) & #(GCMdf['month'] == RELYdf['month'][t]) & #(GCMdf['hour'] == RELYdf['hour'][t]) #])]).shape #print " GCM values in this month =======212" #GCMvalue= np.array([x for x in np.array(GCMdf[ #(GCMdf['year'] == RELYdf['year'][t]) & #(GCMdf['month'] == RELYdf['month'][t]) & #(GCMdf['hour'] == RELYdf['hour'][t]) #])]) ##])[:,0:LONGITUDE*LATITUDE]]) #print GCMvalue #print GCMvalue.shape #--------------------------------------------------- ##quit() #print "RELY values in this month =======" #print np.array(RELYdf.iloc[t,0:LONGITUDE*LATITUDE]) #print np.array(RELYdf.iloc[t,:]) #print np.array(RELYdf.iloc[t,:]).shape #print "MonthlyMeanBias =======" #print MonthlyMeanBias print MonthlyMeanBias.shape #quit() 
#--------------------------------------------------- end of petit test: L=len(MonthlyMeanBias[:,0]) MeanBias[K:K+L,:]=MonthlyMeanBias.reshape(L,LATITUDE,LONGITUDE) #print " MeanBias =======" #print MeanBias[K:K+L,j,:] print " time = "+str(RELYtime[t])+" t= "+str(t)+", L= "+str(L)+", MeanBias len= "+str(len(MeanBias[K:K+L,0,0]))+" k= " +str(K)+", end= "+str(K+L) K=K+L # NOTE:needed to be reseted to zeros #quit() #=================================================== check the calculation #NOTE: this examination is running in time and Lat(j) dimensions. #print " NOTE: examination in Day (in month) and Latitude(j) dimensions." #dateindex1=np.random.randint(0,L/2) #lonindex1=np.random.randint(0,LONGITUDE*LATITUDE/2) #dateindex2=np.random.randint(L/2,L) #lonindex2=np.random.randint(L/2,LONGITUDE*LATITUDE) #print "random Day index = " +str(dateindex1) #print "random lonindex = " +str(lonindex1) #lonindex1=43 #GCMvalue=np.array(GCMdf[ #(GCMdf['year'] == RELYdf['year'][t]) & #(GCMdf['month'] == RELYdf['month'][t]) & #(GCMdf['hour'] == RELYdf['hour'][t]) #])[dateindex1:dateindex2,lonindex1:lonindex2] #])[:,lonindex1:lonindex1+20] #print GCMvalue.shape #MeanBiasValue=np.array([x for x in np.array(MonthlyMeanBias)] #)[:,lonindex1:lonindex1+20] #)[dateind,x1:dateindex2,lonindex1:lonindex2] #print '=============' #print '============= GCM values' #print GCMvalue[:,lonindex1:lonindex1+20] #print '=============' #print '============= MonthlyMeanBias' #print MonthlyMeanBias[:,lonindex1:lonindex1+20] #print '=============' #print '=============' #print "GCM value - MeanBiasValue = "+str(GCMvalue[:,0:LONGITUDE]-MonthlyMeanBias) #print "Defaule RELYvalue = "+str(RELYdf.iloc[t,lonindex1:lonindex1+20]) #for x in np.array(MonthlyMeanBias)[:,:]])[np.random.randint(0,L),np.random.randint(0,LONGITUDE*LATITUDE)] #=================================================== print results print "========================= GCM data:==========================" print GCMvar3D print "========================= Reanalysis Data:==========================" print RELYvar3D print "========================= montly Mean Bias:==========================" print MeanBias print "========================= Corrected GCM data:==========================" #=================================================== check before WRITING: print " GCMvar3D shape = " + str(GCMvar3D.shape) print " MeanBias shape = " + str(MeanBias.shape) #=================================================== Writing GCMf.variables[GCMvar][:,:,:] = MeanBias GCMf.close() RELYf.close() #=================================================== final correction #=================================================== final correction # produce the corrected GCM LBC: by MeanBias + future GCM Futureinputf=('/Users/tang/climate/Bias-Correction/Future/' 'psl_6hrPlev_HadGEM2-ES_historical_r1i1p1_199412010600-199512010000.nc') Futuref=Dataset(Futureinputf,'r+') # r is for read only # Extract data from NetCDF file print Futuref.variables.keys() print Futuref.dimensions.keys() FutureLBC=np.add(Futuref.variables[GCMvar][:,:,:], MeanBias) print " shape of FutureLBC "+str(FutureLBC.shape) print " starting to write... " Futuref.variables[GCMvar][:,:,:] = FutureLBC Futuref.close() #=================================================== end of writing #=================================================== delete the in processing file quit()
CopyChat/Plotting
Python/Psl_correct.py
Python
gpl-3.0
14,522
[ "NetCDF" ]
1ac697082fd2b3399f1eec098594408ded0cbd2f4b5ed03ff25a8af015281073
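The heart of the bias-correction script above is the matching step: for each reanalysis record it selects every GCM record with the same year, month and hour and subtracts the reanalysis field from it, accumulating the differences in MeanBias. Below is a small self-contained sketch of that matching logic on synthetic data; the frame sizes and values are invented for illustration, whereas the real script operates on the flattened psl/msl grids read from the NetCDF files.

# Self-contained sketch of the year/month/hour matching used to build the bias array.
import numpy as np
import pandas as pd

npoints = 4  # stand-in for LONGITUDE * LATITUDE

# Hypothetical GCM frame: four 6-hourly records in one month, one column per grid point
gcm = pd.DataFrame(np.arange(4 * npoints, dtype=float).reshape(4, npoints))
gcm["year"] = 1984
gcm["month"] = 12
gcm["hour"] = [0, 6, 12, 18]

# Hypothetical reanalysis frame: a single record for the same month and hour
rely = pd.DataFrame(np.full((1, npoints), 2.0))
rely["year"] = 1984
rely["month"] = 12
rely["hour"] = 0

bias_rows = []
for t in rely.index:
    # Select every GCM record with the same year, month and hour
    match = gcm[(gcm["year"] == rely["year"][t])
                & (gcm["month"] == rely["month"][t])
                & (gcm["hour"] == rely["hour"][t])]
    # GCM minus reanalysis for each matching timestep
    bias_rows.append(match.iloc[:, :npoints].values - rely.iloc[t, :npoints].values)

bias = np.vstack(bias_rows)
print(bias.shape)  # (number of matching timesteps, npoints)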
"""Python to lua translator class""" import ast import os from .config import Config from .nodevisitor import NodeVisitor class Translator: """Python to lua main class translator""" def __init__(self, config=None, show_ast=False): self.config = config if config is not None else Config() self.show_ast = show_ast self.output = [] def translate(self, pycode): """Translate python code to lua code""" py_ast_tree = ast.parse(pycode) visitor = NodeVisitor(config=self.config) if self.show_ast: print(ast.dump(py_ast_tree)) visitor.visit(py_ast_tree) self.output = visitor.output return self.to_code() def to_code(self, code=None, indent=0): """Create a lua code from the compiler output""" code = code if code is not None else self.output def add_indentation(line): """Add indentation to the given line""" indentation_width = 4 indentation_space = " " indent_copy = max(indent, 0) return indentation_space * indentation_width * indent_copy + line lines = [] for line in code: if isinstance(line, str): lines.append(add_indentation(line)) elif isinstance(line, list): sub_code = self.to_code(line, indent + 1) lines.append(sub_code) return "\n".join(lines) @staticmethod def get_luainit(filename="luainit.lua"): """Get lua initialization code.""" script_name = os.path.realpath(__file__) folder = os.path.dirname(script_name) luainit_path = os.path.join(folder, filename) with open(luainit_path) as file: return file.read() return ""
NeonMercury/python-lua
pythonlua/translator.py
Python
apache-2.0
1,804
[ "VisIt" ]
06155c9c5629e09aad28ceabd813ead9ee019a883f9d8ffd7322c1efcf65ff83
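Translator.to_code renders the node visitor's output, a nested list in which plain strings are lines and sub-lists are blocks indented one level deeper. Here is a standalone sketch of that rendering scheme, independent of the pythonlua package; the example nested list is invented and only resembles what the visitor might emit.

# Standalone sketch of the nested-list rendering approach used by Translator.to_code.
def render(code, indent=0, width=4):
    """Render a nested list: strings become lines, sub-lists become indented blocks."""
    lines = []
    for item in code:
        if isinstance(item, str):
            lines.append(" " * width * indent + item)
        elif isinstance(item, list):
            lines.append(render(item, indent + 1, width))
    return "\n".join(lines)

# Invented nested-list output resembling what the node visitor might produce
output = [
    "local function greet(name)",
    [
        'print("Hello, " .. name)',
    ],
    "end",
]
print(render(output))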
# coding: utf-8 # Copyright 2013 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests that walk through Course Builder pages.""" __author__ = 'Sean Lip' import __builtin__ import copy import cStringIO import csv import datetime import logging import os import re import shutil import sys import time import types import urllib import zipfile import actions from actions import assert_contains from actions import assert_contains_all_of from actions import assert_does_not_contain from actions import assert_equals from controllers_review import PeerReviewControllerTest from controllers_review import PeerReviewDashboardAdminTest from review_stats import PeerReviewAnalyticsTest import appengine_config from common import crypto from common.utils import Namespace from common import tags from common import users from controllers import lessons from controllers import sites from controllers import utils from controllers.utils import XsrfTokenManager import main from models import config from models import courses from models import entities from models import entity_transforms from models import jobs from models import models from models import student_work from models import transforms from models import vfs from models.courses import Course import modules.admin.admin from modules.announcements.announcements import AnnouncementEntity from modules import course_explorer from modules import search from tools import verify from tools.etl import etl from tools.etl import etl_lib from tools.etl import examples with actions.PreserveOsEnvironDebugMode(): from tools.etl import remote from tools.etl import testing from google.appengine.api import memcache from google.appengine.api import namespace_manager from google.appengine.ext import db import webapp2 # A number of data files in a test course. COURSE_FILE_COUNT = 70 # There is an expectation in our tests of automatic import of data/*.csv files, # which is achieved below by selecting an alternative factory method. 
courses.Course.create_new_default_course = ( courses.Course.custom_new_default_course_for_test) def _add_data_entity(app_context, entity_type, data): """Insert new entity into a given namespace.""" old_namespace = namespace_manager.get_namespace() try: namespace_manager.set_namespace(app_context.get_namespace_name()) new_object = entity_type() new_object.data = data new_object.put() return new_object finally: namespace_manager.set_namespace(old_namespace) def _assert_identical_data_entity_exists(app_context, test_object): """Checks a specific entity exists in a given namespace.""" old_namespace = namespace_manager.get_namespace() try: namespace_manager.set_namespace(app_context.get_namespace_name()) entity_class = test_object.__class__ existing_object = entity_class().get(test_object.key()) assert existing_object assert existing_object.data == test_object.data assert existing_object.key().id() == test_object.key().id() finally: namespace_manager.set_namespace(old_namespace) class WSGIRoutingTest(actions.TestBase): """Test WSGI-like routing and error handling in sites.py.""" def setUp(self): super(WSGIRoutingTest, self).setUp() sites.setup_courses('') actions.login('test@example.com', is_admin=True) def tearDown(self): sites.reset_courses() super(WSGIRoutingTest, self).tearDown() def _validate_handlers(self, routes, allowed_types, ignore_modules=None): if not ignore_modules: ignore_modules = set() for _, handler_class in routes: known = False # check inheritance for _type in allowed_types: if issubclass(handler_class, _type): known = True break # check modules class_name = handler_class.__module__ for _module in ignore_modules: if class_name.startswith(_module): known = True break if not known: raise Exception( 'Unsupported handler type: %s.', handler_class) def test_all_namespaced_handlers_are_of_known_types(self): supported = [ utils.ApplicationHandler, utils.CronHandler, utils.RESTHandlerMixin, utils.StarRouteHandlerMixin, sites.ApplicationRequestHandler] self._validate_handlers(main.app_routes, supported) self._validate_handlers( main.global_routes, supported + [ course_explorer.student.AssetsHandler, course_explorer.student.BaseStudentHandler, course_explorer.student.IndexPageHandler, models.StudentLifecycleObserver, search.search.AssetsHandler, tags.ResourcesHandler, ], ignore_modules=[ 'mapreduce.lib', 'mapreduce.handlers', 'mapreduce.main', 'mapreduce.status', 'pipeline.pipeline', 'pipeline.status_ui']) def getApp(self): """Setup test WSGI app with variety of test handlers.""" class _Aborting404Handler(utils.ApplicationHandler): def get(self): self.abort(404) def post(self): raise Exception('Intentional error') class _EmptyError404Handler(utils.ApplicationHandler): def get(self): self.error(404) def post(self): raise Exception('Intentional error') class _FullError404Handler(utils.ApplicationHandler): def get(self): self.error(404) self.response.out.write('Failure') class _Vocal200Handler(utils.ApplicationHandler): def get(self): # this handler is bound to only one path self.response.out.write('Success') class _VocalRegexBound200Handler(utils.ApplicationHandler): def get(self, path): # WSGI will pass path in here because this handler is bound to a # regex Route self.response.out.write( 'Success on "%s"' % path) class _VocalStar200Handler( utils.ApplicationHandler, utils.StarRouteHandlerMixin): def get(self): # WSGI will NOT pass path in here because this handler is NOT # bound to a regex Route; we need to extract and analyze path # directly self.response.out.write( 'Success on "%s"' 
% self.request.path) all_routes = [ ('/a', _Vocal200Handler), (r'/b(.*)', _VocalRegexBound200Handler), ('/c', _VocalStar200Handler), ('/d', _Aborting404Handler), ('/e', _EmptyError404Handler), ('/f', _FullError404Handler)] global_routes = [] + all_routes namespaced_routes = [] + all_routes sites.ApplicationRequestHandler.bind(namespaced_routes) app_routes = [(r'(.*)', sites.ApplicationRequestHandler)] app = webapp2.WSGIApplication() app.router = sites.WSGIRouter(global_routes + app_routes) app.handle_exception = sites.ApplicationRequestHandler.handle_exception return app def test_global_routes(self): # this route works without courses; it does NOT support '*' self.assertEqual(200, self.testapp.get('/a').status_code) self.assertEqual(404, self.testapp.get( '/a/any/other', expect_errors=True).status_code) # this route works without courses; it does support '*' via regex Route response = self.testapp.get('/b') self.assertEqual(200, response.status_code) self.assertEqual('Success on ""', response.body) response = self.testapp.get('/b/any/other') self.assertEqual(200, response.status_code) self.assertEqual('Success on "/any/other"', response.body) # this route works without courses; it should support '*' via mixin # class StarRouteHandlerMixin, but it does not; self.assertEqual(200, self.testapp.get('/c').status_code) # TODO(psimakov): this is counter intuitive: we marked the handler with # mixin class StarRouteHandlerMixin, but it does not support '*'; the # root cause is in sites,py; the request gets there, but we do not # dispatch to any routes in there if request does not start with a # course prefix; thus global routes will arrive into sites.py # dispatcher, but will always 404 because they can not be mapped to any # of the courses; I am not sure if we can or fix this, but this note # will capture the details self.assertEqual(404, self.testapp.get( '/c/any/other', expect_errors=True).status_code) # these routes always returns 404 and body of response generated by # the default error handler for path in ['/d', '/e']: response = self.testapp.get(path, expect_errors=True) self.assertEqual(404, response.status_code) self.assertEqual( 'Unable to access requested page. HTTP status code: 404.', response.body) response = self.testapp.post(path, expect_errors=True) self.assertEqual(500, response.status_code) self.assertEqual( 'Server error. 
HTTP status code: 500.', response.body) # this route always returns 404 and body of response generated by # the handler itself response = self.testapp.get('/f', expect_errors=True) self.assertEqual(404, response.status_code) self.assertEqual('Failure', response.body) def test_course_routes(self): # define courses sites.setup_courses('course:/foo::ns_foo, course:/bar::ns_bar') # retest all global routes self.test_global_routes() self.assertEqual(200, self.testapp.get('/foo/a').status_code) # regex routes don't work in our courses; one must use mixin class # StarRouteHandlerMixin self.assertEqual(404, self.testapp.get( '/foo/b', expect_errors=True).status_code) self.assertEqual(404, self.testapp.get( '/foo/b/any/other', expect_errors=True).status_code) # this route uses mixin class StarRouteHandlerMixin correctly self.assertEqual(200, self.testapp.get('/foo/c').status_code) response = self.testapp.get('/foo/c/any/other') self.assertEqual(200, response.status_code) self.assertEqual('Success on "/foo/c/any/other"', response.body) # these routes always returns 404 and a body of response generated by # the default error handler for path in ['/d', '/e']: response = self.testapp.get('/foo%s' % path, expect_errors=True) self.assertEqual(404, response.status_code) self.assertEqual( 'Unable to access requested page in the course /foo. ' 'HTTP status code: 404.', response.body) response = self.testapp.post('/foo%s' % path, expect_errors=True) self.assertEqual(500, response.status_code) self.assertEqual( 'Server error accessing the course /foo. ' 'HTTP status code: 500.', response.body) # this route always returns 404 and a body of response generated by # the handler itself response = self.testapp.get('/foo/f', expect_errors=True) self.assertEqual(404, response.status_code) self.assertEqual('Failure', response.body) class ExtensionSwitcherTests(actions.TestBase): _ADMIN_EMAIL = 'admin@foo.com' _COURSE_NAME = 'extension_class' _KEY = 'test_switcher_key' _URI = 'test/switcher' class _Handler_1(utils.ApplicationHandler): def get(self): self.response.out.write('handler 1') class _Handler_2(utils.ApplicationHandler): def get(self): self.response.out.write('handler 2') def setUp(self): super(ExtensionSwitcherTests, self).setUp() self.base = '/' + self._COURSE_NAME self.app_context = actions.simple_add_course( self._COURSE_NAME, self._ADMIN_EMAIL, 'Extension Switcher') self.old_namespace = namespace_manager.get_namespace() namespace_manager.set_namespace('ns_%s' % self._COURSE_NAME) switcher = utils.ApplicationHandlerSwitcher(self._KEY) sites.ApplicationRequestHandler.urls_map['/' + self._URI] = ( switcher.switch(self._Handler_1, self._Handler_2)) def tearDown(self): courses.Course.ENVIRON_TEST_OVERRIDES = {} del sites.Registry.test_overrides[sites.GCB_COURSES_CONFIG.name] del sites.ApplicationRequestHandler.urls_map['/' + self._URI] namespace_manager.set_namespace(self.old_namespace) super(ExtensionSwitcherTests, self).tearDown() def test_extension_enabled(self): courses.Course.ENVIRON_TEST_OVERRIDES = {'course': {self._KEY: False}} self.assertEquals('handler 1', self.get(self._URI).body) def test_extension_disabled(self): courses.Course.ENVIRON_TEST_OVERRIDES = {'course': {self._KEY: True}} self.assertEquals('handler 2', self.get(self._URI).body) class InfrastructureTest(actions.TestBase): """Test core infrastructure classes agnostic to specific user roles.""" def test_fs_cleaned_up_when_memcache_begin_or_end_asserts(self): config.Registry.test_overrides[models.CAN_USE_MEMCACHE.name] = True try: for method 
in [ models.MemcacheManager.begin_readonly, models.MemcacheManager.end_readonly]: models.MemcacheManager.begin_readonly() models.MemcacheManager.set('a', 'aaa') # force error state models.MemcacheManager._READONLY_REENTRY_COUNT = -1 with self.assertRaises(AssertionError): method() self.assertEquals(None, models.MemcacheManager._LOCAL_CACHE) self.assertEquals(False, models.MemcacheManager._IS_READONLY) self.assertEquals( 0, models.MemcacheManager._READONLY_REENTRY_COUNT) self.assertEquals( None, models.MemcacheManager._READONLY_APP_CONTEXT) finally: del config.Registry.test_overrides[models.CAN_USE_MEMCACHE.name] def test_memcache_begin_end_reentrancy(self): config.Registry.test_overrides[models.CAN_USE_MEMCACHE.name] = True try: self.assertEquals(None, models.MemcacheManager._LOCAL_CACHE) models.MemcacheManager.begin_readonly() models.MemcacheManager.set('a', 'aaa') models.MemcacheManager.begin_readonly() self.assertEquals( 'aaa', models.MemcacheManager._LOCAL_CACHE['']['a']) models.MemcacheManager.begin_readonly() self.assertEquals( 'aaa', models.MemcacheManager._LOCAL_CACHE['']['a']) models.MemcacheManager.end_readonly() self.assertEquals( 'aaa', models.MemcacheManager._LOCAL_CACHE['']['a']) models.MemcacheManager.end_readonly() self.assertEquals( 'aaa', models.MemcacheManager._LOCAL_CACHE['']['a']) models.MemcacheManager.end_readonly() self.assertEquals(None, models.MemcacheManager._LOCAL_CACHE) finally: del config.Registry.test_overrides[models.CAN_USE_MEMCACHE.name] def test_memcache_fails_missmatched_begin_end(self): config.Registry.test_overrides[models.CAN_USE_MEMCACHE.name] = True models.MemcacheManager.begin_readonly() models.MemcacheManager.set('a', 'aaa') models.MemcacheManager.end_readonly() with self.assertRaises(AssertionError): models.MemcacheManager.end_readonly() self.assertEquals(None, models.MemcacheManager._LOCAL_CACHE) del config.Registry.test_overrides[models.CAN_USE_MEMCACHE.name] def test_memcache_can_be_cleared_if_end_readonly_is_not_called(self): config.Registry.test_overrides[models.CAN_USE_MEMCACHE.name] = True models.MemcacheManager.begin_readonly() models.MemcacheManager.set('a', 'aaa') models.MemcacheManager.begin_readonly() models.MemcacheManager.begin_readonly() self.assertEquals('aaa', models.MemcacheManager._LOCAL_CACHE['']['a']) models.MemcacheManager.clear_readonly_cache() del config.Registry.test_overrides[models.CAN_USE_MEMCACHE.name] def test_memcache_get_all_caching(self): config.Registry.test_overrides[models.CAN_USE_MEMCACHE.name] = True with Namespace('ns_test'): for index in range(0, 100): models.QuestionDAO.create_question( {'data': 'data-%s' % index}, models.QuestionDTO.MULTIPLE_CHOICE) questions_1 = models.QuestionDAO.get_all() old_all = models.QuestionDAO.ENTITY.all models.QuestionDAO.ENTITY.all = None questions_2 = models.QuestionDAO.get_all() models.QuestionDAO.ENTITY.all = old_all self.assertEquals(100, len(questions_1)) self.assertEquals(100, len(questions_2)) for index in range(0, 100): self.assertEquals( questions_1[index].dict, questions_2[index].dict) self.assertEquals( questions_1[index].id, questions_2[index].id) def test_value_cached_in_one_namespace_invisible_in_another(self): """Value cached in one namespace is not visible in another.""" # set value and check it's visible in one namespace old_namespace = namespace_manager.get_namespace() try: namespace_manager.set_namespace('test_memcache_manager_a') models.MemcacheManager.set('foo', 'bar') assert 'bar' == models.MemcacheManager.get('foo') finally: 
namespace_manager.set_namespace(old_namespace) # check same value is not visible in another namespace old_namespace = namespace_manager.get_namespace() try: namespace_manager.set_namespace('test_memcache_manager_b') assert not models.MemcacheManager.get('foo') finally: namespace_manager.set_namespace(old_namespace) # check same value is not visible in default namespace assert not models.MemcacheManager.get('foo') # check same value is not visible in None namespace old_namespace = namespace_manager.get_namespace() try: namespace_manager.set_namespace(None) assert not models.MemcacheManager.get('foo') finally: namespace_manager.set_namespace(old_namespace) # set value and check it's visible in default namespace models.MemcacheManager.set('foo', 'bar') assert 'bar' == models.MemcacheManager.get('foo') # check value is not visible in another namespace old_namespace = namespace_manager.get_namespace() try: namespace_manager.set_namespace('test_memcache_manager_c') assert not models.MemcacheManager.get('foo') finally: namespace_manager.set_namespace(old_namespace) def test_response_content_type_is_application_json_in_utf_8(self): response = self.testapp.get( '/rest/config/item?key=gcb_config_update_interval_sec') self.assertEqual( 'application/javascript; charset=utf-8', response.headers['Content-Type']) def test_response_does_not_echo_unescaped_tags(self): response = self.testapp.get( '/rest/config/item?key=<script>alert(2)</script>').body self.assertNotIn('<script>', response) self.assertNotIn('</script>', response) self.assertIn( '\\\\u003Cscript\\\\u003Ealert(2)\\\\u003C/script\\\\u003E', response) def test_xsrf_token_manager(self): """Test XSRF token operations.""" # os.environ['AUTH_DOMAIN'] = 'test_domain' # os.environ['APPLICATION_ID'] = 'test app' # Issues and verify anonymous user token. action = 'test-action' token = utils.XsrfTokenManager.create_xsrf_token(action) assert '/' in token assert utils.XsrfTokenManager.is_xsrf_token_valid(token, action) # Impersonate real user. os.environ['USER_EMAIL'] = 'test_email' os.environ['USER_ID'] = 'test_id' # Issues and verify real user token. action = 'test-action' token = utils.XsrfTokenManager.create_xsrf_token(action) assert '/' in token assert utils.XsrfTokenManager.is_xsrf_token_valid(token, action) # Check forged time stamp invalidates token. parts = token.split('/') assert len(parts) == 2 forgery = '%s/%s' % (long(parts[0]) + 1000, parts[1]) assert forgery != token assert not utils.XsrfTokenManager.is_xsrf_token_valid(forgery, action) # Check token properly expires. action = 'test-action' time_in_the_past = long( time.time() - utils.XsrfTokenManager.XSRF_TOKEN_AGE_SECS) old_token = utils.XsrfTokenManager._create_token( action, time_in_the_past) assert not utils.XsrfTokenManager.is_xsrf_token_valid(old_token, action) # Clean up. # del os.environ['APPLICATION_ID'] # del os.environ['AUTH_DOMAIN'] del os.environ['USER_EMAIL'] del os.environ['USER_ID'] def test_import_course(self): """Tests importing one course into another.""" # Setup courses. sites.setup_courses( 'course:/a::ns_a, course:/b::ns_b, course:/c::ns_c, course:/:/') # Validate the courses before import. 
all_courses = sites.get_all_courses() dst_app_context_a = all_courses[0] dst_app_context_b = all_courses[1] dst_app_context_c = all_courses[2] src_app_context = all_courses[3] dst_course_a = courses.Course(None, app_context=dst_app_context_a) dst_course_b = courses.Course(None, app_context=dst_app_context_b) dst_course_c = courses.Course(None, app_context=dst_app_context_c) src_course = courses.Course(None, app_context=src_app_context) new_course_keys = [ 'admin_user_emails', 'announcement_list_email', 'announcement_list_url', 'blurb', 'forum_email', 'forum_embed_url', 'forum_url', 'google_analytics_id', 'google_tag_manager_id', 'instructor_details', 'main_video', 'start_date'] init_settings = dst_course_a.app_context.get_environ() assert 'assessment_confirmations' not in init_settings for key in new_course_keys: assert key not in init_settings['course'] assert not dst_course_a.get_units() assert not dst_course_b.get_units() assert 12 == len(src_course.get_units()) # Import 1.2 course into 1.3. errors = [] src_course_out, dst_course_out_a = dst_course_a.import_from( src_app_context, errors) if errors: raise Exception(errors) assert len( src_course.get_units()) == len(src_course_out.get_units()) assert len( src_course_out.get_units()) == len(dst_course_out_a.get_units()) final_settings = dst_course_a.app_context.get_environ() assert 'assessment_confirmations' in final_settings final_course_settings = set( init_settings['course'].keys()).intersection( set(final_settings['course'].keys())) self.assertEqual( set(init_settings['course'].keys()), final_course_settings) for key in new_course_keys: assert key in final_settings['course'] # add dependent entities so we can check they make it through the import dependents = [] for dependent_entity_class in courses.COURSE_CONTENT_ENTITIES: dependents.append(_add_data_entity( dst_course_out_a.app_context, dependent_entity_class, 'Test "%s"' % str( dependent_entity_class))) assert dependents # Import 1.3 course into 1.3. errors = [] src_course_out_a, dst_course_out_b = dst_course_b.import_from( dst_app_context_a, errors) if errors: raise Exception(errors) assert src_course_out_a.get_units() == dst_course_out_b.get_units() for dependent in dependents: _assert_identical_data_entity_exists( dst_course_out_b.app_context, dependent) # Import imported 1.3 course into 1.3. errors = [] _, dst_course_out_c = dst_course_c.import_from( dst_app_context_b, errors) if errors: raise Exception(errors) assert dst_course_out_c.get_units() == dst_course_out_b.get_units() for dependent in dependents: _assert_identical_data_entity_exists( dst_course_out_c.app_context, dependent) # Test delete. units_to_delete = dst_course_a.get_units() deleted_count = 0 for unit in units_to_delete: assert dst_course_a.delete_unit(unit) deleted_count += 1 dst_course_a.save() assert deleted_count == len(units_to_delete) assert not dst_course_a.get_units() assert not dst_course_a.app_context.fs.list(os.path.join( dst_course_a.app_context.get_home(), 'assets/js/')) # Clean up. sites.reset_courses() def test_import_13_assessment(self): # Setup courses. 
sites.setup_courses('course:/a::ns_a, course:/b::ns_b, course:/:/') all_courses = sites.get_all_courses() src_app_context = all_courses[0] dst_app_context = all_courses[1] src_course = courses.Course(None, app_context=src_app_context) dst_course = courses.Course(None, app_context=dst_app_context) # Add an assessment src_assessment = src_course.add_assessment() self.assertEqual('A', src_assessment.type) src_assessment.title = 'Test Assessment' src_assessment.release_date = '2015-01-01 12:15' src_assessment.now_available = True src_assessment.properties = {'key': 'value'} src_assessment.weight = 3.14 src_assessment.html_content = 'content' src_assessment.html_check_answers = 'check' src_assessment.html_review_form = 'review' src_assessment.workflow_yaml = 'a: 3' src_course.save() errors = [] dst_course.import_from(src_app_context, errors) self.assertEqual(0, len(errors)) dst_assessment = dst_course.find_unit_by_id(src_assessment.unit_id) self.assertEqual(src_assessment.__dict__, dst_assessment.__dict__) def test_import_13_lesson(self): # Setup courses. sites.setup_courses('course:/a::ns_a, course:/b::ns_b, course:/:/') all_courses = sites.get_all_courses() src_app_context = all_courses[0] dst_app_context = all_courses[1] src_course = courses.Course(None, app_context=src_app_context) dst_course = courses.Course(None, app_context=dst_app_context) # Add a unit src_unit = src_course.add_unit() src_lesson = src_course.add_lesson(src_unit) src_lesson.title = 'Test Lesson' src_lesson.scored = True src_lesson.objectives = 'objectives' src_lesson.video = 'video' src_lesson.notes = 'notes' src_lesson.duration = 'duration' src_lesson.now_available = True src_lesson.has_activity = True src_lesson.activity_title = 'activity title' src_lesson.activity_listed = False src_lesson.properties = {'key': 'value'} src_course.save() errors = [] dst_course.import_from(src_app_context, errors) self.assertEqual(0, len(errors)) dst_unit = dst_course.find_unit_by_id(src_unit.unit_id) dst_lesson = dst_course.find_lesson_by_id( dst_unit, src_lesson.lesson_id) assert not dst_lesson.has_activity assert not dst_lesson.activity_title src_dict = copy.deepcopy(src_lesson.__dict__) dst_dict = copy.deepcopy(dst_lesson.__dict__) del src_dict['has_activity'] del src_dict['activity_title'] del dst_dict['has_activity'] del dst_dict['activity_title'] self.assertEqual(src_dict, dst_dict) def test_create_new_course(self): """Tests creating a new course.""" # Setup courses. sites.setup_courses('course:/test::ns_test, course:/:/') # Add several units. course = courses.Course(None, app_context=sites.get_all_courses()[0]) link = course.add_link() unit = course.add_unit() assessment = course.add_assessment() course.save() assert course.find_unit_by_id(link.unit_id) assert course.find_unit_by_id(unit.unit_id) assert course.find_unit_by_id(assessment.unit_id) assert 3 == len(course.get_units()) assert assessment.unit_id == 3 # Check unit can be found. assert unit == course.find_unit_by_id(unit.unit_id) assert not course.find_unit_by_id(999) # Update unit. unit.title = 'Unit Title' unit.labels = 'foo, bar' course.update_unit(unit) course.save() assert 'Unit Title' == course.find_unit_by_id(unit.unit_id).title assert 'foo, bar' == course.find_unit_by_id(unit.unit_id).labels # Update link. 
link.title = 'Link Title' link.href = 'http://google.com' link.labels = 'bar, baz' course.update_unit(link) course.save() assert 'Link Title' == course.find_unit_by_id(link.unit_id).title assert 'http://google.com' == course.find_unit_by_id(link.unit_id).href assert 'bar, baz' == course.find_unit_by_id(link.unit_id).labels # Update assessment. assessment.title = 'Asmt. Title' assessment.labels = 'a, b, c' course.update_unit(assessment) course.save() assert 'Asmt. Title' == course.find_unit_by_id(assessment.unit_id).title assert 'a, b, c' == course.find_unit_by_id(assessment.unit_id).labels # Update assessment from file. assessment_content = open(os.path.join( appengine_config.BUNDLE_ROOT, 'assets/js/assessment-Pre.js'), 'rb').readlines() assessment_content = u''.join(assessment_content) errors = [] course.set_assessment_content(assessment, assessment_content, errors) course.save() assert not errors assessment_content_stored = course.app_context.fs.get(os.path.join( course.app_context.get_home(), course.get_assessment_filename(assessment.unit_id))) assert assessment_content == assessment_content_stored # Test adding lessons. lesson_a = course.add_lesson(unit) lesson_b = course.add_lesson(unit) lesson_c = course.add_lesson(unit) course.save() assert [lesson_a, lesson_b, lesson_c] == course.get_lessons( unit.unit_id) assert lesson_c.lesson_id == 6 # Make the course available. with actions.OverriddenEnvironment({'course': {'now_available': True}}): # Test public/private assessment. assessment_url = ( '/test/' + course.get_assessment_filename(assessment.unit_id)) assert not assessment.now_available response = self.get(assessment_url, expect_errors=True) assert_equals(response.status_int, 403) assessment = course.find_unit_by_id(assessment.unit_id) assessment.now_available = True course.update_unit(assessment) course.save() response = self.get(assessment_url) assert_equals(response.status_int, 200) # Check delayed assessment deletion. course.delete_unit(assessment) response = self.get(assessment_url) # note: file is still available assert_equals(response.status_int, 200) course.save() response = self.get(assessment_url, expect_errors=True) assert_equals(response.status_int, 404) # Test public/private activity. lesson_a = course.find_lesson_by_id(None, lesson_a.lesson_id) lesson_a.now_available = False lesson_a.has_activity = True course.update_lesson(lesson_a) errors = [] course.set_activity_content(lesson_a, u'var activity = []', errors) assert not errors activity_url = ( '/test/' + course.get_activity_filename( None, lesson_a.lesson_id)) response = self.get(activity_url, expect_errors=True) assert_equals(response.status_int, 403) lesson_a = course.find_lesson_by_id(None, lesson_a.lesson_id) lesson_a.now_available = True course.update_lesson(lesson_a) course.save() response = self.get(activity_url) assert_equals(response.status_int, 200) # Check delayed activity. course.delete_lesson(lesson_a) response = self.get(activity_url) # note: file is still available assert_equals(response.status_int, 200) course.save() response = self.get(activity_url, expect_errors=True) assert_equals(response.status_int, 404) # Test deletes removes all child objects. course.delete_unit(link) course.delete_unit(unit) assert not course.delete_unit(assessment) course.save() assert not course.get_units() assert not course.app_context.fs.list(os.path.join( course.app_context.get_home(), 'assets/js/')) # Clean up. 
sites.reset_courses() def test_reorder_units(self): """Reorders the units and lessons of the course.""" # Setup courses. sites.setup_courses('course:/test::ns_test, course:/:/') # Add unit. course = courses.Course(None, app_context=sites.get_all_courses()[0]) unit = course.add_unit() unit.title = 'Unit Title' # Test adding lessons. lesson_a = course.add_lesson(unit) lesson_b = course.add_lesson(unit) lesson_c = course.add_lesson(unit) course.save() assert [lesson_a, lesson_b, lesson_c] == course.get_lessons( unit.unit_id) # Reorder lessons. new_order = [{ 'id': unit.unit_id, 'lessons': [ {'id': lesson_b.lesson_id}, {'id': lesson_a.lesson_id}, {'id': lesson_c.lesson_id}]}] course.reorder_units(new_order) course.save() assert [lesson_b, lesson_a, lesson_c] == course.get_lessons( unit.unit_id) # Move lesson to another unit using function move_lesson_to. another_unit = course.add_unit() course.move_lesson_to(lesson_b, another_unit) course.save() assert [lesson_a, lesson_c] == course.get_lessons(unit.unit_id) assert [lesson_b] == course.get_lessons(another_unit.unit_id) # Move lesson to another unit using function reorder_units. new_order = [ {'id': unit.unit_id, 'lessons': [{'id': lesson_a.lesson_id}]}, {'id': another_unit.unit_id, 'lessons': [ {'id': lesson_b.lesson_id}, {'id': lesson_c.lesson_id} ]} ] course.reorder_units(new_order) course.save() assert [lesson_a] == course.get_lessons(unit.unit_id) assert [lesson_b, lesson_c] == course.get_lessons( another_unit.unit_id) # pylint: disable=too-many-statements def test_unit_lesson_not_available(self): """Tests that unavailable units and lessons behave correctly.""" # Setup a new course. sites.setup_courses('course:/test::ns_test, course:/:/') self.base = '/test' config.Registry.test_overrides[models.CAN_USE_MEMCACHE.name] = True app_context = sites.get_all_courses()[0] course = courses.Course(None, app_context=app_context) # Add a unit that is not available. unit_1 = course.add_unit() unit_1.now_available = False lesson_1_1 = course.add_lesson(unit_1) lesson_1_1.title = 'Lesson 1.1' course.update_unit(unit_1) # Add a unit with some lessons available and some lessons not available. unit_2 = course.add_unit() unit_2.now_available = True lesson_2_1 = course.add_lesson(unit_2) lesson_2_1.title = 'Lesson 2.1' lesson_2_1.now_available = False lesson_2_2 = course.add_lesson(unit_2) lesson_2_2.title = 'Lesson 2.2' lesson_2_2.now_available = True course.update_unit(unit_2) # Add a unit with all lessons not available. unit_3 = course.add_unit() unit_3.now_available = True lesson_3_1 = course.add_lesson(unit_3) lesson_3_1.title = 'Lesson 3.1' lesson_3_1.now_available = False course.update_unit(unit_3) # Add a unit that is available. unit_4 = course.add_unit() unit_4.now_available = True lesson_4_1 = course.add_lesson(unit_4) lesson_4_1.title = 'Lesson 4.1' lesson_4_1.now_available = True course.update_unit(unit_4) # Add an available unit with no lessons. unit_5 = course.add_unit() unit_5.now_available = True course.update_unit(unit_5) course.save() assert [lesson_1_1] == course.get_lessons(unit_1.unit_id) assert [lesson_2_1, lesson_2_2] == course.get_lessons(unit_2.unit_id) assert [lesson_3_1] == course.get_lessons(unit_3.unit_id) # Make the course available. 
with actions.OverriddenEnvironment({ 'course': { 'now_available': True, 'browsable': False}}): private_tag = 'id="lesson-title-private"' # Confirm private units are suppressed for user out of session response = self.get('preview') assert_equals(response.status_int, 200) assert_does_not_contain('Unit 1 - New Unit', response.body) assert_contains('Unit 2 - New Unit', response.body) assert_contains('Unit 3 - New Unit', response.body) assert_contains('Unit 4 - New Unit', response.body) assert_contains('Unit 5 - New Unit', response.body) # Simulate a student traversing the course. email = 'test_unit_lesson_not_available@example.com' name = 'Test Unit Lesson Not Available' actions.login(email, is_admin=False) actions.register(self, name) # Accessing a unit that is not available redirects to the main page. response = self.get('unit?unit=%s' % unit_1.unit_id) assert_equals(response.status_int, 302) response = self.get('unit?unit=%s' % unit_2.unit_id) assert_equals(response.status_int, 200) assert_contains('Lesson 2.1', response.body) assert_contains('This lesson is not available.', response.body) assert_does_not_contain(private_tag, response.body) response = self.get('unit?unit=%s&lesson=%s' % ( unit_2.unit_id, lesson_2_2.lesson_id)) assert_equals(response.status_int, 200) assert_contains('Lesson 2.2', response.body) assert_does_not_contain( 'This lesson is not available.', response.body) assert_does_not_contain(private_tag, response.body) response = self.get('unit?unit=%s' % unit_3.unit_id) assert_equals(response.status_int, 200) assert_contains('Lesson 3.1', response.body) assert_contains('This lesson is not available.', response.body) assert_does_not_contain(private_tag, response.body) response = self.get('unit?unit=%s' % unit_4.unit_id) assert_equals(response.status_int, 200) assert_contains('Lesson 4.1', response.body) assert_does_not_contain( 'This lesson is not available.', response.body) assert_does_not_contain(private_tag, response.body) response = self.get('unit?unit=%s' % unit_5.unit_id) assert_equals(response.status_int, 200) assert_does_not_contain('Lesson', response.body) assert_contains('This unit has no content.', response.body) assert_does_not_contain(private_tag, response.body) actions.logout() # Simulate an admin traversing the course. email = 'test_unit_lesson_not_available@example.com_admin' name = 'Test Unit Lesson Not Available Admin' actions.login(email, is_admin=True) actions.register(self, name) # The course admin can access a unit that is not available. 
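# The admin-side checks below mirror (and invert) the student checks above:
# private lessons are expected to render their real content together with the
# 'id="lesson-title-private"' marker, while the student-facing
# 'This lesson is not available.' message is absent.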
response = self.get('unit?unit=%s' % unit_1.unit_id) assert_equals(response.status_int, 200) assert_contains('Lesson 1.1', response.body) response = self.get('unit?unit=%s' % unit_2.unit_id) assert_equals(response.status_int, 200) assert_contains('Lesson 2.1', response.body) assert_does_not_contain( 'This lesson is not available.', response.body) assert_contains(private_tag, response.body) response = self.get('unit?unit=%s&lesson=%s' % ( unit_2.unit_id, lesson_2_2.lesson_id)) assert_equals(response.status_int, 200) assert_contains('Lesson 2.2', response.body) assert_does_not_contain( 'This lesson is not available.', response.body) assert_does_not_contain(private_tag, response.body) response = self.get('unit?unit=%s' % unit_3.unit_id) assert_equals(response.status_int, 200) assert_contains('Lesson 3.1', response.body) assert_does_not_contain( 'This lesson is not available.', response.body) assert_contains(private_tag, response.body) response = self.get('unit?unit=%s' % unit_4.unit_id) assert_equals(response.status_int, 200) assert_contains('Lesson 4.1', response.body) assert_does_not_contain( 'This lesson is not available.', response.body) assert_does_not_contain(private_tag, response.body) response = self.get('unit?unit=%s' % unit_5.unit_id) assert_equals(response.status_int, 200) assert_does_not_contain('Lesson', response.body) assert_contains('This unit has no content.', response.body) assert_does_not_contain(private_tag, response.body) actions.logout() # pylint: disable=too-many-statements def test_custom_assessments(self): """Tests that custom assessments are evaluated correctly.""" # Setup a new course. sites.setup_courses('course:/test::ns_test, course:/:/') self.base = '/test' self.namespace = 'ns_test' config.Registry.test_overrides[models.CAN_USE_MEMCACHE.name] = True app_context = sites.get_all_courses()[0] course = courses.Course(None, app_context=app_context) name = 'Test Assessments' assessment_1 = course.add_assessment() assessment_1.title = 'first' assessment_1.now_available = True assessment_1.weight = 0 assessment_2 = course.add_assessment() assessment_2.title = 'second' assessment_2.now_available = True assessment_2.weight = 0 course.save() assert course.find_unit_by_id(assessment_1.unit_id) assert course.find_unit_by_id(assessment_2.unit_id) assert 2 == len(course.get_units()) # Make the course available. with actions.OverriddenEnvironment({'course': {'now_available': True}}): first = {'score': '1.00', 'assessment_type': assessment_1.unit_id} second = {'score': '3.00', 'assessment_type': assessment_2.unit_id} # Update assessment 1. assessment_1_content = open(os.path.join( appengine_config.BUNDLE_ROOT, 'assets/js/assessment-Pre.js'), 'rb').readlines() assessment_1_content = u''.join(assessment_1_content) errors = [] course.set_assessment_content( assessment_1, assessment_1_content, errors) course.save() assert not errors # Update assessment 2. assessment_2_content = open(os.path.join( appengine_config.BUNDLE_ROOT, 'assets/js/assessment-Mid.js'), 'rb').readlines() assessment_2_content = u''.join(assessment_2_content) errors = [] course.set_assessment_content( assessment_2, assessment_2_content, errors) course.save() assert not errors # Register. user = actions.login('test_assessments@google.com') actions.register(self, name) # Submit assessment 1. 
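# The 'first' dict defined above carries the submitted score and the target
# assessment id; submit_assessment() is assumed to post it on behalf of the
# logged-in student, after which get_all_scores() should report score 1 for
# 'first' and 0 for the not-yet-attempted 'second', as asserted below.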
actions.submit_assessment(self, assessment_1.unit_id, first) student = ( models.StudentProfileDAO.get_enrolled_student_by_user_for( user, app_context)) student_scores = course.get_all_scores(student) assert len(student_scores) == 2 assert student_scores[0]['id'] == str(assessment_1.unit_id) assert student_scores[0]['score'] == 1 assert student_scores[0]['title'] == 'first' assert student_scores[0]['weight'] == 0 assert student_scores[1]['id'] == str(assessment_2.unit_id) assert student_scores[1]['score'] == 0 assert student_scores[1]['title'] == 'second' assert student_scores[1]['weight'] == 0 # The overall score is None if there are no weights assigned to any # of the assessments. overall_score = course.get_overall_score(student) assert overall_score is None # View the student profile page. response = self.get('student/home') assert_does_not_contain('Overall course score', response.body) assert_does_not_contain('Skills Progress', response.body) # Add a weight to the first assessment. assessment_1.weight = 10 overall_score = course.get_overall_score(student) assert overall_score == 1 # Submit assessment 2. actions.submit_assessment(self, assessment_2.unit_id, second) # We need to reload the student instance, because its properties # have changed. student = ( models.StudentProfileDAO.get_enrolled_student_by_user_for( user, app_context)) student_scores = course.get_all_scores(student) assert len(student_scores) == 2 assert student_scores[1]['score'] == 3 overall_score = course.get_overall_score(student) assert overall_score == 1 # Change the weight of assessment 2. assessment_2.weight = 30 overall_score = course.get_overall_score(student) assert overall_score == int((1 * 10 + 3 * 30) / 40) # Save all changes. course.save() # View the student profile page. response = self.get('student/home') assert_contains('assessment-score-first">1</span>', response.body) assert_contains('assessment-score-second">3</span>', response.body) assert_contains('Overall course score', response.body) assert_contains('assessment-score-overall">2</span>', response.body) # Submitting a lower score for any assessment does not change any of # the scores, since the system records the maximum score that has # ever been achieved on any assessment. first_retry = { 'score': '0', 'assessment_type': assessment_1.unit_id} actions.submit_assessment(self, assessment_1.unit_id, first_retry) student = ( models.StudentProfileDAO.get_enrolled_student_by_user_for( user, app_context)) student_scores = course.get_all_scores(student) assert len(student_scores) == 2 assert student_scores[0]['id'] == str(assessment_1.unit_id) assert student_scores[0]['score'] == 1 overall_score = course.get_overall_score(student) assert overall_score == int((1 * 10 + 3 * 30) / 40) actions.logout() def test_datastore_backed_file_system(self): """Tests datastore-backed file system operations.""" fs = vfs.AbstractFileSystem(vfs.DatastoreBackedFileSystem('', '/')) # Check binary file. src = os.path.join(appengine_config.BUNDLE_ROOT, 'course.yaml') dst = os.path.join('/', 'course.yaml') fs.put(dst, open(src, 'rb')) stored = fs.open(dst) assert stored.metadata.size == len(open(src, 'rb').read()) assert not stored.metadata.is_draft assert stored.read() == open(src, 'rb').read() # Check draft. fs.put(dst, open(src, 'rb'), is_draft=True) stored = fs.open(dst) assert stored.metadata.is_draft # Check text files with non-ASCII characters and encoding. foo_js = os.path.join('/', 'assets/js/foo.js') foo_text = u'This is a test text (тест данные).' 
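# vfs.string_to_stream()/vfs.stream_to_string() act as the encode/decode pair
# for text content in the datastore-backed file system; the round trip below
# is expected to preserve the non-ASCII characters in foo_text exactly.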
fs.put(foo_js, vfs.string_to_stream(foo_text)) stored = fs.open(foo_js) assert vfs.stream_to_string(stored) == foo_text # Check delete. del_file = os.path.join('/', 'memcache.test') fs.put(del_file, vfs.string_to_stream(u'test')) assert fs.isfile(del_file) fs.delete(del_file) assert not fs.isfile(del_file) # Check open or delete of non-existent does not fail. assert not fs.open('/foo/bar/baz') assert not fs.delete('/foo/bar/baz') # Check new content fully overrides old (with and without memcache). test_file = os.path.join('/', 'memcache.test') fs.put(test_file, vfs.string_to_stream(u'test text')) stored = fs.open(test_file) assert u'test text' == vfs.stream_to_string(stored) fs.delete(test_file) # Check file existence. assert not fs.isfile('/foo/bar') assert fs.isfile('/course.yaml') assert fs.isfile('/assets/js/foo.js') # Check file listing. bar_js = os.path.join('/', 'assets/js/bar.js') fs.put(bar_js, vfs.string_to_stream(foo_text)) baz_js = os.path.join('/', 'assets/js/baz.js') fs.put(baz_js, vfs.string_to_stream(foo_text)) assert fs.list('/') == sorted([ u'/course.yaml', u'/assets/js/foo.js', u'/assets/js/bar.js', u'/assets/js/baz.js']) assert fs.list('/assets') == sorted([ u'/assets/js/foo.js', u'/assets/js/bar.js', u'/assets/js/baz.js']) assert not fs.list('/foo/bar') def test_utf8_datastore(self): """Test writing to and reading from datastore using UTF-8 content.""" event = models.EventEntity() event.source = 'test-source' event.user_id = 'test-user-id' event.data = u'Test Data (тест данные)' event.put() stored_event = models.EventEntity().get_by_id([event.key().id()]) assert 1 == len(stored_event) assert event.data == stored_event[0].data def assert_queriable(self, entity, name, date_type=datetime.datetime): """Create some entities and check that single-property queries work.""" for i in range(1, 32): item = entity( key_name='%s_%s' % (date_type.__class__.__name__, i)) setattr(item, name, date_type(2012, 1, i)) item.put() # Descending order. items = entity.all().order('-%s' % name).fetch(1000) assert len(items) == 31 assert getattr(items[0], name) == date_type(2012, 1, 31) # Ascending order. items = entity.all().order('%s' % name).fetch(1000) assert len(items) == 31 assert getattr(items[0], name) == date_type(2012, 1, 1) def test_indexed_properties(self): """Test whether entities support specific query types.""" # A 'DateProperty' or 'DateTimeProperty' of each persistent entity must # be indexed. This is true even if the application doesn't execute any # queries relying on the index. The index is still critically important # for managing data, for example, for bulk data download or for # incremental computations. Using index, the entire table can be # processed in daily, weekly, etc. chunks and it is easy to query for # new data. If we did not have an index, chunking would have to be done # by the primary index, where it is impossible to separate recently # added/modified rows from the rest of the data. Having this index adds # to the cost of datastore writes, but we believe it is important to # have it. Below we check that all persistent date/datetime properties # are indexed. 
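# assert_queriable() (defined above) writes 31 entities with consecutive
# January 2012 values into the named property and then runs ascending and
# descending single-property queries over it; each call below therefore
# exercises one date/datetime property whose index is being relied upon.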
self.assert_queriable( AnnouncementEntity, 'date', date_type=datetime.date) self.assert_queriable(models.EventEntity, 'recorded_on') self.assert_queriable(models.Student, 'enrolled_on') self.assert_queriable(models.StudentAnswersEntity, 'updated_on') self.assert_queriable(jobs.DurableJobEntity, 'updated_on') def test_db_overrides_apply_after_prop_registration(self): prop = config.ConfigPropertyEntity(key_name='prop1') prop.value = 'foo' prop.is_draft = False prop.put() config.Registry.get_overrides(force_update=True) prop = config.ConfigProperty( 'prop1', config.TYPE_STR, '', default_value='bar') self.assertEqual(prop.value, 'foo') def test_config_visible_from_any_namespace(self): """Test that ConfigProperty is visible from any namespace.""" assert ( config.UPDATE_INTERVAL_SEC.value == config.UPDATE_INTERVAL_SEC.default_value) new_value = config.UPDATE_INTERVAL_SEC.default_value + 5 # Add datastore override for known property. prop = config.ConfigPropertyEntity( key_name=config.UPDATE_INTERVAL_SEC.name) prop.value = str(new_value) prop.is_draft = False prop.put() # Check visible from default namespace. config.Registry.last_update_time = 0 assert config.UPDATE_INTERVAL_SEC.value == new_value # Check visible from another namespace. old_namespace = namespace_manager.get_namespace() try: namespace_manager.set_namespace( 'ns-test_config_visible_from_any_namespace') config.Registry.last_update_time = 0 assert config.UPDATE_INTERVAL_SEC.value == new_value finally: namespace_manager.set_namespace(old_namespace) class AdminAspectTest(actions.TestBase): """Test site from the Admin perspective.""" def test_default_admin_tab_is_courses(self): actions.login('test_appstats@google.com', is_admin=True) response = self.testapp.get('/admin/global') dom = self.parse_html_string(self.testapp.get('/admin/global').body) group = dom.find('.//*[@id="menu-group__admin"]') item = dom.find('.//*[@id="menu-item__admin__courses"]') self.assertIn('gcb-active-group', group.get('class')) self.assertIn('gcb-active', item.get('class')) def test_appstats(self): """Checks that appstats is available when enabled.""" email = 'test_appstats@google.com' # check appstats is disabled by default actions.login(email, is_admin=True) response = self.testapp.get('/admin/global') assert_equals(response.status_int, 200) assert_does_not_contain('>Appstats</a>', response.body) assert_does_not_contain('/admin/stats/', response.body) # enable and check appstats is now enabled modules.admin.admin.notify_module_disabled() os.environ['GCB_APPSTATS_ENABLED'] = 'True' modules.admin.admin.notify_module_enabled() response = self.testapp.get('/admin/global') assert_equals(response.status_int, 200) dom = self.parse_html_string(response.body) stats_menu_item = dom.find('.//*[@id="menu-item__admin__stats"]') self.assertIsNotNone(stats_menu_item) self.assertEqual(stats_menu_item.get('href'), '/admin/stats/') modules.admin.admin.notify_module_disabled() del os.environ['GCB_APPSTATS_ENABLED'] modules.admin.admin.notify_module_enabled() def test_courses_page_for_multiple_courses(self): """Tests /admin page showing multiple courses.""" # Setup courses. sites.setup_courses('course:/aaa::ns_a, course:/bbb::ns_b, course:/:/') config.Registry.test_overrides[ models.CAN_USE_MEMCACHE.name] = True # Validate the courses before import. all_courses = sites.get_all_courses() dst_app_context_a = all_courses[0] dst_app_context_b = all_courses[1] src_app_context = all_courses[2] # This test requires a read-write file system. 
If test runs on read- # only one, we can't run this test :( if (not dst_app_context_a.fs.is_read_write() or not dst_app_context_a.fs.is_read_write()): return course_a = courses.Course(None, app_context=dst_app_context_a) course_b = courses.Course(None, app_context=dst_app_context_b) unused_course, course_a = course_a.import_from(src_app_context) unused_course, course_b = course_b.import_from(src_app_context) # Rename courses. with Namespace(dst_app_context_a.get_namespace_name()): course_a.save_settings({'course': {'title': 'Course AAA'}}) with Namespace(dst_app_context_b.get_namespace_name()): course_b.save_settings({'course': {'title': 'Course BBB'}}) # Login. email = 'test_courses_page_for_multiple_courses@google.com' actions.login(email, is_admin=True) # Check the course listing page. response = self.testapp.get('/admin') assert_contains_all_of([ 'Course AAA', '/aaa/dashboard', 'Course BBB', '/bbb/dashboard'], response.body) # Clean up. sites.reset_courses() def test_python_console(self): """Test access rights to the Python console.""" email = 'test_python_console@google.com' # The default is that the console should be turned off self.assertFalse(modules.admin.admin.DIRECT_CODE_EXECUTION_UI_ENABLED) # Test the console when it is enabled modules.admin.admin.notify_module_disabled() modules.admin.admin.DIRECT_CODE_EXECUTION_UI_ENABLED = True modules.admin.admin.notify_module_enabled() # Check normal user has no access. actions.login(email) response = self.testapp.get('/admin/global?action=console') assert_equals(response.status_int, 302) response = self.testapp.post('/admin/global?action=console') assert_equals(response.status_int, 302) # Check delegated admin has no access. os.environ['gcb_admin_user_emails'] = '[%s]' % email actions.login(email) response = self.testapp.get('/admin/global?action=console') assert_equals(response.status_int, 200) assert_contains( 'You must be an actual admin user to continue.', response.body) response = self.testapp.get('/admin/global?action=console') assert_equals(response.status_int, 200) assert_contains( 'You must be an actual admin user to continue.', response.body) del os.environ['gcb_admin_user_emails'] # Check actual admin has access. actions.login(email, is_admin=True) response = self.testapp.get('/admin/global?action=console') assert_equals(response.status_int, 200) response.forms[0].set('code', 'print "foo" + "bar"') response = self.submit(response.forms[0], response) assert_contains('foobar', response.body) # Finally, test that the console is not found when it is disabled modules.admin.admin.notify_module_disabled() modules.admin.admin.DIRECT_CODE_EXECUTION_UI_ENABLED = False modules.admin.admin.notify_module_enabled() actions.login(email, is_admin=True) self.testapp.get('/admin/global?action=console', status=404) self.testapp.post('/admin/global?action=console_run', status=404) def test_non_admin_has_no_access(self): """Test non admin has no access to pages or REST endpoints.""" email = 'test_non_admin_has_no_access@google.com' actions.login(email) # Add datastore override. prop = config.ConfigPropertyEntity( key_name='gcb_config_update_interval_sec') prop.value = '5' prop.is_draft = False prop.put() # Check user has no access to specific pages and actions. 
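# Convention exercised below: HTML admin pages deny access to non-admins with
# an HTTP 302 redirect, while the REST endpoints still answer 200 but embed
# the real outcome in a JSON envelope, roughly:
#   {"status": 401, "message": "Access denied."}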
response = self.testapp.get('/admin/global?action=settings') assert_equals(response.status_int, 302) response = self.testapp.get( '/admin/global?action=config_edit&name=gcb_admin_user_emails') assert_equals(response.status_int, 302) response = self.testapp.post( '/admin/global?action=config_reset&name=gcb_admin_user_emails') assert_equals(response.status_int, 302) # Check user has no rights to GET verb. response = self.testapp.get( '/rest/config/item?key=gcb_config_update_interval_sec') assert_equals(response.status_int, 200) json_dict = transforms.loads(response.body) assert json_dict['status'] == 401 assert json_dict['message'] == 'Access denied.' # Here are the endpoints we want to test: (uri, xsrf_action_name). endpoints = [ ('/rest/config/item', 'config-property-put'), ('/rest/courses/item', 'add-course-put')] # Check user has no rights to PUT verb. payload_dict = {} payload_dict['value'] = '666' payload_dict['is_draft'] = False request = {} request['key'] = 'gcb_config_update_interval_sec' request['payload'] = transforms.dumps(payload_dict) for uri, unused_action in endpoints: response = self.testapp.put(uri + '?%s' % urllib.urlencode( {'request': transforms.dumps(request)}), {}) assert_equals(response.status_int, 200) assert_contains('"status": 403', response.body) # Check user still has no rights to PUT verb even if he somehow # obtained a valid XSRF token. for uri, action in endpoints: request['xsrf_token'] = XsrfTokenManager.create_xsrf_token(action) response = self.testapp.put(uri + '?%s' % urllib.urlencode( {'request': transforms.dumps(request)}), {}) assert_equals(response.status_int, 200) json_dict = transforms.loads(response.body) assert json_dict['status'] == 401 assert json_dict['message'] == 'Access denied.' def test_admin_list(self): """Test delegation of admin access to another user.""" email = 'test_admin_list@google.com' actions.login(email) # Add environment variable override. os.environ['gcb_admin_user_emails'] = '[%s]' % email # Add datastore override. prop = config.ConfigPropertyEntity( key_name='gcb_config_update_interval_sec') prop.value = '5' prop.is_draft = False prop.put() # Check user has access now. response = self.testapp.get('/admin/global?action=settings') assert_equals(response.status_int, 200) # Check overrides are active and have proper management actions. assert_contains('gcb_admin_user_emails', response.body) assert_contains('[test_admin_list@google.com]', response.body) assert_contains( '/admin/global?action=config_override&amp;' 'name=gcb_admin_user_emails', response.body) assert_contains( '/admin/global?action=config_edit&amp;' 'name=gcb_config_update_interval_sec', response.body) # Check editor page has proper actions. response = self.testapp.get( '/admin/global?action=config_edit&amp;' 'name=gcb_config_update_interval_sec') assert_equals(response.status_int, 200) assert_contains('/admin/global?action=config_reset', response.body) assert_contains('name=gcb_config_update_interval_sec', response.body) # Remove override. del os.environ['gcb_admin_user_emails'] # Check user has no access. 
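# With the gcb_admin_user_emails override removed above, the delegated admin
# reverts to an ordinary user, so the settings page is expected to redirect
# (302) again instead of rendering.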
response = self.testapp.get('/admin/global?action=settings') assert_equals(response.status_int, 302) def test_access_to_admin_pages(self): """Test access to admin pages.""" # assert anonymous user has no access response = self.testapp.get('/admin/global?action=settings') assert_equals(response.status_int, 302) # assert admin user has access email = 'test_access_to_admin_pages@google.com' name = 'Test Access to Admin Pages' actions.login(email, is_admin=True) actions.register(self, name) response = self.testapp.get('/admin/global') assert_contains('Power Searching with Google', response.body) response = self.testapp.get('/admin/global?action=settings') assert_contains('gcb_admin_user_emails', response.body) assert_contains('gcb_config_update_interval_sec', response.body) response = self.testapp.get('/admin/global?action=perf') assert_contains('gcb-admin-uptime-sec:', response.body) assert_contains('In-process Performance Counters', response.body) response = self.testapp.get('/admin/global?action=deployment') assert_contains('application_id: testbed-test', response.body) assert_contains('About the Application', response.body) actions.unregister(self) actions.logout() # assert not-admin user has no access actions.login(email) actions.register(self, name) response = self.testapp.get('/admin/global?action=settings') assert_equals(response.status_int, 302) def test_multiple_courses(self): """Test courses admin page with two courses configured.""" sites.setup_courses( 'course:/foo:/foo-data, course:/bar:/bar-data:nsbar') email = 'test_multiple_courses@google.com' actions.login(email, is_admin=True) response = self.testapp.get('/admin/global') assert_contains('Course Builder &gt; Admin &gt; Courses', response.body) assert_contains('Total: 2 item(s)', response.body) # Check ocurse URL's. assert_contains('<a href="/foo/dashboard">', response.body) assert_contains('<a href="/bar/dashboard">', response.body) # Check content locations. assert_contains('/foo-data', response.body) assert_contains('/bar-data', response.body) # Check namespaces. assert_contains('gcb-course-foo-data', response.body) assert_contains('nsbar', response.body) # Clean up. sites.reset_courses() def test_add_course(self): """Tests adding a new course entry.""" if not self.supports_editing: return email = 'test_add_course@google.com' actions.login(email, is_admin=True) # Prepare request data. payload_dict = { 'name': 'add_new', 'title': u'new course (тест данные)', 'admin_email': 'foo@bar.com'} request = {} request['payload'] = transforms.dumps(payload_dict) request['xsrf_token'] = XsrfTokenManager.create_xsrf_token( 'add-course-put') # Execute action. response = self.testapp.put('/rest/courses/item?%s' % urllib.urlencode( {'request': transforms.dumps(request)}), {}) assert_equals(response.status_int, 200) # Check response. json_dict = transforms.loads(transforms.loads(response.body)['payload']) assert 'course:/add_new::ns_add_new' == json_dict.get('entry') # Re-execute action; should fail as this would create a duplicate. response = self.testapp.put('/rest/courses/item?%s' % urllib.urlencode( {'request': transforms.dumps(request)}), {}) assert_equals(response.status_int, 200) assert_equals(412, transforms.loads(response.body)['status']) # Load the course and check its title. 
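# The 'entry' string returned by the add-course handler doubles as a course
# rule: 'course:/add_new::ns_add_new' names the URL prefix and the datastore
# namespace, and (it is assumed here) can be passed back to
# sites.get_all_courses() to resolve the newly created app_context.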
new_app_context = sites.get_all_courses( 'course:/add_new::ns_add_new')[0] assert_equals(u'new course (тест данные)', new_app_context.get_title()) new_course = courses.Course(None, app_context=new_app_context) assert not new_course.get_units() class CourseAuthorAspectTest(actions.TestBase): """Tests the site from the Course Author perspective.""" # pylint: disable=too-many-statements def test_dashboard(self): """Test course dashboard.""" email = 'test_dashboard@google.com' name = 'Test Dashboard' # Non-admin does't have access. actions.login(email) response = self.get('dashboard') assert_equals(response.status_int, 302) actions.register(self, name) assert_equals(response.status_int, 302) actions.logout() # Admin has access. actions.login(email, is_admin=True) response = self.get('dashboard') # Verify title does not have link text assert_contains( '<title>Course Builder &gt; Power Searching with Google &gt; Dash', response.body) # Verify body has breadcrumb trail. assert_contains('Google &gt; Dashboard &gt; Outline', response.body) # Tests outline view. response = self.get('dashboard') assert_contains('Advanced techniques', response.body) # Check editability. if self.supports_editing: assert_contains('Add Assessment', response.body) else: assert_does_not_contain('Add Assessment', response.body) # Test assets view. response = self.get('dashboard?action=style_css') # Verify title does not have link text assert_contains( '<title>Course Builder &gt; Power Searching with Google &gt; Dash', response.body) # Verify body has breadcrumb trail. assert_contains( 'Google &gt; Dashboard &gt; Assets &gt; CSS', response.body) assert_contains('assets/css/main.css', response.body) response = self.get('dashboard?action=edit_images') assert_contains('assets/img/Image1.5.png', response.body) response = self.get('dashboard?action=style_js') assert_contains('assets/lib/activity-generic-1.3.js', response.body) # Test settings view. response = self.get('dashboard?action=settings_advanced') # Verify title does not have link text assert_contains( '<title>Course Builder &gt; Power Searching with Google &gt; Dash', response.body) # Verify body has breadcrumb trail. assert_contains('Google &gt; Dashboard &gt; Settings', response.body) assert_contains('course.yaml', response.body) assert_contains( 'title: &#39;Power Searching with Google&#39;', response.body) assert_contains('locale: &#39;en_US&#39;', response.body) # Check editability. if self.supports_editing: assert_contains('create_or_edit_settings', response.body) else: assert_does_not_contain('create_or_edit_settings', response.body) # Tests student statistics view. response = self.get('dashboard?action=analytics_students') # Verify title does not have link text assert_contains( '<title>Course Builder &gt; Power Searching with Google &gt; Dash', response.body) # Verify body has breadcrumb trail. assert_contains( 'Google &gt; Dashboard &gt; Manage &gt; Students', response.body) assert_contains('have not been calculated yet', response.body) response = response.forms[ 'gcb-generate-analytics-data'].submit().follow() assert len(self.taskq.GetTasks('default')) == 3 response = self.get(response.request.url) assert_contains('is running', response.body) self.execute_all_deferred_tasks() response = self.get(response.request.url) assert_contains('were last updated at', response.body) assert_contains('currently enrolled: 1', response.body) assert_contains('total: 1', response.body) # Tests assessment statistics. 
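# Pattern used here and in several tests below: remember the current
# namespace, switch to the course namespace so the Student entities land in
# the right datastore partition, and restore the old namespace in a 'finally'
# clause so a failure cannot leak the namespace into other tests.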
old_namespace = namespace_manager.get_namespace() namespace_manager.set_namespace(self.namespace) try: for i in range(5): student = models.Student(key_name='key-%s' % i) student.is_enrolled = True student.scores = transforms.dumps({'test-assessment': i}) student.put() finally: namespace_manager.set_namespace(old_namespace) response = self.get(response.request.url) response = response.forms[ 'gcb-generate-analytics-data'].submit().follow() self.execute_all_deferred_tasks() response = self.get(response.request.url) assert_contains('currently enrolled: 6', response.body) assert_contains( 'test-assessment (deleted): completed 5, average score 2.0', response.body) def test_no_announcements(self): """Test course author can trigger adding sample announcements.""" email = 'test_announcements@google.com' name = 'Test Announcements' actions.login(email, is_admin=True) actions.register(self, name) response = actions.view_announcements(self) assert_contains('Currently, there are no announcements.', response.body) def test_manage_announcements(self): """Test course author can manage announcements.""" email = 'test_announcements@google.com' name = 'Test Announcements' actions.login(email, is_admin=True) actions.register(self, name) # add new response = actions.view_announcements(self) add_form = response.forms['gcb-add-announcement'] response = self.submit(add_form) assert_equals(response.status_int, 302) # check edit form rendering response = self.testapp.get(response.location) assert_equals(response.status_int, 200) assert_contains('/rest/announcements/item?key=', response.body) # check added response = actions.view_announcements(self) assert_contains('Sample Announcement (Draft)', response.body) # delete draft response = actions.view_announcements(self) delete_form = response.forms['gcb-delete-announcement-0'] response = self.submit(delete_form) assert_equals(response.status_int, 302) # check deleted assert_does_not_contain('Welcome to the final class!', response.body) def test_announcements_rest(self): """Test REST access to announcements.""" email = 'test_announcements_rest@google.com' name = 'Test Announcements Rest' actions.login(email, is_admin=True) actions.register(self, name) response = actions.view_announcements(self) assert_does_not_contain('My Test Title', response.body) # REST GET existing item items = AnnouncementEntity.all().fetch(1) for item in items: response = self.get('rest/announcements/item?key=%s' % item.key()) json_dict = transforms.loads(response.body) assert json_dict['status'] == 200 assert 'message' in json_dict assert 'payload' in json_dict payload_dict = transforms.loads(json_dict['payload']) assert 'title' in payload_dict assert 'date' in payload_dict # REST PUT item payload_dict['title'] = u'My Test Title Мой заголовок теста' payload_dict['date'] = '2012/12/31' payload_dict['is_draft'] = True payload_dict['send_email'] = False request = {} request['key'] = str(item.key()) request['payload'] = transforms.dumps(payload_dict) # Check XSRF is required. response = self.put('rest/announcements/item?%s' % urllib.urlencode( {'request': transforms.dumps(request)}), {}) assert_equals(response.status_int, 200) assert_contains('"status": 403', response.body) # Check PUT works. request['xsrf_token'] = json_dict['xsrf_token'] response = self.put('rest/announcements/item?%s' % urllib.urlencode( {'request': transforms.dumps(request)}), {}) assert_equals(response.status_int, 200) assert_contains('"status": 200', response.body) # Confirm change is visible on the page. 
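# The edit round trip above follows the usual REST handler shape: GET the
# item to obtain its payload and an xsrf_token, modify the decoded payload,
# then PUT it back. A PUT without the token is answered with status 403,
# with the token it returns status 200, and the change should now be visible
# on the public announcements page checked below.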
response = self.get('announcements') assert_contains( u'My Test Title Мой заголовок теста (Draft)', response.body) # REST GET not-existing item response = self.get('rest/announcements/item?key=not_existent_key') json_dict = transforms.loads(response.body) assert json_dict['status'] == 404 class CourseAuthorCourseCreationTest(actions.TestBase): def test_course_admin_does_not_see_courses_he_does_not_administer(self): admin_email = 'admin@foo.com' author_email = 'author@foo.com' actions.login(admin_email, is_admin=True) actions.simple_add_course('course_one', admin_email, 'Course One') actions.simple_add_course('course_two', admin_email, 'Course Two') actions.simple_add_course('course_three', admin_email, 'Course Three') actions.update_course_config('course_one', { 'course': {'admin_user_emails': author_email}}) actions.update_course_config('course_two', { 'course': {'admin_user_emails': author_email}}) actions.login(author_email) # Visit course_one's dashboard response = self.get('/course_one/dashboard') # Expect to be able to see peer course for which author has admin rights self.assertIn('Course Two', response.body) self.assertIn('/course_two', response.body) # But not peer course for which he does not. self.assertNotIn('Course Three', response.body) self.assertNotIn('/course_three', response.body) class StudentKeyNameTest(actions.TestBase): """Test use of email and user_id as key_name in Student.""" def setUp(self): super(StudentKeyNameTest, self).setUp() self._old_enabled = models.Student._LEGACY_EMAIL_AS_KEY_NAME_ENABLED def tearDown(self): models.Student._LEGACY_EMAIL_AS_KEY_NAME_ENABLED = self._old_enabled super(StudentKeyNameTest, self).tearDown() def _assert_user_lookups_work(self, user, by_email=True): student = models.Student.get_by_user(user) self.assertEqual(user.email(), student.email) self.assertEqual(user.user_id(), student.user_id) student = models.Student.get_by_user_id(user.user_id()) self.assertEqual(user.email(), student.email) self.assertEqual(user.user_id(), student.user_id) if by_email: student, _ = models.Student.get_first_by_email(user.email()) self.assertEqual(user.email(), student.email) self.assertEqual(user.user_id(), student.user_id) def test_user_id_is_immutable(self): models.Student._LEGACY_EMAIL_AS_KEY_NAME_ENABLED = False user = actions.login('test_user_id_is_immutable@google.com') actions.register(self, 'Test User') def mutate_user_id(): student = models.Student.get_by_user_id(user.user_id()) student.user_id = '456' self.assertRaises(ValueError, mutate_user_id) def test_email_can_change(self): models.Student._LEGACY_EMAIL_AS_KEY_NAME_ENABLED = True user = actions.login('test_email_change@google.com') actions.register(self, 'Test EMail Change') self._assert_user_lookups_work(user) student, unique = models.Student.get_first_by_email(user.email()) self.assertTrue(unique) student.email = 'new_email@google.com' student.put() self._assert_user_lookups_work(users.User( email='new_email@google.com', _user_id=user.user_id())) student, unique = models.Student.get_first_by_email( 'new_email@google.com') self.assertTrue(unique) self.assertEqual('test_email_change@google.com', student.key().name()) def test_email_is_unique_under_legacy(self): models.Student._LEGACY_EMAIL_AS_KEY_NAME_ENABLED = True user1 = actions.login('user1@google.com') actions.register(self, 'User 1') actions.logout() user2 = actions.login('user2@google.com') actions.register(self, 'User 2') actions.logout() student, unique = models.Student.get_first_by_email(user1.email()) self.assertTrue(unique) 
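# Under the legacy scheme the datastore key_name is the e-mail used at
# registration time, so mutating student.email below is assumed to change
# only the stored property: user1's entity keeps its original key while now
# reporting 'user2@google.com', which is what the lookups that follow check.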
student.email = 'user2@google.com' student.put() student, unique = models.Student.get_first_by_email(user1.email()) self.assertTrue(unique) self.assertEqual('User 1', student.name) self.assertEqual('user2@google.com', student.email) self._assert_user_lookups_work(users.User( email='user2@google.com', _user_id=user1.user_id()), by_email=False) self._assert_user_lookups_work(user2) def test_two_users_with_identical_emails_can_register(self): self.assertEqual(0, len(models.Student.all().fetch(2))) models.Student._LEGACY_EMAIL_AS_KEY_NAME_ENABLED = False user1 = actions.login_with_specified_user_id('user@google.com', '123') actions.register(self, 'User 1') actions.logout() user2 = actions.login_with_specified_user_id('user@google.com', '456') actions.register(self, 'User 2') actions.logout() self.assertEqual(2, len(models.Student.all().fetch(2))) student1 = models.Student.get_by_user(user1) self.assertEqual(user1.user_id(), student1.user_id) student2 = models.Student.get_by_user(user2) self.assertEqual(user2.user_id(), student2.user_id) def test_email_is_not_unique(self): models.Student._LEGACY_EMAIL_AS_KEY_NAME_ENABLED = False user1 = actions.login_with_specified_user_id('user1@google.com', '123') actions.register(self, 'User 1') actions.logout() user2 = actions.login_with_specified_user_id('user2@google.com', '456') actions.register(self, 'User 2') actions.logout() student, unique = models.Student.get_first_by_email(user1.email()) self.assertTrue(unique) student.email = 'user2@google.com' student.put() self._assert_user_lookups_work(users.User( email='user2@google.com', _user_id=user1.user_id()), by_email=False) self._assert_user_lookups_work(user2, by_email=False) # this email no longer in use by user1 self.assertEqual( (None, False), models.Student.get_first_by_email('user1@google.com')) # two users have the same email student, unique = models.Student.get_first_by_email('user2@google.com') self.assertTrue(student.user_id == user1.user_id() or student.user_id == user2.user_id()) self.assertFalse(unique) # check both users can login both having identical email addresses user1_changed = actions.login_with_specified_user_id( 'user2@google.com', '123') response = self.get('student/home') email = self.parse_html_string(response.body).find( './/table[@class="gcb-student-data-table"]//tr[2]//td') assert_contains('user2@google.com', email.text) assert_contains('User 1', response.body) actions.logout() user2 = actions.login_with_specified_user_id('user2@google.com', '456') response = self.get('student/home') email = self.parse_html_string(response.body).find( './/table[@class="gcb-student-data-table"]//tr[2]//td') assert_contains('user2@google.com', email.text) assert_contains('User 2', response.body) actions.logout() def test_mixed_key_name_strategies(self): models.Student._LEGACY_EMAIL_AS_KEY_NAME_ENABLED = True user1 = actions.login('user1@google.com') actions.register(self, 'Legacy User 1') actions.logout() models.Student._LEGACY_EMAIL_AS_KEY_NAME_ENABLED = False user2 = actions.login('user2@google.com') actions.register(self, 'User 2') actions.logout() self._assert_user_lookups_work(user1) self.assertEqual( user1.email(), models.Student.get_by_user(user1).key().name()) self._assert_user_lookups_work(user2) self.assertEqual( user2.user_id(), models.Student.get_by_user(user2).key().name()) def test_legacy_user_can_reregister(self): models.Student._LEGACY_EMAIL_AS_KEY_NAME_ENABLED = True actions.login('user1@google.com') actions.register(self, 'User 1') actions.unregister(self) actions.logout() 
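# Flipping the flag off between registrations checks that a student created
# under the legacy e-mail key can unregister and then re-register once keys
# switch to user_id, without ending up with two Student rows (the fetch(2)
# assertion below expects exactly one).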
models.Student._LEGACY_EMAIL_AS_KEY_NAME_ENABLED = False actions.login('user1@google.com') actions.register(self, 'User 1') self.assertEqual(1, len(models.Student.all().fetch(2))) def test_registration_relies_on_user_id_and_ignores_email(self): models.Student._LEGACY_EMAIL_AS_KEY_NAME_ENABLED = True actions.login_with_specified_user_id('user1@google.com', '123') actions.register(self, 'User 1') actions.logout() models.Student._LEGACY_EMAIL_AS_KEY_NAME_ENABLED = False actions.login_with_specified_user_id('user2@google.com', '123') self.assertRaises(Exception, actions.register, 'User 2') class StudentAspectTest(actions.TestBase): """Test the site from the Student perspective.""" def test_view_announcements(self): """Test student aspect of announcements.""" email = 'test_announcements@google.com' name = 'Test Announcements' actions.login(email) actions.register(self, name) # Check no announcements yet. response = actions.view_announcements(self) assert_does_not_contain('Example Announcement', response.body) assert_does_not_contain('Welcome to the final class!', response.body) assert_contains('Currently, there are no announcements.', response.body) actions.logout() # Login as admin and add announcements. actions.login('admin@sample.com', is_admin=True) actions.register(self, 'admin') response = actions.view_announcements(self) add_form = response.forms['gcb-add-announcement'] response = self.submit(add_form).follow() match = re.search(r'\'([^\']+rest/announcements/item\?key=([^\']+))', response.body) url = match.group(1) key = match.group(2) response = self.get(url) json_dict = transforms.loads(response.body) payload_dict = transforms.loads(json_dict['payload']) payload_dict['title'] = u'My Test Title' payload_dict['date'] = '2015/02/03' payload_dict['is_draft'] = False payload_dict['send_email'] = False request = {} request['key'] = key request['payload'] = transforms.dumps(payload_dict) request['xsrf_token'] = json_dict['xsrf_token'] response = self.put('rest/announcements/item?%s' % urllib.urlencode( {'request': transforms.dumps(request)}), {}) assert_equals(response.status_int, 200) actions.logout() # Check we can see non-draft announcements. actions.login(email) response = actions.view_announcements(self) assert_contains('My Test Title', response.body) assert_does_not_contain('Welcome to the final class!', response.body) assert_does_not_contain('Currently, there are no announcements.', response.body) # Check no access to access to draft announcements via REST handler. items = AnnouncementEntity.all().fetch(1000) for item in items: response = self.get('rest/announcements/item?key=%s' % item.key()) if item.is_draft: json_dict = transforms.loads(response.body) assert json_dict['status'] == 401 else: assert_equals(response.status_int, 200) def test_registration(self): """Test student registration.""" email = 'test_registration@example.com' name1 = 'Test Student' name2 = 'John Smith' name3 = u'Pavel Simakov (тест данные)' actions.login(email) # Verify registration is present on /course to unregistered student. response = self.get('course') self.assertIn('<a href="register">Registration</a>', response.body) actions.register(self, name1) actions.check_profile(self, name1) # Verify registration link is gone once registered. 
response = self.get('course') self.assertNotIn('<a href="register">Registration</a>', response.body) actions.change_name(self, name2) actions.unregister(self) actions.register(self, name3) actions.check_profile(self, name3) def test_course_not_available(self): """Tests course is only accessible to author when incomplete.""" email = 'test_course_not_available@example.com' name = 'Test Course Not Available' actions.login(email) actions.register(self, name) # Check preview and static resources are available. response = self.get('course') assert_equals(response.status_int, 200) response = self.get('assets/js/activity-1.3.js') assert_equals(response.status_int, 200) # Override course.yaml settings by patching app_context. with actions.OverriddenEnvironment( {'course': {'now_available': False}}): # Check preview and static resources are not available to Student. response = self.get('course', expect_errors=True) assert_equals(response.status_int, 404) response = self.get('assets/js/activity-1.3.js', expect_errors=True) assert_equals(response.status_int, 404) # Check preview and static resources are still available to author. actions.login(email, is_admin=True) response = self.get('course') assert_equals(response.status_int, 200) response = self.get('assets/js/activity-1.3.js') assert_equals(response.status_int, 200) def test_registration_closed(self): """Test student registration when course is full.""" email = 'test_registration_closed@example.com' name = 'Test Registration Closed' with actions.OverriddenEnvironment( {'reg_form': {'can_register': False}}): # Try to login and register. actions.login(email) try: actions.register(self, name) raise actions.ShouldHaveFailedByNow( 'Expected to fail: new registrations should not be allowed ' 'when registration is closed.') except actions.ShouldHaveFailedByNow as e: raise e except: # pylint: disable=bare-except pass # Verify registration link not present on /course response = self.get('course') self.assertNotIn( '<a href="register">Registration</a>', response.body) def test_registration_with_additional_fields(self): """Registers a new student with customized registration form.""" email = 'test_registration_with_additional_fields@example.com' name = 'Test Registration with Additional Fields' zipcode = '94043' score = '99' environ = { 'course': {'browsable': False}, 'reg_form': { 'additional_registration_fields': ( '\'<!-- reg_form.additional_registration_fields -->' '<li>' '<label class="form-label" for="form02"> ' 'What is your zipcode?' '</label><input name="form02" type="text"></li>' '<li>' '<label class="form-label" for="form03"> ' 'What is your score?' '</label> <input name="form03" type="text"></li>\'') } } with actions.OverriddenEnvironment(environ): # Login and register. user = actions.login(email) actions.register_with_additional_fields(self, name, zipcode, score) # Verify that registration results in capturing additional # registration questions. old_namespace = namespace_manager.get_namespace() namespace_manager.set_namespace(self.namespace) student = models.Student.get_enrolled_student_by_user(user) # Check that two registration additional fields are populated # with correct values. if student.additional_fields: json_dict = transforms.loads(student.additional_fields) assert zipcode == json_dict[2][1] assert score == json_dict[3][1] # Clean up app_context. 
namespace_manager.set_namespace(old_namespace) def test_permissions(self): """Test student permissions, and which pages they can view.""" email = 'test_permissions@example.com' name = 'Test Permissions' with actions.OverriddenEnvironment({'course': {'browsable': False}}): actions.login(email) actions.register(self, name) actions.Permissions.assert_enrolled(self) actions.unregister(self) actions.Permissions.assert_unenrolled(self) actions.register(self, name) actions.Permissions.assert_enrolled(self) def test_login_and_logout(self): """Test if login and logout behave as expected.""" with actions.OverriddenEnvironment({'course': {'browsable': False}}): email = 'test_login_logout@example.com' actions.Permissions.assert_logged_out(self) actions.login(email) actions.Permissions.assert_unenrolled(self) actions.logout() actions.Permissions.assert_logged_out(self) def assert_locale_settings(self): # Locale picker shown. Chooser shows only available locales. course_page = self.parse_html_string(self.get('course').body) locale_options = course_page.findall( './/select[@id="locale-select"]/option') self.assertEqual(2, len(locale_options)) self.assertEquals('en_US', locale_options[0].attrib['value']) self.assertEquals('el', locale_options[1].attrib['value']) # Set language prefs using the REST endoint # A bad XSRF token is rejected request = {'xsrf_token': '1234'} response = transforms.loads(self.post( 'rest/locale', {'request': transforms.dumps(request)}).body) self.assertEquals(403, response['status']) self.assertIn('Bad XSRF token', response['message']) xsrf_token = crypto.XsrfTokenManager.create_xsrf_token('locales') # An unavailable locale is rejected request = {'xsrf_token': xsrf_token, 'payload': {'selected': 'fr'}} response = transforms.loads(self.post( 'rest/locale', {'request': transforms.dumps(request)}).body) self.assertEquals(401, response['status']) self.assertIn('Bad locale', response['message']) # An available locale is accepted request = {'xsrf_token': xsrf_token, 'payload': {'selected': 'el'}} response = transforms.loads(self.post( 'rest/locale', {'request': transforms.dumps(request)}).body) self.assertEquals(200, response['status']) self.assertIn('OK', response['message']) # After setting locale, visit course homepage and see new locale course_page = self.parse_html_string(self.get('course').body) self.assertEquals( u'Εγγραφή', course_page.find('.//a[@href="register"]').text) def test_locale_settings(self): extra_environ = { 'course': {'locale': 'en_US', 'can_student_change_locale': True}, 'extra_locales': [ {'locale': 'el', 'availability': 'available'}, {'locale': 'fr', 'availability': 'unavailable'}]} with actions.OverriddenEnvironment(extra_environ): # Visit course home page with no locale settings and see the default # locale course_page = self.parse_html_string(self.get('course').body) self.assertEquals( 'Registration', course_page.find('.//a[@href="register"]').text) # Visit course home page with accept-language set to an available # locale course_page = self.parse_html_string( self.get('course', headers={'Accept-Language': 'el'}).body) self.assertEquals( u'Εγγραφή', course_page.find('.//a[@href="register"]').text) # Visit course home page with accept-language set to an unavailable # locale course_page = self.parse_html_string( self.get('course', headers={'Accept-Language': 'fr'}).body) self.assertEquals( u'Registration', course_page.find('.//a[@href="register"]').text) actions.login('user@place.com') self.assert_locale_settings() actions.logout() self.assert_locale_settings() 
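# For the logged-out visitor the selected locale has no student record to
# live in, so it is expected to be carried in the 'cb-user-locale' cookie
# instead, which the assertion below verifies.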
self.assertEquals('el', self.testapp.cookies['cb-user-locale']) def test_lesson_activity_navigation(self): """Test navigation between lesson/activity pages.""" email = 'test_lesson_activity_navigation@example.com' name = 'Test Lesson Activity Navigation' actions.login(email) actions.register(self, name) response = self.get('unit?unit=1&lesson=1') assert_does_not_contain('Previous Page', response.body) assert_contains('Next Page', response.body) response = self.get('unit?unit=2&lesson=3') assert_contains('Previous Page', response.body) assert_contains('Next Page', response.body) response = self.get('unit?unit=3&lesson=5') assert_contains('Previous Page', response.body) assert_does_not_contain('Next Page', response.body) assert_contains('End', response.body) def test_unit_title_without_index(self): """Tests display of unit/lesson titles when unit index is not shown.""" email = 'test_unit_title_without_index@example.com' name = 'test_unit_title_without_index' actions.login(email) actions.register(self, name) response = self.get('unit?unit=2&lesson=2') assert_contains('Unit 2 - Interpreting results', response.body) with actions.OverriddenEnvironment( {'course': {'display_unit_title_without_index': True}}): response = self.get('unit?unit=2&lesson=2') assert_does_not_contain( 'Unit 2 - Interpreting results', response.body) assert_contains('Interpreting results', response.body) def test_lesson_title_without_auto_index(self): """Tests display of lesson title when auto indexing is disabled.""" email = 'test_lesson_title_without_auto_index@example.com' name = 'test_lesson_title_without_auto_index' actions.login(email) actions.register(self, name) response = self.get('unit?unit=2&lesson=2') assert_contains('2.1 When search results', response.body) assert_contains('2.2 Thinking more', response.body) assert_contains('2.3 Understand options', response.body) old_load = courses.CourseModel12.load def new_load(unused_cls, app_context): """Modify auto indexing setting for one lesson.""" course = old_load(app_context) lesson = course.get_lessons(2)[1] lesson._auto_index = False return course courses.CourseModel12.load = types.MethodType( new_load, courses.CourseModel12) response = self.get('unit?unit=2&lesson=2') assert_contains('2.1 When search results', response.body) assert_does_not_contain('2.2 Thinking more', response.body) assert_contains('Thinking more', response.body) assert_contains('2.3 Understand options', response.body) courses.CourseModel12.load = old_load def test_show_hide_unit_links_on_sidebar(self): """Test display of unit links in side bar.""" email = 'test_show_hide_unit_links_on_sidebar@example.com' name = 'Test Show/Hide of Unit Links on Side Bar' actions.login(email) actions.register(self, name) text_to_check = [ 'unit?unit=1', 'Unit 1 - ', 'unit?unit=3', 'Unit 3 - ', 'assessment?name=Mid', 'Mid-course assessment', 'unit?unit=1&lesson=5', 'Word order matters', 'unit?unit=3&lesson=4', 'OR and quotes' ] # The default behavior is to show links to other units and lessons. response = self.get('unit?unit=2') for item in text_to_check: assert_contains(item, response.body) with actions.OverriddenEnvironment( {'unit': {'show_unit_links_in_leftnav': False}}): # Check that now we don't have links to other units and lessons. 
response = self.get('unit?unit=2') for item in text_to_check: assert_does_not_contain(item, response.body) def test_show_hide_lesson_navigation(self): """Test display of lesson navigation buttons.""" email = 'test_show_hide_of_lesson_navigation@example.com' name = 'Test Show/Hide of Lesson Navigation' actions.login(email) actions.register(self, name) # The default behavior is to show the lesson navigation buttons. response = self.get('unit?unit=2&lesson=3') assert_contains('<div class="gcb-prev-button">', response.body) assert_contains('<div class="gcb-next-button">', response.body) with actions.OverriddenEnvironment( {'unit': {'hide_lesson_navigation_buttons': True}}): # The lesson navigation buttons should now be hidden. response = self.get('unit?unit=2&lesson=3') assert_does_not_contain( '<div class="gcb-prev-button">', response.body) assert_does_not_contain( '<div class="gcb-next-button">', response.body) def test_attempt_activity_event(self): """Test activity attempt generates event.""" email = 'test_attempt_activity_event@example.com' name = 'Test Attempt Activity Event' actions.login(email) actions.register(self, name) # Enable event recording. config.Registry.test_overrides[ lessons.CAN_PERSIST_ACTIVITY_EVENTS.name] = True # Prepare event. request = {} request['source'] = 'test-source' request['payload'] = transforms.dumps({'Alice': u'Bob (тест данные)'}) # Check XSRF token is required. response = self.post('rest/events?%s' % urllib.urlencode( {'request': transforms.dumps(request)}), {}) assert_equals(response.status_int, 200) assert_contains('"status": 403', response.body) # Check PUT works. request['xsrf_token'] = XsrfTokenManager.create_xsrf_token( 'event-post') response = self.post('rest/events?%s' % urllib.urlencode( {'request': transforms.dumps(request)}), {}) assert_equals(response.status_int, 200) assert not response.body # Check event is properly recorded. old_namespace = namespace_manager.get_namespace() namespace_manager.set_namespace(self.namespace) try: events = models.EventEntity.all().fetch(1000) assert 1 == len(events) assert_contains( u'Bob (тест данные)', transforms.loads(events[0].data)['Alice']) finally: namespace_manager.set_namespace(old_namespace) # Clean up. config.Registry.test_overrides = {} def test_two_students_dont_see_each_other_pages(self): """Test a user can't see another user pages.""" email1 = 'user1@foo.com' name1 = 'User 1' email2 = 'user2@foo.com' name2 = 'User 2' # Login as one user and view 'unit' and other pages, which are not # cached. actions.login(email1) actions.register(self, name1) actions.Permissions.assert_enrolled(self) response = actions.view_unit(self) assert_contains(email1, response.body) actions.logout() # Login as another user and check that 'unit' and other pages show # the correct new email. 
actions.login(email2) actions.register(self, name2) actions.Permissions.assert_enrolled(self) response = actions.view_unit(self) assert_contains(email2, response.body) actions.logout() def test_xsrf_defence(self): """Test defense against XSRF attack.""" email = 'test_xsrf_defence@example.com' name = 'Test Xsrf Defence' actions.login(email) actions.register(self, name) response = self.get('student/home') edit_form = actions.get_form_by_action(response, 'student/editstudent') edit_form.set('name', 'My New Name') edit_form.set('xsrf_token', 'bad token') response = edit_form.submit(expect_errors=True) assert_equals(response.status_int, 403) def test_autoescaping(self): """Test Jinja autoescaping.""" email = 'test_autoescaping@example.com' name1 = '<script>alert(1);</script>' name2 = '<script>alert(2);</script>' actions.login(email) actions.register(self, name1) actions.check_profile(self, name1) actions.change_name(self, name2) actions.unregister(self) def test_response_headers(self): """Test dynamically-generated responses use proper headers.""" email = 'test_response_headers@example.com' name = 'Test Response Headers' actions.login(email) actions.register(self, name) response = self.get('student/home') assert_equals(response.status_int, 200) assert_contains('must-revalidate', response.headers['Cache-Control']) assert_contains('no-cache', response.headers['Cache-Control']) assert_contains('no-cache', response.headers['Pragma']) assert_contains('Mon, 01 Jan 1990', response.headers['Expires']) def test_browsability_permissions(self): """Tests that the course browsability flag works correctly.""" # By default, courses are browsable. response = self.get('course') assert_equals(response.status_int, 200) assert_contains('<a href="assessment?name=Pre"', response.body) assert_does_not_contain('progress-notstarted-Pre', response.body) actions.Permissions.assert_can_browse(self) with actions.OverriddenEnvironment({'course': {'browsable': False}}): actions.Permissions.assert_logged_out(self) # Check course page redirects. 
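# With 'browsable' turned off and no user in session, the course home page is
# expected to redirect (presumably into the login flow) instead of rendering,
# hence the 302 asserted below.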
            response = self.get('course', expect_errors=True)
            assert_equals(response.status_int, 302)


class StudentUnifiedProfileTest(StudentAspectTest):
    """Tests student actions having unified profile enabled."""

    def setUp(self):
        super(StudentUnifiedProfileTest, self).setUp()
        config.Registry.test_overrides[
            models.CAN_SHARE_STUDENT_PROFILE] = True

    def tearDown(self):
        config.Registry.test_overrides = {}
        super(StudentUnifiedProfileTest, self).tearDown()


class StaticHandlerTest(actions.TestBase):
    """Check serving of static resources."""

    def test_static_files_cache_control(self):
        """Test static/zip handlers use proper Cache-Control headers."""

        def assert_response(response):
            assert_equals(response.status_int, 200)
            assert_contains('max-age=600', response.headers['Cache-Control'])
            assert_contains('public', response.headers['Cache-Control'])
            assert_does_not_contain('no-cache', str(response.headers))
            assert_does_not_contain('must-revalidate', str(response.headers))

        # static resource on a namespaced route
        assert_response(self.get('/assets/css/main.css'))

        # static resource from the file system on a global route
        assert_response(self.testapp.get(
            '/modules/oeditor/resources/butterbar.js'))

        # static resource from the zip file on a global route; it requires login
        assert_response(self.testapp.get(
            '/static/inputex-3.1.0/src/inputex/assets/skins/sam/inputex.css'))


class ActivityTest(actions.TestBase):
    """Test for activities."""

    def get_activity(self, unit_id, lesson_id, args):
        """Retrieve the activity page for a given unit and lesson id."""
        response = self.get(
            'activity?unit=%s&lesson=%s' % (unit_id, lesson_id))
        assert_equals(response.status_int, 200)
        assert_contains(
            '<script src="assets/js/activity-%s.%s.js"></script>' % (
                unit_id, lesson_id), response.body)
        assert_contains('assets/lib/activity-generic-1.3.js', response.body)

        js_response = self.get('assets/lib/activity-generic-1.3.js')
        assert_equals(js_response.status_int, 200)

        # Extract XSRF token from the page.
        match = re.search(r'eventXsrfToken = [\']([^\']+)', response.body)
        assert match
        xsrf_token = match.group(1)
        args['xsrf_token'] = xsrf_token

        return response, args

    def test_activities(self):
        """Test that activity submissions are handled and recorded correctly."""
        email = 'test_activities@google.com'
        name = 'Test Activities'
        unit_id = 1
        lesson_id = 2
        activity_submissions = {
            '1.2': {
                'index': 3,
                'type': 'activity-choice',
                'value': 3,
                'correct': True,
            },
        }

        # Register.
        actions.login(email)
        actions.register(self, name)

        # Enable event recording.
        config.Registry.test_overrides[
            lessons.CAN_PERSIST_ACTIVITY_EVENTS.name] = True

        # Navigate to the course overview page, and check that the unit shows
        # no progress yet.
        response = self.get('course')
        assert_equals(response.status_int, 200)
        assert_contains(
            u'id="progress-notstarted-%s"' % unit_id, response.body)

        old_namespace = namespace_manager.get_namespace()
        namespace_manager.set_namespace(self.namespace)
        try:
            response, args = self.get_activity(unit_id, lesson_id, {})

            # Check that the current activity shows no progress yet.
            assert_contains(
                u'id="progress-notstarted-%s-activity"' % lesson_id,
                response.body)

            # Prepare activity submission event.
            args['source'] = 'attempt-activity'
            lesson_key = '%s.%s' % (unit_id, lesson_id)
            assert lesson_key in activity_submissions
            args['payload'] = activity_submissions[lesson_key]
            args['payload']['location'] = (
                'http://localhost:8080/activity?unit=%s&lesson=%s' %
                (unit_id, lesson_id))
            args['payload'] = transforms.dumps(args['payload'])

            # Submit the request to the backend.
response = self.post('rest/events?%s' % urllib.urlencode( {'request': transforms.dumps(args)}), {}) assert_equals(response.status_int, 200) assert not response.body # Check that the current activity shows partial progress. response, args = self.get_activity(unit_id, lesson_id, {}) assert_contains( u'id="progress-inprogress-%s-activity"' % lesson_id, response.body) # Navigate to the course overview page and check that the unit shows # partial progress. response = self.get('course') assert_equals(response.status_int, 200) assert_contains( u'id="progress-inprogress-%s"' % unit_id, response.body) finally: namespace_manager.set_namespace(old_namespace) # pylint: disable=too-many-statements def test_progress(self): """Test student activity progress in detail, using the sample course.""" class FakeHandler(object): def __init__(self, app_context): self.app_context = app_context course = Course(FakeHandler(sites.get_all_courses()[0])) tracker = course.get_progress_tracker() student = models.Student(key_name='key-test-student') # Initially, all progress entries should be set to zero. unit_progress = tracker.get_unit_progress(student) for key in unit_progress: assert unit_progress[key] == 0 lesson_progress = tracker.get_lesson_progress(student, 1) for key in lesson_progress: assert lesson_progress[key] == { 'html': 0, 'activity': 0, 'has_activity': True } # The blocks in Lesson 1.2 with activities are blocks 3 and 6. # Submitting block 3 should trigger an in-progress update. tracker.put_block_completed(student, 1, 2, 3) assert tracker.get_unit_progress(student)['1'] == 1 assert tracker.get_lesson_progress(student, 1)[2] == { 'html': 0, 'activity': 1, 'has_activity': True } # Submitting block 6 should trigger a completion update for the # activity, but Lesson 1.2 is still incomplete. tracker.put_block_completed(student, 1, 2, 6) assert tracker.get_unit_progress(student)['1'] == 1 assert tracker.get_lesson_progress(student, 1)[2] == { 'html': 0, 'activity': 2, 'has_activity': True } # Visiting the HTML page for Lesson 1.2 completes the lesson. tracker.put_html_accessed(student, 1, 2) assert tracker.get_unit_progress(student)['1'] == 1 assert tracker.get_lesson_progress(student, 1)[2] == { 'html': 2, 'activity': 2, 'has_activity': True } # Test a lesson with no interactive blocks in its activity. It should # change its status to 'completed' once it is accessed. tracker.put_activity_accessed(student, 2, 1) assert tracker.get_unit_progress(student)['2'] == 1 assert tracker.get_lesson_progress(student, 2)[1] == { 'html': 0, 'activity': 2, 'has_activity': True } # Test that a lesson without activities (Lesson 1.1) doesn't count. # Complete lessons 1.3, 1.4, 1.5 and 1.6; unit 1 should then be marked # as 'completed' even though we have no events associated with # Lesson 1.1. tracker.put_html_accessed(student, 1, 1) tracker.put_html_accessed(student, 1, 3) tracker.put_html_accessed(student, 1, 4) tracker.put_html_accessed(student, 1, 5) tracker.put_html_accessed(student, 1, 6) tracker.put_activity_completed(student, 1, 3) tracker.put_activity_completed(student, 1, 4) tracker.put_activity_completed(student, 1, 5) assert tracker.get_unit_progress(student)['1'] == 1 tracker.put_activity_completed(student, 1, 6) assert tracker.get_unit_progress(student)['1'] == 2 # Test that a unit is not completed until all HTML and activity pages # have been, at least, visited. Unit 6 has 3 lessons; the last one has # no activity block. 
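        # Note: the integer progress values asserted throughout this test
        # appear to follow the convention 0 = not started, 1 = in progress,
        # 2 = completed for units, lessons and activities.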
tracker.put_html_accessed(student, 6, 1) tracker.put_html_accessed(student, 6, 2) tracker.put_activity_completed(student, 6, 1) tracker.put_activity_completed(student, 6, 2) assert tracker.get_unit_progress(student)['6'] == 1 tracker.put_activity_accessed(student, 6, 3) assert tracker.get_unit_progress(student)['6'] == 1 tracker.put_html_accessed(student, 6, 3) assert tracker.get_unit_progress(student)['6'] == 2 # Test assessment counters. pre_id = 'Pre' tracker.put_assessment_completed(student, pre_id) progress = tracker.get_or_create_progress(student) assert tracker.is_assessment_completed(progress, pre_id) assert tracker.get_assessment_status(progress, pre_id) == 1 tracker.put_assessment_completed(student, pre_id) progress = tracker.get_or_create_progress(student) assert tracker.is_assessment_completed(progress, pre_id) assert tracker.get_assessment_status(progress, pre_id) == 2 tracker.put_assessment_completed(student, pre_id) progress = tracker.get_or_create_progress(student) assert tracker.is_assessment_completed(progress, pre_id) assert tracker.get_assessment_status(progress, pre_id) == 3 # Test that invalid keys do not lead to any updates. # Invalid assessment id. fake_id = 'asdf' tracker.put_assessment_completed(student, fake_id) progress = tracker.get_or_create_progress(student) assert not tracker.is_assessment_completed(progress, fake_id) assert tracker.get_assessment_status(progress, fake_id) is None # Invalid unit id. tracker.put_activity_completed(student, fake_id, 1) progress = tracker.get_or_create_progress(student) assert tracker.get_activity_status(progress, fake_id, 1) is None # Invalid lesson id. fake_numeric_id = 22 tracker.put_activity_completed(student, 1, fake_numeric_id) progress = tracker.get_or_create_progress(student) assert tracker.get_activity_status(progress, 1, fake_numeric_id) is None # Invalid block id. tracker.put_block_completed(student, 5, 2, fake_numeric_id) progress = tracker.get_or_create_progress(student) assert not tracker.is_block_completed( progress, 5, 2, fake_numeric_id) class AssessmentTest(actions.TestBase): """Test for assessments.""" def test_course_pass(self): """Test student passing final exam.""" email = 'test_pass@google.com' name = 'Test Pass' post = {'assessment_type': 'Fin', 'score': '100.00'} # Register. actions.login(email) actions.register(self, name) # Submit answer. response = actions.submit_assessment(self, 'Fin', post) assert_equals(response.status_int, 200) assert_contains('your overall course score of 70%', response.body) assert_contains('you have passed the course', response.body) # Check that the result shows up on the profile page. response = actions.check_profile(self, name) assert_contains('70', response.body) assert_contains('100', response.body) # pylint: disable=too-many-statements def test_assessments(self): """Test assessment scores are properly submitted and summarized.""" course = courses.Course(None, app_context=sites.get_all_courses()[0]) name = 'Test Assessments' pre_answers = [{'foo': 'bar'}, {'Alice': u'Bob (тест данные)'}] pre = { 'assessment_type': 'Pre', 'score': '1.00', 'answers': transforms.dumps(pre_answers)} mid = {'assessment_type': 'Mid', 'score': '2.00'} fin = {'assessment_type': 'Fin', 'score': '3.00'} peer = {'assessment_type': 'ReviewAssessmentExample'} second_mid = {'assessment_type': 'Mid', 'score': '1.00'} second_fin = {'assessment_type': 'Fin', 'score': '100000'} # Register. 
user = actions.login('test_assessments@google.com') actions.register(self, name) # Navigate to the course overview page. response = self.get('course') assert_equals(response.status_int, 200) assert_does_not_contain(u'id="progress-completed-Mid', response.body) assert_contains(u'id="progress-notstarted-Mid', response.body) old_namespace = namespace_manager.get_namespace() namespace_manager.set_namespace(self.namespace) try: student = models.Student.get_enrolled_student_by_user(user) # Check that four score objects (corresponding to the four sample # assessments) exist right now, and that they all have zero # score. student_scores = course.get_all_scores(student) assert len(student_scores) == 4 for assessment in student_scores: assert assessment['score'] == 0 # Submit assessments and check that the score is updated. actions.submit_assessment(self, 'Pre', pre) student = models.Student.get_enrolled_student_by_user(user) student_scores = course.get_all_scores(student) assert len(student_scores) == 4 for assessment in student_scores: if assessment['id'] == 'Pre': assert assessment['score'] > 0 else: assert assessment['score'] == 0 actions.submit_assessment(self, 'Mid', mid) student = models.Student.get_enrolled_student_by_user(user) # Navigate to the course overview page. response = self.get('course') assert_equals(response.status_int, 200) assert_contains(u'id="progress-completed-Pre', response.body) assert_contains(u'id="progress-completed-Mid', response.body) assert_contains(u'id="progress-notstarted-Fin', response.body) # Submit the final assessment. actions.submit_assessment(self, 'Fin', fin) student = models.Student.get_enrolled_student_by_user(user) # Submit the sample peer review assessment. actions.submit_assessment(self, 'ReviewAssessmentExample', peer) student_scores = course.get_all_scores(student) # This assessment is not considered to be completed until enough # peer reviews have been submitted. for assessment in student_scores: if assessment['id'] == 'ReviewAssessmentExample': assert assessment['human_graded'] assert not assessment['completed'] # Navigate to the course overview page. response = self.get('course') assert_equals(response.status_int, 200) assert_contains(u'id="progress-completed-Fin', response.body) # Check that the overall-score is non-zero. assert course.get_overall_score(student) # Check assessment answers. answers = transforms.loads( models.StudentAnswersEntity.get_by_key_name( student.user_id).data) assert pre_answers == answers['Pre'] assert [] == answers['Mid'] assert [] == answers['Fin'] # Check that scores are recorded properly. student = models.Student.get_enrolled_student_by_user(user) assert int(course.get_score(student, 'Pre')) == 1 assert int(course.get_score(student, 'Mid')) == 2 assert int(course.get_score(student, 'Fin')) == 3 assert (int(course.get_overall_score(student)) == int((0.30 * 2) + (0.70 * 3))) # Try posting a new midcourse exam with a lower score; # nothing should change. actions.submit_assessment(self, 'Mid', second_mid) student = models.Student.get_enrolled_student_by_user(user) assert int(course.get_score(student, 'Pre')) == 1 assert int(course.get_score(student, 'Mid')) == 2 assert int(course.get_score(student, 'Fin')) == 3 assert (int(course.get_overall_score(student)) == int((0.30 * 2) + (0.70 * 3))) # Now try posting a postcourse exam with a higher score and note # the changes. 
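            # second_fin deliberately uses an outsized score (100000) so that
            # the change in the weighted total (30% midcourse + 70% final, per
            # the assertions below) is unmistakable.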
actions.submit_assessment(self, 'Fin', second_fin) student = models.Student.get_enrolled_student_by_user(user) assert int(course.get_score(student, 'Pre')) == 1 assert int(course.get_score(student, 'Mid')) == 2 assert int(course.get_score(student, 'Fin')) == 100000 assert (int(course.get_overall_score(student)) == int((0.30 * 2) + (0.70 * 100000))) finally: namespace_manager.set_namespace(old_namespace) def remove_dir(dir_name): """Delete a directory.""" logging.info('removing folder: %s', dir_name) if os.path.exists(dir_name): shutil.rmtree(dir_name) if os.path.exists(dir_name): raise Exception('Failed to delete directory: %s' % dir_name) def clean_dir(dir_name): """Clean a directory.""" remove_dir(dir_name) logging.info('creating folder: %s', dir_name) os.makedirs(dir_name) if not os.path.exists(dir_name): raise Exception('Failed to create directory: %s' % dir_name) def clone_canonical_course_data(src, dst): """Makes a copy of canonical course content.""" clean_dir(dst) def copytree(name): shutil.copytree( os.path.join(src, name), os.path.join(dst, name)) copytree('assets') copytree('data') copytree('views') shutil.copy( os.path.join(src, 'course.yaml'), os.path.join(dst, 'course.yaml')) # Make all files writable. for root, unused_dirs, files in os.walk(dst): for afile in files: fname = os.path.join(root, afile) os.chmod(fname, 0o777) class GeneratedCourse(object): """A helper class for a dynamically generated course content.""" @classmethod def set_data_home(cls, test): """All data for this test will be placed here.""" cls.data_home = test.test_tempdir def __init__(self, ns): self.path = ns @property def namespace(self): return 'ns%s' % self.path @property def title(self): return u'Power Searching with Google title-%s (тест данные)' % self.path @property def unit_title(self): return u'Interpreting results unit-title-%s (тест данные)' % self.path @property def lesson_title(self): return u'Word order matters lesson-title-%s (тест данные)' % self.path @property def head(self): return '<!-- head-%s -->' % self.path @property def css(self): return '<!-- css-%s -->' % self.path @property def home(self): return os.path.join(self.data_home, 'data-%s' % self.path) @property def email(self): return 'walk_the_course_named_%s@google.com' % self.path @property def name(self): return 'Walk The Course Named %s' % self.path class MultipleCoursesTestBase(actions.TestBase): """Configures several courses for running concurrently.""" def modify_file(self, filename, find, replace): """Read, modify and write back the file.""" text = open(filename, 'r').read().decode('utf-8') # Make sure target text is not in the file. 
assert replace not in text text = text.replace(find, replace) assert replace in text open(filename, 'w').write(text.encode('utf-8')) def modify_canonical_course_data(self, course): """Modify canonical content by adding unique bits to it.""" self.modify_file( os.path.join(course.home, 'course.yaml'), 'title: \'Power Searching with Google\'', 'title: \'%s\'' % course.title) self.modify_file( os.path.join(course.home, 'data/unit.csv'), ',Interpreting results,', ',%s,' % course.unit_title) self.modify_file( os.path.join(course.home, 'data/lesson.csv'), ',Word order matters,', ',%s,' % course.lesson_title) self.modify_file( os.path.join(course.home, 'data/lesson.csv'), ',Interpreting results,', ',%s,' % course.unit_title) self.modify_file( os.path.join(course.home, 'views/base.html'), '<head>', '<head>\n%s' % course.head) self.modify_file( os.path.join(course.home, 'assets/css/main.css'), 'html {', '%s\nhtml {' % course.css) def prepare_course_data(self, course): """Create unique course content for a course.""" clone_canonical_course_data(self.bundle_root, course.home) self.modify_canonical_course_data(course) def setUp(self): """Configure the test.""" super(MultipleCoursesTestBase, self).setUp() GeneratedCourse.set_data_home(self) self.course_a = GeneratedCourse('a') self.course_b = GeneratedCourse('b') self.course_ru = GeneratedCourse('ru') # Override BUNDLE_ROOT. self.bundle_root = appengine_config.BUNDLE_ROOT appengine_config.BUNDLE_ROOT = GeneratedCourse.data_home # Prepare course content. clean_dir(GeneratedCourse.data_home) self.prepare_course_data(self.course_a) self.prepare_course_data(self.course_b) self.prepare_course_data(self.course_ru) # Setup one course for I18N. self.modify_file( os.path.join(self.course_ru.home, 'course.yaml'), 'locale: \'en_US\'', 'locale: \'ru\'') # Configure courses. sites.setup_courses('%s, %s, %s' % ( 'course:/courses/a:/data-a:nsa', 'course:/courses/b:/data-b:nsb', 'course:/courses/ru:/data-ru:nsru')) def tearDown(self): """Clean up.""" sites.reset_courses() appengine_config.BUNDLE_ROOT = self.bundle_root super(MultipleCoursesTestBase, self).tearDown() def walk_the_course( self, course, first_time=True, is_admin=False, logout=True): """Visit a course as a Student would.""" with actions.OverriddenEnvironment({'course': {'browsable': False}}): # Check normal user has no access. actions.login(course.email, is_admin=is_admin) # Test schedule. if first_time: response = self.testapp.get('/courses/%s/preview' % course.path) else: response = self.testapp.get('/courses/%s/course' % course.path) assert_contains(course.title, response.body) assert_contains(course.unit_title, response.body) assert_contains(course.head, response.body) # Tests static resource. response = self.testapp.get( '/courses/%s/assets/css/main.css' % course.path) assert_contains(course.css, response.body) if first_time: # Test registration. response = self.get('/courses/%s/register' % course.path) assert_contains(course.title, response.body) assert_contains(course.head, response.body) register_form = actions.get_form_by_action(response, 'register') register_form.set('form01', course.name) register_form.action = '/courses/%s/register' % course.path response = self.submit(register_form) assert_equals(response.status_int, 302) assert_contains( 'course#registration_confirmation', response.headers[ 'location']) # Check lesson page. 
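            # course.lesson_title is the string substituted into lesson.csv by
            # modify_canonical_course_data(), so finding it here confirms the
            # course serves its own modified data rather than the canonical
            # copy.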
response = self.testapp.get( '/courses/%s/unit?unit=1&lesson=5' % course.path) assert_contains(course.title, response.body) assert_contains(course.lesson_title, response.body) assert_contains(course.head, response.body) # Check activity page. response = self.testapp.get( '/courses/%s/activity?unit=1&lesson=5' % course.path) assert_contains(course.title, response.body) assert_contains(course.lesson_title, response.body) assert_contains(course.head, response.body) if logout: actions.logout() class MultipleCoursesTest(MultipleCoursesTestBase): """Test several courses running concurrently.""" def test_courses_are_isolated(self): """Test each course serves its own assets, views and data.""" # Pretend students visit courses. self.walk_the_course(self.course_a) self.walk_the_course(self.course_b) self.walk_the_course(self.course_a, first_time=False) self.walk_the_course(self.course_b, first_time=False) # Check course namespaced data. self.validate_course_data(self.course_a) self.validate_course_data(self.course_b) # Check default namespace. assert ( namespace_manager.get_namespace() == appengine_config.DEFAULT_NAMESPACE_NAME) assert not models.Student.all().fetch(1000) def validate_course_data(self, course): """Check course data is valid.""" old_namespace = namespace_manager.get_namespace() namespace_manager.set_namespace(course.namespace) try: students = models.Student.all().fetch(1000) assert len(students) == 1 for student in students: assert_equals(course.email, student.email) assert_equals(course.name, student.name) finally: namespace_manager.set_namespace(old_namespace) class I18NTest(MultipleCoursesTestBase): """Test courses running in different locales and containing I18N content.""" def test_csv_supports_utf8(self): """Test UTF-8 content in CSV file is handled correctly.""" title_ru = u'Найди факты быстрее' csv_file = os.path.join(self.course_ru.home, 'data/unit.csv') self.modify_file( csv_file, ',Find facts faster,', ',%s,' % title_ru) self.modify_file( os.path.join(self.course_ru.home, 'data/lesson.csv'), ',Find facts faster,', ',%s,' % title_ru) rows = [] for row in csv.reader(open(csv_file)): rows.append(row) assert title_ru == rows[6][3].decode('utf-8') response = self.get('/courses/%s/course' % self.course_ru.path) assert_contains(title_ru, response.body) # Tests student perspective. self.walk_the_course(self.course_ru, first_time=True) self.walk_the_course(self.course_ru, first_time=False) # Test course author dashboard. self.walk_the_course( self.course_ru, first_time=False, is_admin=True, logout=False) dashboard_url = '/courses/%s/dashboard' % self.course_ru.path def assert_page_contains(page_name, text_array): response = self.get('%s?action=%s' % (dashboard_url, page_name)) for text in text_array: assert_contains(text, response.body) assert_page_contains('', [ title_ru, self.course_ru.unit_title, self.course_ru.lesson_title]) assert_page_contains( 'edit_questions', [self.course_ru.title]) assert_page_contains( '', [self.course_ru.title]) assert_contains( vfs.AbstractFileSystem.normpath(self.course_ru.home), self.get('%s?action=settings_about' % dashboard_url).body) # Clean up. 
actions.logout() def test_i18n(self): """Test course is properly internationalized.""" response = self.get('/courses/%s/course' % self.course_ru.path) assert_contains_all_of( [u'Войти', u'Учебный план', u'Курс'], response.body) class CourseUrlRewritingTestBase(actions.TestBase): """Prepare course for using rewrite rules and '/courses/pswg' base URL.""" def setUp(self): super(CourseUrlRewritingTestBase, self).setUp() self.base = '/courses/pswg' self.namespace = 'gcb-courses-pswg-tests-ns' sites.setup_courses('course:%s:/:%s' % (self.base, self.namespace)) def tearDown(self): sites.reset_courses() super(CourseUrlRewritingTestBase, self).tearDown() def canonicalize(self, href, response=None): """Canonicalize URL's using either <base> or self.base.""" # Check if already canonicalized. if href.startswith( self.base) or utils.ApplicationHandler.is_absolute(href): pass else: # Look for <base> tag in the response to compute the canonical URL. if response: return super(CourseUrlRewritingTestBase, self).canonicalize( href, response) # Prepend self.base to compute the canonical URL. if not href.startswith('/'): href = '/%s' % href href = '%s%s' % (self.base, href) self.audit_url(href) return href class VirtualFileSystemTestBase(actions.TestBase): """Prepares a course running on a virtual local file system.""" def setUp(self): """Configure the test.""" super(VirtualFileSystemTestBase, self).setUp() GeneratedCourse.set_data_home(self) # Override BUNDLE_ROOT. self.bundle_root = appengine_config.BUNDLE_ROOT appengine_config.BUNDLE_ROOT = GeneratedCourse.data_home # Prepare course content. home_folder = os.path.join(GeneratedCourse.data_home, 'data-v') clone_canonical_course_data(self.bundle_root, home_folder) # we also need resources in modules def ignore_pyc(unused_dir, filenames): return [ filename for filename in filenames if filename.endswith('.pyc')] modules_home = 'modules' shutil.copytree( os.path.join(self.bundle_root, modules_home), os.path.join(GeneratedCourse.data_home, modules_home), ignore=ignore_pyc) # Configure course. self.namespace = 'nsv' sites.setup_courses('course:/:/data-vfs:%s' % self.namespace) # Modify app_context filesystem to map /data-v to /data-vfs. 
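        # The after_create hook below appears to map the logical /data-vfs
        # location onto the physical data-v folder prepared above, by swapping
        # each new ApplicationContext's file system for a read-only local one.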
def after_create(unused_cls, instance): instance._fs = vfs.AbstractFileSystem( vfs.LocalReadOnlyFileSystem( os.path.join(GeneratedCourse.data_home, 'data-vfs'), home_folder)) sites.ApplicationContext.after_create = after_create def tearDown(self): """Clean up.""" sites.reset_courses() appengine_config.BUNDLE_ROOT = self.bundle_root super(VirtualFileSystemTestBase, self).tearDown() class DatastoreBackedCourseTest(actions.TestBase): """Prepares an empty course running on datastore-backed file system.""" def setUp(self): """Configure the test.""" super(DatastoreBackedCourseTest, self).setUp() self.supports_editing = True self.namespace = 'dsbfs' sites.setup_courses('course:/::%s' % self.namespace) all_courses = sites.get_all_courses() assert len(all_courses) == 1 self.app_context = all_courses[0] def tearDown(self): """Clean up.""" sites.reset_courses() super(DatastoreBackedCourseTest, self).tearDown() def upload_all_in_dir(self, dir_name, files_added): """Uploads all files in a folder to vfs.""" root_dir = os.path.join(appengine_config.BUNDLE_ROOT, dir_name) for root, unused_dirs, files in os.walk(root_dir): for afile in files: filename = os.path.join(root, afile) self.app_context.fs.put(filename, open(filename, 'rb')) files_added.append(filename) def init_course_data(self, upload_files): """Uploads required course data files into vfs.""" files_added = [] old_namespace = namespace_manager.get_namespace() try: namespace_manager.set_namespace(self.namespace) upload_files(files_added) # Normalize paths to be identical for Windows and Linux. files_added_normpath = [] for file_added in files_added: files_added_normpath.append( vfs.AbstractFileSystem.normpath(file_added)) assert self.app_context.fs.list( appengine_config.BUNDLE_ROOT) == sorted(files_added_normpath) finally: namespace_manager.set_namespace(old_namespace) def upload_all_sample_course_files(self, files_added): """Uploads all sample course data files into vfs.""" self.upload_all_in_dir('assets', files_added) self.upload_all_in_dir('views', files_added) self.upload_all_in_dir('data', files_added) course_yaml = os.path.join( appengine_config.BUNDLE_ROOT, 'course.yaml') self.app_context.fs.put(course_yaml, open(course_yaml, 'rb')) files_added.append(course_yaml) def calc_course_stats(self, course): assessment_count = len(course.get_assessment_list()) units_count = len(course.get_units()) activities_count = 0 lessons_count = 0 for uid in [x.unit_id for x in course.get_units()]: unit_lessons = course.get_lessons(uid) lessons_count += len(unit_lessons) activities_count += sum(x.activity for x in unit_lessons) return units_count, lessons_count, activities_count, assessment_count class DatastoreBackedCustomCourseTest(DatastoreBackedCourseTest): """Prepares a sample course running on datastore-backed file system.""" # pylint: disable=too-many-statements def test_course_import(self): """Test importing of the course.""" # Setup courses. sites.setup_courses('course:/test::ns_test, course:/:/') self.namespace = 'ns_test' self.base = '/test' config.Registry.test_overrides[models.CAN_USE_MEMCACHE.name] = True # Format import payload and URL. payload_dict = {} payload_dict['course'] = 'course:/:/' request = {} request['payload'] = transforms.dumps(payload_dict) import_put_url = ( 'rest/course/import?%s' % urllib.urlencode( {'request': transforms.dumps(request)})) # Check non-logged user has no rights. response = self.put(import_put_url, {}, expect_errors=True) assert_equals(404, response.status_int) # Login as admin. 
email = 'test_course_import@google.com' name = 'Test Course Import' actions.login(email, is_admin=True) # Check course is empty. response = self.get('dashboard') assert_equals(200, response.status_int) assert_does_not_contain('Filter image results by color', response.body) # Import sample course. request[ 'xsrf_token'] = XsrfTokenManager.create_xsrf_token('import-course') import_put_url = ( 'rest/course/import?%s' % urllib.urlencode( {'request': transforms.dumps(request)})) response = self.put(import_put_url, {}) assert_equals(200, response.status_int) assert_contains('Imported.', response.body) # Check course is not empty. response = self.get('dashboard') assert_contains('Filter image results by color', response.body) # Check assessment is copied. response = self.get('assessment?name=35') assert_equals(200, response.status_int) assert_contains('Humane Society website', response.body) # Check activity component is hidden response = self.get('dashboard?key=5&action=edit_lesson') assert_equals(200, response.status_int) assert 'excludedCustomTags\\\": [\\\"gcb-activity' in response.body # Check activity is copied. response = self.get('unit?unit=57&lesson=63') assert_equals(200, response.status_int) assert_contains( 'explore ways to keep yourself updated', response.body) unit_2_title = 'Unit 2 - Interpreting results' lesson_2_1_title = 'When search results suggest something new' lesson_2_2_title = 'Thinking more deeply about your search' # Check units and lessons are indexed correctly. response = actions.register(self, name) assert ( 'http://localhost' '/test/course' '#registration_confirmation' == response.location) response = self.get('course') assert_contains(unit_2_title, response.body) # Unit page. response = self.get('unit?unit=14') # A unit title. assert_contains( unit_2_title, response.body) # First child lesson without link. assert_contains( lesson_2_1_title, response.body) # Second child lesson with link. assert_contains( lesson_2_2_title, response.body) # Breadcrumbs. assert_contains_all_of( ['Unit 2</a></li>', 'Lesson 1</li>'], response.body) # Unit page. response = self.get('unit?unit=14&lesson=16') # A unit title. assert_contains( unit_2_title, response.body) # An activity title. assert_contains( 'Activity', response.body) # First child lesson without link. assert_contains( lesson_2_1_title, response.body) # Second child lesson with link. assert_contains( lesson_2_2_title, response.body) # Breadcrumbs. assert_contains_all_of( ['Unit 2</a></li>', '<a href="unit?unit=14&lesson=15">', '<a href="unit?unit=14&lesson=17">'], response.body) assert '<a href="unit?unit=14&lesson=16">' not in response.body # Clean up. sites.reset_courses() config.Registry.test_overrides = {} def test_get_put_file(self): """Test that one can put/get file via REST interface.""" self.init_course_data(self.upload_all_sample_course_files) email = 'test_get_put_file@google.com' actions.login(email, is_admin=True) response = self.get('dashboard?action=settings_advanced') # Check course.yaml edit form. compute_form = response.forms['edit_course_yaml'] response = self.submit(compute_form) assert_equals(response.status_int, 302) assert_contains( 'dashboard?action=edit_settings&from_action=settings_advanced&' 'key=%2Fcourse.yaml', response.location) response = self.get(response.location) assert_contains('rest/files/item?key=%2Fcourse.yaml', response.body) # Get text file. 
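        # The REST layer double-encodes its responses: the body is JSON whose
        # 'payload' field is itself a JSON string, hence the two nested
        # transforms.loads() calls below.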
response = self.get('rest/files/item?key=%2Fcourse.yaml') assert_equals(response.status_int, 200) json_dict = transforms.loads( transforms.loads(response.body)['payload']) assert '/course.yaml' == json_dict['key'] assert 'text/utf-8' == json_dict['encoding'] assert (open(os.path.join( appengine_config.BUNDLE_ROOT, 'course.yaml')).read( ) == json_dict['content']) def test_empty_course(self): """Test course with no assets and the simplest possible course.yaml.""" email = 'test_empty_course@google.com' actions.login(email, is_admin=True) # Check minimal course page comes up. response = self.get('course') assert_contains('UNTITLED COURSE', response.body) assert_contains('Registration', response.body) # Check inheritable files are accessible. response = self.get('/assets/css/main.css') assert (open(os.path.join( appengine_config.BUNDLE_ROOT, 'assets/css/main.css')).read( ) == response.body) # Check non-inheritable files are not inherited. response = self.testapp.get( '/assets/js/activity-1.3.js', expect_errors=True) assert_equals(response.status_int, 404) # Login as admin. email = 'test_empty_course@google.com' actions.login(email, is_admin=True) response = self.get('dashboard') # Add unit. compute_form = response.forms['add_unit'] response = self.submit(compute_form) response = self.get('/rest/course/unit?key=1') assert_equals(response.status_int, 200) # Add lessons. response = self.get('dashboard') compute_form = None for form in response.forms.values(): if form.action == 'dashboard?action=add_lesson': compute_form = form response = self.submit(compute_form) response = self.get('/rest/course/lesson?key=2') assert_equals(response.status_int, 200) # Add assessment. response = self.get('dashboard') compute_form = response.forms['add_assessment'] response = self.submit(compute_form) response = self.get('/rest/course/assessment?key=3') assert_equals(response.status_int, 200) # Add link. 
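        # Like the unit, lesson and assessment added above, the new link is
        # expected to be assigned the next sequential key (here 4), which is
        # why the REST URL below uses a literal key value.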
response = self.get('dashboard') compute_form = response.forms['add_link'] response = self.submit(compute_form) response = self.get('/rest/course/link?key=4') assert_equals(response.status_int, 200) def import_sample_course(self): """Imports a sample course.""" # setup courses sites.setup_courses('course:/test::ns_test, course:/:/') # check we have no questions or question gourps in neither source nor # destination course with Namespace(''): self.assertEqual(0, len(models.QuestionGroupDAO.get_all())) self.assertEqual(0, len(models.QuestionDAO.get_all())) with Namespace('ns_test'): self.assertEqual(0, len(models.QuestionGroupDAO.get_all())) self.assertEqual(0, len(models.QuestionDAO.get_all())) # import sample course dst_app_context = sites.get_all_courses()[0] dst_course = courses.Course(None, app_context=dst_app_context) src_app_context = sites.get_all_courses()[1] src_course = courses.Course(None, app_context=src_app_context) errors = [] _, dst_course_out = dst_course.import_from( src_app_context, errors) if errors: raise Exception(errors) dst_course_out.save() u1, l1, ac1, as1 = self.calc_course_stats(src_course) u2, l2, _, as2 = self.calc_course_stats(dst_course) # the old and the new course have same number of units each self.assertEqual(12, u1) self.assertEqual(12, u2) # old course had lessons and activities self.assertEqual(29, l1) self.assertEqual(26, ac1) # new course has the same number of lessons as the old one, plus one # new lesson instead of each old activity self.assertEqual(55, l2) self.assertEqual(l1 + ac1, l2) # both the new & the old courses have 4 assessments self.assertEqual(4, as1) self.assertEqual(4, as2) # new course assessment weights are equal to 25.0 for x in dst_course.get_assessment_list(): self.assertEqual(25.0, x.weight) # old course does not have any questions or question groups with Namespace(''): self.assertEqual(0, len(models.QuestionGroupDAO.get_all())) self.assertEqual(0, len(models.QuestionDAO.get_all())) # new course has new questions and question groups that used to be old # style activities with Namespace('ns_test'): self.assertEqual(2, len(models.QuestionGroupDAO.get_all())) self.assertEqual(69, len(models.QuestionDAO.get_all())) # clean up sites.reset_courses() def test_imported_course_performance(self): """Tests various pages of the imported course.""" self.import_sample_course() # Install a clone on the '/' so all the tests will treat it as normal # sample course. sites.setup_courses('course:/::ns_test') self.namespace = 'ns_test' # Enable memcache. config.Registry.test_overrides[ models.CAN_USE_MEMCACHE.name] = True with actions.OverriddenEnvironment({ 'course': { 'now_available': True, 'browsable': False}}): def custom_inc(unused_increment=1, context=None): """A custom inc() function for cache miss counter.""" self.keys.append(context) self.count += 1 def assert_cached(url, assert_text, cache_miss_allowed=0): """Checks that specific URL supports caching.""" memcache.flush_all() # Expect cache misses first time we load page. cache_miss_before = self.count response = self.get(url) assert_contains(assert_text, response.body) assert cache_miss_before != self.count # Expect no cache misses first time we load page. self.keys = [] cache_miss_before = self.count response = self.get(url) assert_contains(assert_text, response.body) cache_miss_actual = self.count - cache_miss_before if cache_miss_actual != cache_miss_allowed: raise Exception( 'Expected %s cache misses, got %s. 
Keys are:\n%s' % ( cache_miss_allowed, cache_miss_actual, '\n'.join(self.keys))) self.keys = [] self.count = 0 old_inc = models.CACHE_MISS.inc models.CACHE_MISS.inc = custom_inc # Walk the site. email = 'test_units_lessons@google.com' name = 'Test Units Lessons' assert_cached('preview', 'Putting it all together') actions.login(email, is_admin=True) assert_cached('preview', 'Putting it all together') actions.register(self, name) assert_cached('course', 'Putting it all together') assert_cached( 'unit?unit=14', 'When search results suggest something new') assert_cached( 'unit?unit=14&lesson=19', 'Understand options for different media') # Clean up. models.CACHE_MISS.inc = old_inc config.Registry.test_overrides = {} sites.reset_courses() def test_imported_course(self): """Tests various pages of the imported course.""" # TODO(psimakov): Ideally, this test class should run all aspect tests # and they all should pass. However, the id's in the cloned course # do not match the id's of source sample course and we fetch pages # and assert page content using id's. For now, we will check the minimal # set of pages manually. Later, we have to make it run all known tests. self.import_sample_course() # Install a clone on the '/' so all the tests will treat it as normal # sample course. sites.setup_courses('course:/::ns_test') self.namespace = 'ns_test' email = 'test_units_lessons@google.com' name = 'Test Units Lessons' actions.login(email, is_admin=True) response = self.get('course') assert_contains('Putting it all together', response.body) actions.register(self, name) actions.check_profile(self, name) actions.view_announcements(self) # Check unit page without lesson specified. response = self.get('unit?unit=14') assert_contains('Interpreting results', response.body) assert_contains( 'When search results suggest something new', response.body) # Check unit page with a lessons. response = self.get('unit?unit=14&lesson=19') assert_contains('Interpreting results', response.body) assert_contains( 'Understand options for different media', response.body) # Check assesment page. response = self.get('assessment?name=35') self.assertEqual(5, response.body.count('<div class="qt-question">')) # Check activity page. response = self.get('unit?unit=14&lesson=16') assert_contains('Activity', response.body) # Clean up. 
        sites.reset_courses()

    def test_readonly_caching(self):
        self.import_sample_course()
        sites.setup_courses('course:/::ns_test')
        self.namespace = 'ns_test'
        course = sites.get_all_courses()[0]
        fn = os.path.join(
            appengine_config.BUNDLE_ROOT, 'data/course.json')
        config.Registry.test_overrides[
            models.CAN_USE_MEMCACHE.name] = True

        # get the page and record hits and misses
        hit_local_before = models.CACHE_HIT_LOCAL.value
        hit_before = models.CACHE_HIT.value
        miss_local_before = models.CACHE_MISS_LOCAL.value
        miss_before = models.CACHE_MISS.value
        course.fs.impl.get(fn)
        hit_local_after = models.CACHE_HIT_LOCAL.value
        hit_after = models.CACHE_HIT.value
        miss_local_after = models.CACHE_MISS_LOCAL.value
        miss_after = models.CACHE_MISS.value
        self.assertEquals(hit_after, hit_before)
        self.assertEquals(miss_after, miss_before)
        self.assertEquals(hit_local_after, hit_local_before)
        self.assertEquals(miss_local_after, miss_local_before)

        # enable read_only caching and repeat
        models.MemcacheManager.begin_readonly()
        try:
            # first fetch should miss local cache, but hit memcache
            hit_local_before = models.CACHE_HIT_LOCAL.value
            hit_before = models.CACHE_HIT.value
            miss_local_before = models.CACHE_MISS_LOCAL.value
            miss_before = models.CACHE_MISS.value
            course.fs.impl.get(fn)
            hit_local_after = models.CACHE_HIT_LOCAL.value
            hit_after = models.CACHE_HIT.value
            miss_local_after = models.CACHE_MISS_LOCAL.value
            miss_after = models.CACHE_MISS.value
            self.assertEquals(hit_after, hit_before)
            self.assertEquals(miss_after, miss_before)
            self.assertEquals(hit_local_after, hit_local_before)
            self.assertEquals(miss_local_after, miss_local_before)

            # second fetch must hit local cache, and not hit memcache
            hit_local_before = models.CACHE_HIT_LOCAL.value
            hit_before = models.CACHE_HIT.value
            miss_local_before = models.CACHE_MISS_LOCAL.value
            miss_before = models.CACHE_MISS.value
            course.fs.impl.get(fn)
            hit_local_after = models.CACHE_HIT_LOCAL.value
            hit_after = models.CACHE_HIT.value
            miss_local_after = models.CACHE_MISS_LOCAL.value
            miss_after = models.CACHE_MISS.value
            self.assertEquals(hit_after, hit_before)
            self.assertEquals(miss_after, miss_before)
            self.assertEquals(hit_local_after, hit_local_before)
            self.assertEquals(miss_local_after, miss_local_before)
        finally:
            models.MemcacheManager.end_readonly()


class DatastoreBackedSampleCourseTest(DatastoreBackedCourseTest):
    """Run all existing tests using datastore-backed file system."""

    def setUp(self):
        super(DatastoreBackedSampleCourseTest, self).setUp()
        self.init_course_data(self.upload_all_sample_course_files)


class LessonComponentsTest(DatastoreBackedCourseTest):
    """Test operations that make use of components in a lesson body."""

    def setUp(self):
        """Set up the dummy course for each test case in this class."""
        super(LessonComponentsTest, self).setUp()
        self.course = courses.Course(None, app_context=self.app_context)
        self.unit = self.course.add_unit()
        self.lesson = self.course.add_lesson(self.unit)
        self.lesson.objectives = """
            <question quid="123" weight="1" instanceid="QN"></question>
            random_text
            <gcb-youtube videoid="Kdg2drcUjYI" instanceid="VD"></gcb-youtube>
            more_random_text
            <question-group qgid="456" instanceid="QG"></question-group>
            yet_more_random_text
        """
        self.lesson.has_activity = False
        self.course.update_lesson(self.lesson)
        self.course.save()
        self.tracker = self.course.get_progress_tracker()

    def _assert_components(self, cpt_list):
        assert cpt_list == [
            {'instanceid': 'QN', 'quid': '123', 'weight': '1',
             'cpt_name': 'question'},
            {'instanceid': 'VD', 'cpt_name': 'gcb-youtube',
             'videoid': 'Kdg2drcUjYI'},
{'instanceid': 'QG', 'qgid': '456', 'cpt_name': 'question-group'} ] valid_cpt_ids = self.tracker.get_valid_component_ids( self.unit.unit_id, self.lesson.lesson_id) self.assertEqual(set(['QN', 'QG']), set(valid_cpt_ids)) def test_component_discovery(self): """Test extraction of components from a lesson body.""" cpt_list = self.course.get_components( self.unit.unit_id, self.lesson.lesson_id) self._assert_components(cpt_list) def test_component_discovery_using_html5lib(self): """Test extraction of components from a lesson body using html5lib.""" cpt_list = self.course.get_components( self.unit.unit_id, self.lesson.lesson_id, use_lxml=False) self._assert_components(cpt_list) def test_component_progress(self): """Test that progress tracking for components is done correctly.""" unit_id = self.unit.unit_id lesson_id = self.lesson.lesson_id student = models.Student(key_name='lesson-body-test-student') assert self.tracker.get_unit_progress(student)[unit_id] == 0 assert self.tracker.get_lesson_progress( student, unit_id)[lesson_id] == { 'html': 0, 'activity': 0, 'has_activity': False} # Visiting the lesson page has no effect on progress, since it contains # trackable components. self.tracker.put_html_accessed(student, unit_id, lesson_id) assert self.tracker.get_unit_progress(student)[unit_id] == 0 assert self.tracker.get_lesson_progress( student, unit_id)[lesson_id] == { 'html': 0, 'activity': 0, 'has_activity': False} # Marking progress for a non-existent component id has no effect. self.tracker.put_component_completed(student, unit_id, lesson_id, 'a') assert self.tracker.get_unit_progress(student)[unit_id] == 0 assert self.tracker.get_lesson_progress( student, unit_id)[lesson_id] == { 'html': 0, 'activity': 0, 'has_activity': False} # Marking progress for a non-trackable component id has no effect. self.tracker.put_component_completed(student, unit_id, lesson_id, 'VD') assert self.tracker.get_unit_progress(student)[unit_id] == 0 assert self.tracker.get_lesson_progress( student, unit_id)[lesson_id] == { 'html': 0, 'activity': 0, 'has_activity': False} # Completing a trackable component marks the lesson as in-progress, self.tracker.put_component_completed(student, unit_id, lesson_id, 'QN') assert self.tracker.get_unit_progress(student)[unit_id] == 1 assert self.tracker.get_lesson_progress( student, unit_id)[lesson_id] == { 'html': 1, 'activity': 0, 'has_activity': False} # Completing the same component again has no further effect. self.tracker.put_component_completed(student, unit_id, lesson_id, 'QN') assert self.tracker.get_unit_progress(student)[unit_id] == 1 assert self.tracker.get_lesson_progress( student, unit_id)[lesson_id] == { 'html': 1, 'activity': 0, 'has_activity': False} # Completing the other trackable component marks the lesson (and unit) # as completed. 
self.tracker.put_component_completed(student, unit_id, lesson_id, 'QG') assert self.tracker.get_unit_progress(student)[unit_id] == 2 assert self.tracker.get_lesson_progress( student, unit_id)[lesson_id] == { 'html': 2, 'activity': 0, 'has_activity': False} class EtlTestEntityPii(entities.BaseEntity): name = db.StringProperty(indexed=False) score = db.IntegerProperty(indexed=False) _PROPERTY_EXPORT_BLACKLIST = [name] @classmethod def safe_key(cls, db_key, transform_fn): return db.Key.from_path(cls.kind(), transform_fn(db_key.id_or_name())) class EtlTestEntityPiiReference(entities.BaseEntity): pii = db.ReferenceProperty(EtlTestEntityPii) class EtlTestEntityIllegal(entities.BaseEntity): score = db.IntegerProperty(indexed=False) thingy = student_work.KeyProperty() class EtlMainTestCase(testing.EtlTestBase, DatastoreBackedCourseTest): """Tests tools/etl/etl.py's main().""" def setUp(self): """Configures EtlMainTestCase.""" super(EtlMainTestCase, self).setUp() self.archive_path = os.path.join(self.test_tempdir, 'archive.zip') self.new_course_title = 'New Course Title' self.common_args = [ self.url_prefix, 'myapp', 'localhost:8080'] self.common_command_args = self.common_args + [ '--archive_path', self.archive_path] self.common_course_args = [etl._TYPE_COURSE] + self.common_command_args self.common_datastore_args = [ etl._TYPE_DATASTORE] + self.common_command_args self.delete_datastore_args = etl.create_args_parser().parse_args( [etl._MODE_DELETE, etl._TYPE_DATASTORE] + self.common_args) self.download_course_args = etl.create_args_parser().parse_args( [etl._MODE_DOWNLOAD] + self.common_course_args) self.upload_course_args = etl.create_args_parser().parse_args( [etl._MODE_UPLOAD] + self.common_course_args) self.make_items_distinct_counter = 0 def create_app_yaml(self, context, title=None): yaml = copy.deepcopy(courses.DEFAULT_COURSE_YAML_DICT) if title: yaml['course']['title'] = title context.fs.impl.put( os.path.join( appengine_config.BUNDLE_ROOT, etl._COURSE_YAML_PATH_SUFFIX), etl._ReadWrapper(str(yaml)), is_draft=False) def create_archive(self): self.upload_all_sample_course_files([]) self.import_sample_course() args = etl.create_args_parser().parse_args(['download'] + self.common_course_args) etl.main(args, environment_class=testing.FakeEnvironment) sites.reset_courses() def create_archive_with_question(self, data): self.upload_all_sample_course_files([]) self.import_sample_course() question = _add_data_entity( sites.get_all_courses()[1], models.QuestionEntity, data) args = etl.create_args_parser().parse_args(['download'] + self.common_course_args) etl.main(args, environment_class=testing.FakeEnvironment) sites.reset_courses() return question def create_empty_course(self, raw): sites.setup_courses(raw) context = etl_lib.get_context(self.url_prefix) course = etl_lib.get_course(etl_lib.get_context(self.url_prefix)) course.delete_all() # delete all other entities from data store with Namespace(self.namespace): db.delete(db.Query(keys_only=True)) self.create_app_yaml(context) def import_sample_course(self): """Imports a sample course.""" # Import sample course. dst_app_context = sites.get_all_courses()[0] src_app_context = sites.get_all_courses()[1] # Patch in a course.yaml. 
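        # create_app_yaml() (defined above) writes a minimal course.yaml, based
        # on DEFAULT_COURSE_YAML_DICT, into the destination context so that
        # import_from() has a valid, titled course to import into.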
self.create_app_yaml(dst_app_context, title=self.new_course_title) dst_course = courses.Course(None, app_context=dst_app_context) errors = [] src_course_out, dst_course_out = dst_course.import_from( src_app_context, errors) if errors: raise Exception(errors) assert len( src_course_out.get_units()) == len(dst_course_out.get_units()) dst_course_out.save() def test_archive_size_can_exceed_2_gb(self): # The maximum size for any file in the zipfile is 1 GB. byte = '.' gig = byte * (2 ** 30) archive = etl._init_archive(self.archive_path, etl.ARCHIVE_TYPE_ZIP) archive.open('w') archive.add(os.path.join(self.test_tempdir, 'first'), gig) archive.add(os.path.join(self.test_tempdir, 'second'), gig) archive.add(os.path.join(self.test_tempdir, 'overflow'), byte) archive.close() def test_delete_course_fails(self): args = etl.create_args_parser().parse_args( [etl._MODE_DELETE, etl._TYPE_COURSE] + self.common_args) self.assertRaises( NotImplementedError, etl.main, args, environment_class=testing.FakeEnvironment) def test_delete_datastore_fails_if_user_does_not_confirm(self): self.swap( etl, '_raw_input', lambda x: 'not' + etl._DELETE_DATASTORE_CONFIRMATION_INPUT) self.assertRaises( SystemExit, etl.main, self.delete_datastore_args, environment_class=testing.FakeEnvironment) def test_delete_datastore_succeeds(self): """Tests delete datastore success for populated and empty datastores.""" self.import_sample_course() context = etl_lib.get_context( self.delete_datastore_args.course_url_prefix) self.swap( etl, '_raw_input', lambda x: etl._DELETE_DATASTORE_CONFIRMATION_INPUT) # Spot check that some kinds are populated. old_namespace = namespace_manager.get_namespace() try: namespace_manager.set_namespace(context.get_namespace_name()) self.assertTrue(vfs.FileDataEntity.all().get()) self.assertTrue(vfs.FileMetadataEntity.all().get()) finally: namespace_manager.set_namespace(old_namespace) # Delete against a datastore with contents runs successfully. etl.main( self.delete_datastore_args, environment_class=testing.FakeEnvironment) # Spot check that those kinds are now empty. try: namespace_manager.set_namespace(context.get_namespace_name()) self.assertFalse(vfs.FileDataEntity.all().get()) self.assertFalse(vfs.FileMetadataEntity.all().get()) finally: namespace_manager.set_namespace(old_namespace) # Delete against a datastore without contents runs successfully. etl.main( self.delete_datastore_args, environment_class=testing.FakeEnvironment) def test_disable_remote_cannot_be_passed_for_mode_other_than_run(self): bad_args = etl.create_args_parser().parse_args( [etl._MODE_DOWNLOAD] + self.common_course_args + ['--disable_remote']) self.assertRaises( SystemExit, etl.main, bad_args, environment_class=testing.FakeEnvironment) def test_download_course_creates_valid_archive(self): """Tests download of course data and archive creation.""" self.upload_all_sample_course_files([]) self.import_sample_course() question = _add_data_entity( sites.get_all_courses()[0], models.QuestionEntity, 'test question') etl.main( self.download_course_args, environment_class=testing.FakeEnvironment) # Don't use Archive and Manifest here because we want to test the raw # structure of the emitted zipfile. 
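        # The checks below read the manifest straight out of the zip and verify
        # its top-level fields (version, raw, and the per-entity path/is_draft
        # entries) as well as one exported entity file,
        # models/QuestionEntity.json.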
zip_archive = zipfile.ZipFile(self.archive_path) # check manifest manifest = transforms.loads( zip_archive.open(etl._MANIFEST_FILENAME).read()) self.assertGreaterEqual( courses.COURSE_MODEL_VERSION_1_3, manifest['version']) self.assertEqual( 'course:%s::ns_test' % self.url_prefix, manifest['raw']) # check content for entity in manifest['entities']: self.assertTrue(entity.has_key('is_draft')) self.assertTrue(zip_archive.open(entity['path'])) # check question question_json = transforms.loads( zip_archive.open('models/QuestionEntity.json').read()) self.assertEqual( question.key().id(), question_json['rows'][-1]['key.id']) self.assertEqual( 'test question', question_json['rows'][-1]['data']) # 69 from the import plus the one we created in the test self.assertEqual(70, len(question_json['rows'])) def test_download_course_errors_if_archive_path_exists_on_disk(self): self.upload_all_sample_course_files([]) self.import_sample_course() etl.main( self.download_course_args, environment_class=testing.FakeEnvironment) self.assertRaises( SystemExit, etl.main, self.download_course_args, environment_class=testing.FakeEnvironment) def test_download_errors_if_course_url_prefix_does_not_exist(self): sites.reset_courses() self.assertRaises( SystemExit, etl.main, self.download_course_args, environment_class=testing.FakeEnvironment) def test_download_course_errors_if_course_version_is_pre_1_3(self): args = etl.create_args_parser().parse_args( ['download', 'course', '/'] + self.common_course_args[2:]) self.upload_all_sample_course_files([]) self.import_sample_course() self.assertRaises( SystemExit, etl.main, args, environment_class=testing.FakeEnvironment) def test_download_datastore_fails_if_datastore_types_not_in_datastore(self): download_datastore_args = etl.create_args_parser().parse_args( [etl._MODE_DOWNLOAD] + self.common_datastore_args + ['--datastore_types', 'missing']) self.assertRaises( SystemExit, etl.main, download_datastore_args, environment_class=testing.FakeEnvironment) def test_download_datastore_succeeds(self): """Test download of datastore data and archive creation.""" download_datastore_args = etl.create_args_parser().parse_args( [etl._MODE_DOWNLOAD] + self.common_datastore_args + ['--datastore_types', 'Student,StudentPropertyEntity']) context = etl_lib.get_context(download_datastore_args.course_url_prefix) old_namespace = namespace_manager.get_namespace() try: namespace_manager.set_namespace(context.get_namespace_name()) first_student = models.Student(key_name='first_student') second_student = models.Student(key_name='second_student') first_entity = models.StudentPropertyEntity( key_name='first_student-property_entity') second_entity = models.StudentPropertyEntity( key_name='second_student-property_entity') db.put([first_student, second_student, first_entity, second_entity]) finally: namespace_manager.set_namespace(old_namespace) etl.main( download_datastore_args, environment_class=testing.FakeEnvironment) archive = etl._init_archive(self.archive_path, etl.ARCHIVE_TYPE_ZIP) archive.open('r') self.assertEqual( ['Student.json', 'StudentPropertyEntity.json'], sorted( [os.path.basename(e.path) for e in archive.manifest.entities])) student_entity = [ e for e in archive.manifest.entities if e.path.endswith('Student.json')][0] entity_entity = [ e for e in archive.manifest.entities if e.path.endswith('StudentPropertyEntity.json')][0] # Ensure .json files are deserializable into Python objects. 
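        # Rows are sorted by 'key.name' so the comparisons below do not depend
        # on datastore iteration order.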
students = sorted( transforms.loads(archive.get(student_entity.path))['rows'], key=lambda d: d['key.name']) entitiez = sorted( transforms.loads(archive.get(entity_entity.path))['rows'], key=lambda d: d['key.name']) # Spot check their contents. self.assertEqual( [model.key().name() for model in [first_student, second_student]], [student['key.name'] for student in students]) self.assertEqual( [model.key().name() for model in [first_entity, second_entity]], [entity['key.name'] for entity in entitiez]) def test_download_datastore_with_privacy_maintains_references(self): """Test download of datastore data and archive creation.""" unsafe_user_id = '1' download_datastore_args = etl.create_args_parser().parse_args( [etl._MODE_DOWNLOAD] + self.common_datastore_args + ['--datastore_types', 'EventEntity,Student', '--privacy', '--privacy_secret', 'super_seekrit']) context = etl_lib.get_context(download_datastore_args.course_url_prefix) old_namespace = namespace_manager.get_namespace() try: namespace_manager.set_namespace(context.get_namespace_name()) event = models.EventEntity(user_id=unsafe_user_id) student = models.Student( key_name='first_student', user_id=unsafe_user_id) db.put([event, student]) finally: namespace_manager.set_namespace(old_namespace) etl.main( download_datastore_args, environment_class=testing.FakeEnvironment) archive = etl._init_archive(self.archive_path, etl.ARCHIVE_TYPE_ZIP) archive.open('r') self.assertEqual( ['EventEntity.json', 'Student.json'], sorted( [os.path.basename(e.path) for e in archive.manifest.entities])) event_entity_entity = [ e for e in archive.manifest.entities if e.path.endswith('EventEntity.json')][0] student_entity = [ e for e in archive.manifest.entities if e.path.endswith('Student.json')][0] # Ensure .json files are deserializable into Python objects... event_entities = transforms.loads( archive.get(event_entity_entity.path))['rows'] students = transforms.loads(archive.get(student_entity.path))['rows'] # Reference maintained. self.assertEqual(event_entities[0]['user_id'], students[0]['user_id']) # But user_id transformed. 
self.assertNotEqual(unsafe_user_id, event_entities[0]['user_id']) self.assertNotEqual(unsafe_user_id, students[0]['user_id']) def test_privacy_fails_if_not_downloading_datastore(self): wrong_mode = etl.create_args_parser().parse_args( [etl._MODE_UPLOAD] + self.common_datastore_args + ['--privacy']) self.assertRaises( SystemExit, etl.main, wrong_mode, environment_class=testing.FakeEnvironment) wrong_type = etl.create_args_parser().parse_args( [etl._MODE_DOWNLOAD] + self.common_course_args + ['--privacy']) self.assertRaises( SystemExit, etl.main, wrong_type, environment_class=testing.FakeEnvironment) def test_privacy_secret_fails_if_not_download_datastore_with_privacy(self): """Tests invalid flag combinations related to --privacy.""" missing_privacy = etl.create_args_parser().parse_args( [etl._MODE_DOWNLOAD] + self.common_datastore_args + ['--privacy_secret', 'foo']) self.assertRaises( SystemExit, etl.main, missing_privacy, environment_class=testing.FakeEnvironment) self.assertRaises( SystemExit, etl.main, missing_privacy, environment_class=testing.FakeEnvironment) wrong_mode = etl.create_args_parser().parse_args( [etl._MODE_UPLOAD] + self.common_datastore_args + ['--privacy_secret', 'foo', '--privacy']) self.assertRaises( SystemExit, etl.main, wrong_mode, environment_class=testing.FakeEnvironment) wrong_type = etl.create_args_parser().parse_args( [etl._MODE_DOWNLOAD] + self.common_course_args + ['--privacy_secret', 'foo', '--privacy']) self.assertRaises( SystemExit, etl.main, wrong_type, environment_class=testing.FakeEnvironment) def test_run_fails_when_delegated_argument_parsing_fails(self): bad_args = etl.create_args_parser().parse_args( ['run', 'tools.etl_lib.Job'] + self.common_args + ['--job_args', "'unexpected_argument'"]) self.assertRaises( SystemExit, etl.main, bad_args, environment_class=testing.FakeEnvironment) def test_run_fails_when_if_requested_class_missing_or_invalid(self): bad_args = etl.create_args_parser().parse_args( ['run', 'a.missing.class.or.Module'] + self.common_args) self.assertRaises( SystemExit, etl.main, bad_args, environment_class=testing.FakeEnvironment) bad_args = etl.create_args_parser().parse_args( ['run', 'tools.etl.etl._ZipArchive'] + self.common_args) self.assertRaises( SystemExit, etl.main, bad_args, environment_class=testing.FakeEnvironment) def test_run_print_memcache_stats_succeeds(self): """Tests examples.WriteStudentEmailsToFile prints stats to stdout.""" args = etl.create_args_parser().parse_args( ['run', 'tools.etl.examples.PrintMemcacheStats'] + self.common_args) memcache.get('key') memcache.set('key', 1) memcache.get('key') old_stdout = sys.stdout stdout = cStringIO.StringIO() try: sys.stdout = stdout etl.main(args, environment_class=testing.FakeEnvironment) finally: sys.stdout = old_stdout expected0 = examples.PrintMemcacheStats._STATS_TEMPLATE % { 'byte_hits': 1, 'bytes': 1, 'hits': 1, 'items': 1, 'misses': 1, 'oldest_item_age': 0, } expected1 = examples.PrintMemcacheStats._STATS_TEMPLATE % { 'byte_hits': 1, 'bytes': 1, 'hits': 1, 'items': 1, 'misses': 1, 'oldest_item_age': 1, } actual = stdout.getvalue() self.assertTrue(expected0 in actual or expected1 in actual) def test_run_skips_remote_env_setup_when_disable_remote_passed(self): args = etl.create_args_parser().parse_args( ['run', 'tools.etl.etl_lib.Job'] + self.common_args + ['--disable_remote']) etl.main(args) def test_run_upload_file_to_course_succeeds(self): """Tests upload of a single local file to a course.""" path = os.path.join(self.test_tempdir, 'file') target = 'assets/file' 
remote_path = os.path.join(appengine_config.BUNDLE_ROOT, target) contents = 'contents' with open(path, 'w') as f: f.write(contents) args = etl.create_args_parser().parse_args( ['run', 'tools.etl.examples.UploadFileToCourse'] + self.common_args + ['--job_args=%s %s' % (path, target)]) sites.setup_courses(self.raw) context = etl_lib.get_context(args.course_url_prefix) self.assertFalse(context.fs.impl.get(remote_path)) etl.main(args, environment_class=testing.FakeEnvironment) self.assertEqual(contents, context.fs.impl.get(remote_path).read()) def test_run_write_student_emails_to_file_succeeds(self): """Tests args passed to and run of examples.WriteStudentEmailsToFile.""" email1 = 'email1@example.com' email2 = 'email2@example.com' path = os.path.join(self.test_tempdir, 'emails') args = etl.create_args_parser().parse_args( ['run', 'tools.etl.examples.WriteStudentEmailsToFile'] + self.common_args + ['--job_args=%s --batch_size 1' % path]) context = etl_lib.get_context(args.course_url_prefix) old_namespace = namespace_manager.get_namespace() try: namespace_manager.set_namespace(context.get_namespace_name()) first_student = models.Student(key_name=email1) second_student = models.Student(key_name=email2) db.put([first_student, second_student]) finally: namespace_manager.set_namespace(old_namespace) etl.main(args, environment_class=testing.FakeEnvironment) self.assertEqual('%s\n%s\n' % (email1, email2), open(path).read()) def test_upload_course_fails_if_archive_cannot_be_opened(self): sites.setup_courses(self.raw) self.assertRaises( SystemExit, etl.main, self.upload_course_args, environment_class=testing.FakeEnvironment) def test_upload_course_fails_if_archive_course_json_malformed(self): self.create_archive() self.create_empty_course(self.raw) zip_archive = zipfile.ZipFile(self.archive_path, 'a') zip_archive.writestr( etl._AbstractArchive.get_internal_path( etl._COURSE_JSON_PATH_SUFFIX), 'garbage') zip_archive.close() self.assertRaises( SystemExit, etl.main, self.upload_course_args, environment_class=testing.FakeEnvironment) def test_upload_course_fails_if_archive_course_yaml_malformed(self): self.create_archive() self.create_empty_course(self.raw) zip_archive = zipfile.ZipFile(self.archive_path, 'a') zip_archive.writestr( etl._AbstractArchive.get_internal_path( etl._COURSE_YAML_PATH_SUFFIX), '{') zip_archive.close() self.assertRaises( SystemExit, etl.main, self.upload_course_args, environment_class=testing.FakeEnvironment) def test_upload_course_fails_if_course_has_non_course_yaml_contents(self): self.upload_all_sample_course_files([]) self.import_sample_course() self.assertRaises( SystemExit, etl.main, self.upload_course_args, environment_class=testing.FakeEnvironment) def test_upload_course_fails_if_force_overwrite_passed_with_bad_args(self): self.create_archive() bad_args = etl.create_args_parser().parse_args( [etl._MODE_UPLOAD] + self.common_datastore_args + [ '--force_overwrite']) self.assertRaises( SystemExit, etl.main, bad_args, environment_class=testing.FakeEnvironment) def test_upload_course_fails_if_no_course_with_url_prefix_found(self): self.create_archive() self.assertRaises( SystemExit, etl.main, self.upload_course_args, environment_class=testing.FakeEnvironment) def _get_all_entity_files(self): files = [] all_entities = list(courses.COURSE_CONTENT_ENTITIES) + list( courses.ADDITIONAL_ENTITIES_FOR_COURSE_IMPORT) for entity in all_entities: files.append('%s.json' % entity.__name__) return files def test_upload_course_succeeds(self): """Tests upload of archive contents.""" question = 
self.create_archive_with_question('test question') self.create_empty_course(self.raw) context = etl_lib.get_context(self.upload_course_args.course_url_prefix) self.assertNotEqual(self.new_course_title, context.get_title()) all_files_before_upload = set( etl._filter_filesystem_files(etl._list_all( context, include_inherited=True))) etl.main( self.upload_course_args, environment_class=testing.FakeEnvironment) # check archive content archive = etl._init_archive(self.archive_path, etl.ARCHIVE_TYPE_ZIP) archive.open('r') context = etl_lib.get_context(self.upload_course_args.course_url_prefix) vfs_files_after_upload = set(context.fs.impl.list( appengine_config.BUNDLE_ROOT)) self.assertEqual( len(archive.manifest.entities) - len(all_files_before_upload) # less already-present files - len(self._get_all_entity_files()), # less entity files len(vfs_files_after_upload - all_files_before_upload)) # check course structure self.assertEqual(self.new_course_title, context.get_title()) units = etl_lib.get_course(context).get_units() spot_check_single_unit = [u for u in units if u.unit_id == 14][0] self.assertEqual('Interpreting results', spot_check_single_unit.title) for unit in units: self.assertTrue(unit.title) # check entities for entity in archive.manifest.entities: _, tail = os.path.split(entity.path) if tail in self._get_all_entity_files(): continue full_path = os.path.join( appengine_config.BUNDLE_ROOT, etl._AbstractArchive.get_external_path(entity.path)) stream = context.fs.impl.get(full_path) self.assertEqual(entity.is_draft, context.fs.is_draft(stream)) # check uploaded question matches original _assert_identical_data_entity_exists( sites.get_all_courses()[0], question) def test_upload_course_with_force_overwrite_succeeds(self): """Tests upload into non-empty course with --force_overwrite.""" self.upload_all_sample_course_files([]) self.import_sample_course() etl.main( self.download_course_args, environment_class=testing.FakeEnvironment) force_overwrite_args = etl.create_args_parser().parse_args( [etl._MODE_UPLOAD] + self.common_course_args + [ '--force_overwrite']) etl.main( force_overwrite_args, environment_class=testing.FakeEnvironment) archive = etl._init_archive(self.archive_path, etl.ARCHIVE_TYPE_ZIP) archive.open('r') context = etl_lib.get_context(self.upload_course_args.course_url_prefix) filesystem_contents = context.fs.impl.list(appengine_config.BUNDLE_ROOT) self.assertEqual( len(archive.manifest.entities), len(filesystem_contents) + len(self._get_all_entity_files())) self.assertEqual(self.new_course_title, context.get_title()) units = etl_lib.get_course(context).get_units() spot_check_single_unit = [u for u in units if u.unit_id == 14][0] self.assertEqual('Interpreting results', spot_check_single_unit.title) for unit in units: self.assertTrue(unit.title) for entity in archive.manifest.entities: _, tail = os.path.split(entity.path) if tail in self._get_all_entity_files(): continue full_path = os.path.join( appengine_config.BUNDLE_ROOT, etl._AbstractArchive.get_external_path(entity.path)) stream = context.fs.impl.get(full_path) self.assertEqual(entity.is_draft, stream.metadata.is_draft) def test_upload_valid_encoded_string_reference(self): with Namespace(self.namespace): string_key = db.Key.from_path('EtlTestEntityPii', '334-44-1234') self._test_upload_valid_reference( string_key, ['--privacy', '--privacy_secret', 'super_seekrit']) def test_upload_valid_encoded_numeric_reference(self): with Namespace(self.namespace): numeric_key = db.Key.from_path('EtlTestEntityPii', 334441234) 
self._test_upload_valid_reference( numeric_key, ['--privacy', '--privacy_secret', 'super_seekrit']) def test_upload_valid_plaintext_string_reference(self): with Namespace(self.namespace): string_key = db.Key.from_path('EtlTestEntityPii', '334-44-1234') self._test_upload_valid_reference(string_key, []) def test_upload_valid_plaintext_numeric_reference(self): with Namespace(self.namespace): numeric_key = db.Key.from_path('EtlTestEntityPii', 334441234) self._test_upload_valid_reference(numeric_key, []) def _download_archive(self, extra_args=None): extra_args = extra_args or [] etl.main(etl.create_args_parser().parse_args([etl._MODE_DOWNLOAD] + self.common_datastore_args + extra_args), environment_class=testing.FakeEnvironment) def _clear_datastore(self): self.swap( etl, '_raw_input', lambda x: etl._DELETE_DATASTORE_CONFIRMATION_INPUT) etl.main(etl.create_args_parser().parse_args([etl._MODE_DELETE] + self.common_datastore_args), environment_class=testing.FakeEnvironment) def _upload_archive(self, extra_args=None): extra_args = extra_args or [] etl.main(etl.create_args_parser().parse_args([etl._MODE_UPLOAD] + self.common_datastore_args + extra_args), environment_class=testing.FakeEnvironment) def _test_upload_valid_reference(self, pii_key, download_args): # Make ETL archive of item with PII in key using encoding. sites.setup_courses(self.raw) joes_score = 12345 with Namespace(self.namespace): pii = EtlTestEntityPii(key=pii_key, name='Joe', score=joes_score) pii.put() ref = EtlTestEntityPiiReference(pii=pii) ref.put() self._download_archive(download_args) self._clear_datastore() self._upload_archive() # Upload data. with Namespace(self.namespace): piis = EtlTestEntityPii.all().fetch(100) refs = EtlTestEntityPiiReference.all().fetch(100) self.assertEquals(1, len(piis)) self.assertEquals(1, len(refs)) self.assertEquals(joes_score, piis[0].score) self.assertEquals(None, piis[0].name, 'Blacklisted field should be None') self.assertEquals( joes_score, refs[0].pii.score, 'Reference by key using explicit name where key is PII') def test_upload_null_encoded_reference(self): self._test_upload_null_reference(['--privacy', '--privacy_secret', 'super_seekrit']) def test_upload_null_plaintext_reference(self): self._test_upload_null_reference([]) def _test_upload_null_reference(self, download_args): # Make ETL archive of item with PII in key using encoding. sites.setup_courses(self.raw) with Namespace(self.namespace): ref = EtlTestEntityPiiReference(pii=None) ref.put() self._download_archive(download_args) self._clear_datastore() self._upload_archive() with Namespace(self.namespace): refs = EtlTestEntityPiiReference.all().fetch(100) self.assertEquals(1, len(refs)) self.assertEquals(None, refs[0].pii) def test_upload_unsupported_type_fails(self): sites.setup_courses(self.raw) with Namespace(self.namespace): EtlTestEntityIllegal(score=123).put() etl.main(etl.create_args_parser().parse_args([etl._MODE_DOWNLOAD] + self.common_datastore_args), environment_class=testing.FakeEnvironment) with self.assertRaises(SystemExit): etl.main(etl.create_args_parser().parse_args([etl._MODE_UPLOAD] + self.common_datastore_args), environment_class=testing.FakeEnvironment) def test_upload_with_pre_existing_data(self): # make archive file with one element. sites.setup_courses(self.raw) with Namespace(self.namespace): EtlTestEntityPii(name='Fred').put() self._download_archive() # Upload that file - should fail since item still exists. 
with self.assertRaises(SystemExit): self._upload_archive() # Upload with --force_overwrite should succeed where previous failed. self._upload_archive(['--force_overwrite']) def test_upload_empty_archive_fails(self): sites.setup_courses(self.raw) self._download_archive() with self.assertRaises(SystemExit): self._upload_archive() def test_upload_with_no_classes_allowed_fails(self): # make archive file with one element. sites.setup_courses(self.raw) with Namespace(self.namespace): pii = EtlTestEntityPii(name='Fred') pii.put() self._download_archive() self._clear_datastore() # Upload should fail since --datastore_types does not mention # only type in archive. with self.assertRaises(SystemExit): self._upload_archive(['--datastore_types=FooBar']) # Upload should fail since --exclude_types names the # only type in archive. with self.assertRaises(SystemExit): self._upload_archive(['--exclude_types=EtlTestEntityPii']) def _build_entity_batch(self): ret = [] for _ in xrange(etl.create_args_parser().get_default('batch_size')): ret.append(EtlTestEntityPii(score=self.make_items_distinct_counter)) self.make_items_distinct_counter += 1 return ret def test_upload_resumption_with_trivial_quantity(self): sites.setup_courses(self.raw) with Namespace(self.namespace): thing_one = EtlTestEntityPii(name='Thing One') thing_one.put() thing_two = EtlTestEntityPii(name='Thing Two') thing_two.put() self._download_archive() self._clear_datastore() # Simulate 1st upload having partially succeeded. with Namespace(self.namespace): thing_one.put() # Should not barf. self._upload_archive(['--resume']) self.assertIn('Resuming upload at item number 0 of 2.', self.get_log()) # Upload again; everything should be seen to be present. self._upload_archive(['--resume']) self.assertIn('All 2 entities already uploaded; skipping', self.get_log()) def test_upload_resumption_with_batch_quantity(self): sites.setup_courses(self.raw) with Namespace(self.namespace): batch_one = self._build_entity_batch() batch_two = self._build_entity_batch() db.put(batch_one) db.put(batch_two) self._download_archive() # Simulate 1st batch having partially succeeded, 2nd batch not at all. self._clear_datastore() with Namespace(self.namespace): db.put([x for x in batch_one if x.score % 2]) self._upload_archive(['--resume']) self.assertIn('Resuming upload at item number 0 of 40.', self.get_log()) # Simulate 1st batch having fully succeeded, 2nd batch not at all. self._clear_datastore() with Namespace(self.namespace): db.put(batch_one) self._upload_archive(['--resume']) self.assertIn('Resuming upload at item number 20 of 40.', self.get_log()) # Simulate 1st batch having fully succeeded, 2nd batch partial self._clear_datastore() with Namespace(self.namespace): db.put(batch_one) db.put([x for x in batch_one if x.score % 2]) self._upload_archive(['--resume']) self.assertIn('Resuming upload at item number 20 of 40.', self.get_log()) # Upload again; everything should be seen to be present. self._upload_archive(['--resume']) self.assertIn('All 40 entities already uploaded; skipping', self.get_log()) def test_is_identity_transform_when_privacy_false(self): self.assertEqual( 1, etl._get_privacy_transform_fn(False, 'no_effect')(1)) self.assertEqual( 1, etl._get_privacy_transform_fn(False, 'other_value')(1)) def test_is_hmac_sha_2_256_when_privacy_true(self): # Must run etl.main() to get crypto module loaded. 
args = etl.create_args_parser().parse_args(['download'] + self.common_course_args) etl.main(args, environment_class=testing.FakeEnvironment) self.assertEqual( crypto.hmac_sha_2_256_transform('secret', 'value'), etl._get_privacy_transform_fn(True, 'secret')('value')) # TODO(johncox): re-enable these tests once we figure out how to make webtest # play nice with remote_api. class EtlRemoteEnvironmentTestCase(actions.TestBase): """Tests tools/etl/remote.py.""" def setUp(self): super(EtlRemoteEnvironmentTestCase, self).setUp() self.test_environ = copy.deepcopy(os.environ) def disabled_test_can_establish_environment_in_dev_mode(self): # Stub the call that requires user input so the test runs unattended. self.swap(__builtin__, 'raw_input', lambda _: 'username') self.assertEqual(os.environ['SERVER_SOFTWARE'], remote.SERVER_SOFTWARE) # establish() performs RPC. If it doesn't throw, we're good. remote.Environment('mycourse', 'localhost:8080').establish() def disabled_test_can_establish_environment_in_test_mode(self): self.test_environ['SERVER_SOFTWARE'] = remote.TEST_SERVER_SOFTWARE self.swap(os, 'environ', self.test_environ) # establish() performs RPC. If it doesn't throw, we're good. remote.Environment('mycourse', 'localhost:8080').establish() class CourseUrlRewritingTest(CourseUrlRewritingTestBase): """Run all existing tests using '/courses/pswg' base URL rewrite rules.""" class VirtualFileSystemTest(VirtualFileSystemTestBase): """Run all existing tests using virtual local file system.""" class MemcacheTestBase(actions.TestBase): """Executes all tests with memcache enabled.""" def setUp(self): super(MemcacheTestBase, self).setUp() config.Registry.test_overrides = {models.CAN_USE_MEMCACHE.name: True} def tearDown(self): config.Registry.test_overrides = {} super(MemcacheTestBase, self).tearDown() class MemcacheTest(MemcacheTestBase): """Executes all tests with memcache enabled.""" class LegacyEMailAsKeyNameTestBase(actions.TestBase): """Executes all tests with legacy Student key as email enabled.""" def setUp(self): super(LegacyEMailAsKeyNameTestBase, self).setUp() self.old_value = models.Student._LEGACY_EMAIL_AS_KEY_NAME_ENABLED models.Student._LEGACY_EMAIL_AS_KEY_NAME_ENABLED = True def tearDown(self): models.Student._LEGACY_EMAIL_AS_KEY_NAME_ENABLED = self.old_value super(LegacyEMailAsKeyNameTestBase, self).tearDown() class LegacyEMailAsKeyNameTest(LegacyEMailAsKeyNameTestBase): """Executes all tests with legacy Student key as email enabled.""" class PiiHolder(entities.BaseEntity): user_id = db.StringProperty(indexed=True) age = db.IntegerProperty(indexed=False) class_rank = db.IntegerProperty(indexed=False) registration_date = db.DateTimeProperty(indexed=True, required=True) class_goal = db.StringProperty(indexed=False, required=True) albedo = db.FloatProperty(indexed=False) _PROPERTY_EXPORT_BLACKLIST = [user_id, age] class TransformsEntitySchema(actions.TestBase): def test_schema(self): schema = entity_transforms.get_schema_for_entity(PiiHolder) schema = schema.get_json_schema_dict()['properties'] self.assertNotIn('user_id', schema) self.assertNotIn('age', schema) self.assertIn('class_rank', schema) self.assertEquals('integer', schema['class_rank']['type']) self.assertIn('optional', schema['class_rank']) self.assertEquals(True, schema['class_rank']['optional']) self.assertIn('registration_date', schema) self.assertEquals('datetime', schema['registration_date']['type']) self.assertNotIn('optional', schema['registration_date']) self.assertIn('class_goal', schema) self.assertEquals('string', 
schema['class_goal']['type']) self.assertNotIn('optional', schema['class_goal']) self.assertIn('albedo', schema) self.assertEquals('number', schema['albedo']['type']) self.assertIn('optional', schema['albedo']) self.assertEquals(True, schema['albedo']['optional']) class TransformsJsonFileTestCase(actions.TestBase): """Tests for models/transforms.py's JsonFile.""" def setUp(self): super(TransformsJsonFileTestCase, self).setUp() self.path = os.path.join(self.test_tempdir, 'file.json') self.reader = transforms.JsonFile(self.path) self.writer = transforms.JsonFile(self.path) self.first = 1 self.second = {'c': 'c_value', 'd': {'nested': 'e'}} def tearDown(self): self.reader.close() self.writer.close() super(TransformsJsonFileTestCase, self).tearDown() def test_round_trip_of_file_with_zero_records(self): self.writer.open('w') self.writer.close() self.reader.open('r') self.assertEqual([], [entity for entity in self.reader]) self.reader.reset() self.assertEqual({'rows': []}, self.reader.read()) def test_round_trip_of_file_with_one_record(self): self.writer.open('w') self.writer.write(self.first) self.writer.close() self.reader.open('r') self.assertEqual([self.first], [entity for entity in self.reader]) self.reader.reset() self.assertEqual({'rows': [self.first]}, self.reader.read()) def test_round_trip_of_file_with_multiple_records(self): self.writer.open('w') self.writer.write(self.first) self.writer.write(self.second) self.writer.close() self.reader.open('r') self.assertEqual( [self.first, self.second], [entity for entity in self.reader]) self.reader.reset() self.assertEqual( {'rows': [self.first, self.second]}, self.reader.read()) class ImportAssessmentTests(DatastoreBackedCourseTest): """Functional tests for assessments.""" def test_assessment_old_style(self): # test that assessment version 1.3 with empty html_content # is not assessment version 1.2 sites.setup_courses('course:/test::ns_test, course:/:/') course = courses.Course(None, app_context=sites.get_all_courses()[0]) a1 = course.add_assessment() course.save() assert course.find_unit_by_id(a1.unit_id) assert not courses.has_at_least_one_old_style_assessment(course) assert courses.has_only_new_style_assessments(course) # test that assessment version 1.3 with empty html_content # and js content is considered old-style assessment a2 = course.add_assessment() a2.title = 'Assessment 2' course.update_unit(a2) assessment_content = open(os.path.join( appengine_config.BUNDLE_ROOT, 'assets/js/assessment-Pre.js'), 'rb').readlines() assessment_content = u''.join(assessment_content) course.set_assessment_content(a2, assessment_content, []) course.save() assert courses.has_at_least_one_old_style_assessment(course) assert not courses.has_only_new_style_assessments(course) def test_import_empty_assessment(self): sites.setup_courses('course:/a::ns_a, course:/b::ns_b') src = courses.Course(None, app_context=sites.get_all_courses()[0]) src.add_assessment() src.save() dst = courses.Course(None, app_context=sites.get_all_courses()[1]) errors = [] dst.import_from(src.app_context, errors) self.assertEqual(0, len(errors)) self.assertEqual(1, len(dst.get_assessment_list())) assert courses.has_only_new_style_assessments(dst) def test_import_course13_w_assessment12(self): """Tests importing a new-style course with old-style assessment.""" # set up the src and dst courses ver.13 sites.setup_courses('course:/a::ns_a, course:/b::ns_b, course:/:/') src_app_ctx = sites.get_all_courses()[0] src_course = courses.Course(None, app_context=src_app_ctx) dst_app_ctx = 
sites.get_all_courses()[0] dst_course = courses.Course(None, app_context=dst_app_ctx) # add old-style assessment to the src a1_title = 'Assessment content version 12' a1 = src_course.add_assessment() a1.title = a1_title src_course.update_unit(a1) assessment_content = open(os.path.join( appengine_config.BUNDLE_ROOT, 'assets/js/assessment-Pre.js'), 'rb').readlines() assessment_content = u''.join(assessment_content) errors = [] src_course.set_assessment_content( a1, assessment_content, errors) # add new style assessment to src a2_title = 'Assessment content version 13' a2_html_content = 'content' a2 = src_course.add_assessment() a2.title = a2_title a2.html_content = a2_html_content a2.html_check_answers = 'check' a2.html_review_form = 'review' a2.workflow_yaml = 'a: 3' src_course.update_unit(a2) # save course and confirm assessments src_course.save() assert not errors assessment_content_stored = src_course.app_context.fs.get(os.path.join( src_course.app_context.get_home(), src_course.get_assessment_filename(a1.unit_id))) assert assessment_content == assessment_content_stored # import course dst_course.import_from(src_app_ctx, errors) # assert old-style assessment has been ported to a new-style one dst_a1 = dst_course.get_units()[0] self.assertEqual('A', dst_a1.type) self.assertEqual(a1_title, dst_a1.title) assert dst_a1.html_content dst_a2 = dst_course.get_units()[1] self.assertEqual('A', dst_a1.type) self.assertEqual(a2_title, dst_a2.title) self.assertEqual(a2_html_content, dst_a2.html_content) # cleaning up sites.reset_courses() class ImportActivityTests(DatastoreBackedCourseTest): """Functional tests for importing legacy activities into lessons.""" FREETEXT_QUESTION = """ var activity = [ { questionType: 'freetext', correctAnswerRegex: /abc/i, correctAnswerOutput: "Correct.", incorrectAnswerOutput: "Try again.", showAnswerOutput: "A hint." } ]; """ MULTPLE_CHOICE_QUESTION = """ var activity = [ {questionType: 'multiple choice', choices: [ ['a', false, 'A'], ['b', true, 'B'], ['c', false, 'C'], ['d', false, 'D'] ] } ]; """ MULTPLE_CHOICE_GROUP_QUESTION = """ var activity = [ {questionType: 'multiple choice group', questionsList: [ { questionHTML: 'choose a', choices: ['aa', 'bb'], correctIndex: 0 }, { questionHTML: 'choose b or c', choices: ['aa', 'bb', 'cc'], correctIndex: [1, 2] } ] allCorrectOutput: 'unused', someIncorrectOutput: 'also unused' } ]; """ def setUp(self): super(ImportActivityTests, self).setUp() self.course = courses.Course(None, app_context=self.app_context) self.unit = self.course.add_unit() self.lesson = self.course.add_lesson(self.unit) self.old_namespace = namespace_manager.get_namespace() namespace_manager.set_namespace(self.app_context.get_namespace_name()) def tearDown(self): namespace_manager.set_namespace(self.old_namespace) super(ImportActivityTests, self).tearDown() def test_hide_activity(self): """Tests old-style activity annotations.""" # set up a version 13 course sites.setup_courses('course:/test::ns_test, course:/:/') app_ctx = sites.get_all_courses()[0] course = courses.Course(None, app_context=app_ctx) # add a unit & a lesson w.o. activity unit = course.add_unit() course.add_lesson(unit) course.save() # admin logs in and gets the lesson for editing actions.login('admin@foo.com', is_admin=True) response = self.get('/test/dashboard?action=edit_lesson&key=2') self.assertEqual(200, response.status_int) # TODO(nretallack): Make better way to test the oeditor configuration. 
# This test should check that the following fields are hidden: # activity_title, activity_listed, activity # `video` and `id' are other hidden fields that are irrelevant to this # test. self.assert_num_hidden_annotations(response.body, 5) # add a lesson w. old-style activity lesson = course.add_lesson(unit) lesson.scored = True lesson.has_activity = True lesson.activity_title = 'activity title' lesson.activity_listed = True errors = [] course.set_activity_content( lesson, unicode(self.FREETEXT_QUESTION), errors) assert not errors course.save() # assert that there are no hidden annotations actions.login('admin@foo.com', is_admin=True) response = self.get('/test/dashboard?action=edit_lesson&key=3') self.assertEqual(200, response.status_int) # the `video` and `id` fields will still be hidden self.assert_num_hidden_annotations(response.body, 2) # cleaning up sites.reset_courses() def assert_num_hidden_annotations(self, body, n): start_marker = 'load_schema_with_annotations = function(schema)' suffix = body.split(start_marker)[1] end_marker = re.compile(r'\s+}') func_body = end_marker.split(suffix)[0] self.assertEqual(n, func_body.count('hidden')) def test_import_lesson13_w_activity12_to_lesson13(self): # Setup the src and destination courses. sites.setup_courses('course:/a::ns_a, course:/b::ns_b') src_ctx = sites.get_all_courses()[0] src_course = courses.Course(None, app_context=src_ctx) dst_course = courses.Course( None, app_context=sites.get_all_courses()[1]) # Add a unit & a lesson title = 'activity title' src_unit = src_course.add_unit() src_lesson = src_course.add_lesson(src_unit) src_lesson.title = 'Test Lesson' src_lesson.scored = True src_lesson.objectives = 'objectives' src_lesson.video = 'video' src_lesson.notes = 'notes' src_lesson.duration = 'duration' src_lesson.now_available = True src_lesson.has_activity = True src_lesson.activity_title = title src_lesson.activity_listed = True src_lesson.properties = {'key': 'value'} src_course.save() # Add an old style activity to the src lesson activity = unicode(self.FREETEXT_QUESTION) errors = [] src_course.set_activity_content(src_lesson, activity, errors) assert not errors # Import course and verify activities errors = [] dst_course.import_from(src_ctx, errors) self.assertEqual(0, len(errors)) u1, l1, a1, _ = self.calc_course_stats(src_course) u2, l2, _, _ = self.calc_course_stats(dst_course) self.assertEqual(1, l1) self.assertEqual(2, l2) self.assertEqual(u1, u2) self.assertEqual(l1 + a1, l2) new_lesson = dst_course.get_lessons('1')[1] assert 'quid=' in new_lesson.objectives self.assertEqual(title, new_lesson.title) assert courses.has_at_least_one_old_style_activity(src_course) assert courses.has_only_new_style_activities(dst_course) def test_import_free_text_activity(self): text = self.FREETEXT_QUESTION content, noverify_text = verify.convert_javascript_to_python( text, 'activity') activity = verify.evaluate_python_expression_from_text( content, 'activity', verify.Activity().scope, noverify_text) qid, instance_id = models.QuestionImporter.import_question( activity['activity'][0], self.unit, self.lesson.title, 1, ['task']) assert qid and isinstance(qid, (int, long)) assert re.match(r'^[a-zA-Z0-9]{12}$', instance_id) question = models.QuestionDAO.load(qid) self.assertEqual(question.type, models.QuestionDTO.SHORT_ANSWER) self.assertEqual(question.dict['version'], '1.5') self.assertEqual( question.dict['description'], 'Imported from unit "New Unit", lesson "New Lesson" (question #1)') self.assertEqual(question.dict['question'], 'task') 
self.assertEqual(question.dict['hint'], 'A hint.') self.assertEqual(question.dict['defaultFeedback'], 'Try again.') self.assertEqual(len(question.dict['graders']), 1) grader = question.dict['graders'][0] self.assertEqual(grader['score'], 1.0) self.assertEqual(grader['matcher'], 'regex') self.assertEqual(grader['response'], '/abc/i') self.assertEqual(grader['feedback'], 'Correct.') def test_import_free_text_with_missing_fields(self): # correctAnswerOutput and incorrectAnswerOutput are missing text = """ var activity = [ { questionType: 'freetext', correctAnswerRegex: /abc/i, showAnswerOutput: "A hint." } ]; """ content, noverify_text = verify.convert_javascript_to_python( text, 'activity') activity = verify.evaluate_python_expression_from_text( content, 'activity', verify.Activity().scope, noverify_text) qid, instance_id = models.QuestionImporter.import_question( activity['activity'][0], self.unit, self.lesson.title, 1, ['task']) assert qid and isinstance(qid, (int, long)) assert re.match(r'^[a-zA-Z0-9]{12}$', instance_id) question = models.QuestionDAO.load(qid) self.assertEqual(question.type, models.QuestionDTO.SHORT_ANSWER) self.assertEqual(question.dict['version'], '1.5') self.assertEqual( question.dict['description'], 'Imported from unit "New Unit", lesson "New Lesson" (question #1)') self.assertEqual(question.dict['question'], 'task') self.assertEqual(question.dict['hint'], 'A hint.') self.assertEqual(question.dict['defaultFeedback'], '') self.assertEqual(len(question.dict['graders']), 1) grader = question.dict['graders'][0] self.assertEqual(grader['score'], 1.0) self.assertEqual(grader['matcher'], 'regex') self.assertEqual(grader['response'], '/abc/i') self.assertEqual(grader['feedback'], '') def test_activity_import_unique_constraint(self): text = self.FREETEXT_QUESTION content, noverify_text = verify.convert_javascript_to_python( text, 'activity') activity = verify.evaluate_python_expression_from_text( content, 'activity', verify.Activity().scope, noverify_text) qid, _ = models.QuestionImporter.import_question( activity['activity'][0], self.unit, self.lesson.title, 1, ['task']) assert qid and isinstance(qid, (int, long)) self.assertRaises(models.CollisionError, models.QuestionImporter.import_question, activity['activity'][0], self.unit, self.lesson.title, 1, ['task']) def test_import_multiple_choice_activity(self): text = self.MULTPLE_CHOICE_QUESTION content, noverify_text = verify.convert_javascript_to_python( text, 'activity') activity = verify.evaluate_python_expression_from_text( content, 'activity', verify.Activity().scope, noverify_text) verify.Verifier().verify_activity_instance(activity, 'none') qid, instance_id = models.QuestionImporter.import_question( activity['activity'][0], self.unit, self.lesson.title, 1, ['task']) assert qid and isinstance(qid, (int, long)) assert re.match(r'^[a-zA-Z0-9]{12}$', instance_id) question = models.QuestionDAO.load(qid) self.assertEqual(question.type, models.QuestionDTO.MULTIPLE_CHOICE) self.assertEqual(question.dict['version'], '1.5') self.assertEqual( question.dict['description'], 'Imported from unit "New Unit", lesson "New Lesson" (question #1)') self.assertEqual(question.dict['question'], 'task') self.assertEqual(question.dict['multiple_selections'], False) self.assertEqual(len(question.dict['choices']), 4) choices = question.dict['choices'] choices_data = [ ['a', 0.0, 'A'], ['b', 1.0, 'B'], ['c', 0.0, 'C'], ['d', 0.0, 'D']] for i, choice in enumerate(choices): self.assertEqual(choice['text'], choices_data[i][0]) 
self.assertEqual(choice['score'], choices_data[i][1]) self.assertEqual(choice['feedback'], choices_data[i][2]) def test_import_multiple_choice_group_activity(self): text = self.MULTPLE_CHOICE_GROUP_QUESTION content, noverify_text = verify.convert_javascript_to_python( text, 'activity') activity = verify.evaluate_python_expression_from_text( content, 'activity', verify.Activity().scope, noverify_text) verify.Verifier().verify_activity_instance(activity, 'none') qid, instance_id = models.QuestionImporter.import_question( activity['activity'][0], self.unit, self.lesson.title, 1, ['task']) assert qid and isinstance(qid, (int, long)) assert re.match(r'^[a-zA-Z0-9]{12}$', instance_id) question_group = models.QuestionGroupDAO.load(qid) self.assertEqual(question_group.dict['version'], '1.5') self.assertEqual( question_group.dict['description'], 'Imported from unit "New Unit", lesson "New Lesson" (question #1)') self.assertEqual(len(question_group.dict['items']), 2) items = question_group.dict['items'] self.assertEqual(items[0]['weight'], 1.0) self.assertEqual(items[1]['weight'], 1.0) # The first question is multiple choice with single selection qid = items[0]['question'] question = models.QuestionDAO.load(qid) self.assertEqual(question.type, models.QuestionDTO.MULTIPLE_CHOICE) self.assertEqual(question.dict['version'], '1.5') self.assertEqual( question.dict['description'], ( 'Imported from unit "New Unit", lesson "New Lesson" ' '(question #1, part #1)')) self.assertEqual(question.dict['question'], 'choose a') self.assertEqual(question.dict['multiple_selections'], False) self.assertEqual(len(question.dict['choices']), 2) choices = question.dict['choices'] self.assertEqual(choices[0]['text'], 'aa') self.assertEqual(choices[0]['score'], 1.0) self.assertEqual(choices[1]['text'], 'bb') self.assertEqual(choices[1]['score'], 0.0) # The second question is multiple choice with multiple selection qid = items[1]['question'] question = models.QuestionDAO.load(qid) self.assertEqual(question.type, models.QuestionDTO.MULTIPLE_CHOICE) self.assertEqual(question.dict['version'], '1.5') self.assertEqual( question.dict['description'], ( 'Imported from unit "New Unit", lesson "New Lesson" ' '(question #1, part #2)')) self.assertEqual(question.dict['question'], 'choose b or c') self.assertEqual(question.dict['multiple_selections'], True) self.assertEqual(len(question.dict['choices']), 3) choices = question.dict['choices'] self.assertEqual(choices[0]['text'], 'aa') self.assertEqual(choices[0]['score'], -1.0) self.assertEqual(choices[1]['text'], 'bb') self.assertEqual(choices[1]['score'], 0.5) self.assertEqual(choices[1]['text'], 'bb') self.assertEqual(choices[1]['score'], 0.5) class ImportGiftQuestionsTests(DatastoreBackedCourseTest): """Functional tests for importing GIFT-formatted questions.""" def test_import_gift_questions(self): # get import gift questions email = 'gift@google.com' actions.login(email, is_admin=True) response = self.get('dashboard?action=import_gift_questions') assert_equals(200, response.status_int) assert_contains('GIFT Questions', response.body) # put import gift questions payload_dict = { 'description': 'gift group', 'questions': '::title mc::q1? {=c ~w}\n\n ::title: true/false:: q2? 
{T}'} request = {} request['payload'] = transforms.dumps(payload_dict) request[ 'xsrf_token'] = XsrfTokenManager.create_xsrf_token( 'import-gift-questions') response = self.testapp.put('/rest/question/gift?%s' % urllib.urlencode( {'request': transforms.dumps(request)}), {}) assert_equals(response.status_int, 200) assert_contains('gift group', response.body) class NamespaceTest(actions.TestBase): def test_namespace_context_manager(self): pre_test_namespace = namespace_manager.get_namespace() with Namespace('xyzzy'): self.assertEqual(namespace_manager.get_namespace(), 'xyzzy') with Namespace('plugh'): self.assertEqual(namespace_manager.get_namespace(), 'plugh') self.assertEqual(namespace_manager.get_namespace(), 'xyzzy') self.assertEqual(namespace_manager.get_namespace(), pre_test_namespace) def test_namespace_context_manager_handles_exception(self): pre_test_namespace = namespace_manager.get_namespace() try: with Namespace('xyzzy'): self.assertEqual(namespace_manager.get_namespace(), 'xyzzy') raise RuntimeError('No way, Jose') except RuntimeError: pass self.assertEqual(namespace_manager.get_namespace(), pre_test_namespace) ALL_COURSE_TESTS = ( StudentAspectTest, AssessmentTest, CourseAuthorAspectTest, StaticHandlerTest, AdminAspectTest, PeerReviewControllerTest, PeerReviewDashboardAdminTest, PeerReviewAnalyticsTest) MemcacheTest.__bases__ += (InfrastructureTest,) + ALL_COURSE_TESTS CourseUrlRewritingTest.__bases__ += ALL_COURSE_TESTS VirtualFileSystemTest.__bases__ += ALL_COURSE_TESTS DatastoreBackedSampleCourseTest.__bases__ += ALL_COURSE_TESTS LegacyEMailAsKeyNameTest.__bases__ += ALL_COURSE_TESTS
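# Illustrative sketch (not part of the original file): the privacy tests above
# assert that etl._get_privacy_transform_fn is the identity when privacy is off
# and an HMAC-SHA-256 keyed hash when it is on, so equal user_ids still match
# across entities while the raw values are obscured. A minimal stand-in with
# that contract, using only the standard library (names are hypothetical, not
# the actual Course Builder implementation):
import hashlib
import hmac

def get_privacy_transform_fn(privacy_enabled, secret):
    """Identity when privacy is off; keyed, deterministic hash when it is on."""
    if not privacy_enabled:
        return lambda value: value
    return lambda value: hmac.new(
        str(secret).encode('utf-8'), str(value).encode('utf-8'),
        hashlib.sha256).hexdigest()

# Equal inputs map to equal outputs, so EventEntity.user_id still matches
# Student.user_id after the transform, but neither equals the raw id.
transform = get_privacy_transform_fn(True, 'super_seekrit')
assert transform('1') == transform('1')
assert transform('1') != '1'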
ehiller/CourseBuilderV19-TeacherDashboard
tests/functional/test_classes.py
Python
apache-2.0
232,592
[ "VisIt" ]
3601d0b1cb70fcfa4b1321a9677f2096984a0541d0213efe6f3282f45b6f3856
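The NamespaceTest cases in tests/functional/test_classes.py above pin down the contract of the Namespace context manager: the namespace is switched on entry, restored on exit, and restored even when the body raises. The real helper lives elsewhere in Course Builder and is not shown in this record; the following is a minimal sketch of a manager with that contract, assuming the App Engine namespace_manager API used throughout the file.

from google.appengine.api import namespace_manager

class Namespace(object):
    """Scope datastore operations to a namespace, restoring the old one on exit."""

    def __init__(self, namespace):
        self._namespace = namespace
        self._saved = None

    def __enter__(self):
        self._saved = namespace_manager.get_namespace()
        namespace_manager.set_namespace(self._namespace)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Always restore, even if the body raised; returning False re-raises.
        namespace_manager.set_namespace(self._saved)
        return False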
""" MultiVolumeVisualizationMix :Authors: Berend Klein Haneveld """ from MultiVolumeVisualization import MultiVolumeVisualization from MultiVolumeVisualization import CreateFunctionFromProperties from MultiVolumeVisualization import CreateEmptyFunctions from VolumeVisualization import VisualizationTypeSimple from core.decorators import overrides from PySide.QtGui import QGroupBox from PySide.QtGui import QWidget from PySide.QtGui import QLabel from PySide.QtGui import QGridLayout from PySide.QtGui import QSlider from PySide.QtGui import QComboBox from PySide.QtCore import Qt from vtk import vtkVolumeProperty class MultiVolumeVisualizationMix(MultiVolumeVisualization): """ MultiVolumeVisualizationMix is a visualization that just mixes the two given volume properties together. """ def __init__(self): super(MultiVolumeVisualizationMix, self).__init__() self.fixedOpacity = 1.0 self.movingOpacity = 1.0 self.blendType = 0 # Default blend type self.fixedVisualization = None self.movingVisualization = None self.mapper = None @overrides(MultiVolumeVisualization) def setImageData(self, fixedImageData, movingImageData): self.fixedImageData = fixedImageData self.movingImageData = movingImageData @overrides(MultiVolumeVisualization) def setFixedVisualization(self, visualization): self.fixedVisualization = visualization self.fixedVolProp = self._createVolPropFromVis(self.fixedVisualization, self.fixedOpacity) self.sliderFixedOpacity.setDisabled(self.fixedVisualization.visualizationType != VisualizationTypeSimple) self.labelFixedOpacity.setDisabled(self.fixedVisualization.visualizationType != VisualizationTypeSimple) @overrides(MultiVolumeVisualization) def setMovingVisualization(self, visualization): self.movingVisualization = visualization self.movingVolProp = self._createVolPropFromVis(self.movingVisualization, self.movingOpacity) self.sliderMovingOpacity.setDisabled(self.movingVisualization.visualizationType != VisualizationTypeSimple) self.labelMovingOpacity.setDisabled(self.movingVisualization.visualizationType != VisualizationTypeSimple) @overrides(MultiVolumeVisualization) def getParameterWidget(self): self.labelFixedOpacity = QLabel("Fixed:") self.labelFixedOpacity.setAlignment(Qt.AlignRight) self.labelMovingOpacity = QLabel("Moving:") self.labelMovingOpacity.setAlignment(Qt.AlignRight) self.sliderFixedOpacity = QSlider(Qt.Horizontal) self.sliderFixedOpacity.setValue(pow(self.fixedOpacity, 1.0/3.0) * 100.0) self.sliderMovingOpacity = QSlider(Qt.Horizontal) self.sliderMovingOpacity.setValue(pow(self.movingOpacity, 1.0/3.0) * 100.0) self.blendTypeComboBox = QComboBox() self.blendTypeComboBox.addItem("Default additive blend") self.blendTypeComboBox.addItem("Difference blend") self.blendTypeComboBox.currentIndexChanged.connect(self.valueChanged) # Be sure to connect after the values are set... 
self.sliderFixedOpacity.valueChanged.connect(self.valueChanged) self.sliderMovingOpacity.valueChanged.connect(self.valueChanged) groupLayout = QGridLayout() groupLayout.setAlignment(Qt.AlignTop) groupLayout.addWidget(self.labelFixedOpacity, 0, 0) groupLayout.addWidget(self.sliderFixedOpacity, 0, 1) groupLayout.addWidget(self.labelMovingOpacity, 1, 0) groupLayout.addWidget(self.sliderMovingOpacity, 1, 1) groupBox = QGroupBox() groupBox.setTitle("Opacity:") groupBox.setLayout(groupLayout) layout = QGridLayout() layout.setAlignment(Qt.AlignTop) layout.addWidget(groupBox) widget = QWidget() widget.setLayout(layout) return widget @overrides(MultiVolumeVisualization) def valueChanged(self, value): """ This method is called when the value of one of the sliders / fields is adjusted. Argument value is unused. It is just there so that it can be connected to the signals of the interface elements. :type value: int """ self.fixedOpacity = applyOpacityFunction(float(self.sliderFixedOpacity.value()) / 100.0) self.movingOpacity = applyOpacityFunction(float(self.sliderMovingOpacity.value()) / 100.0) self.blendType = self.blendTypeComboBox.currentIndex() self.updateTransferFunctions() @overrides(MultiVolumeVisualization) def updateTransferFunctions(self): self.fixedVolProp = self._createVolPropFromVis(self.fixedVisualization, self.fixedOpacity) self.movingVolProp = self._createVolPropFromVis(self.movingVisualization, self.movingOpacity) if self.mapper: self.mapper.SetBlendType(self.blendType) self.updatedTransferFunction.emit() @overrides(MultiVolumeVisualization) def setMapper(self, mapper): self.mapper = mapper def _createVolPropFromVis(self, visualization, opacity): volProp = vtkVolumeProperty() if visualization: volProp.DeepCopy(visualization.volProp) opacityFunction = CreateFunctionFromProperties(opacity, volProp) volProp.SetScalarOpacity(opacityFunction) else: color, opacityFunction = CreateEmptyFunctions() volProp.SetColor(color) volProp.SetScalarOpacity(opacityFunction) return volProp def applyOpacityFunction(value): """ Make sure that the slider opacity values are not linear. """ return value * value * value
berendkleinhaneveld/Registrationshop
ui/visualizations/MultiVolumeVisualizationMix.py
Python
mit
5,177
[ "VTK" ]
2030f69fe9623debbf2431d62d10f5b2e04c28a23fba21dea6ae499979337873
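The opacity sliders in ui/visualizations/MultiVolumeVisualizationMix.py above store values in the 0-100 range; valueChanged cubes the normalized value via applyOpacityFunction, and getParameterWidget applies the cube root when initializing the sliders. A small sketch of that round trip (helper names below are illustrative, not part of the module):

def slider_to_opacity(slider_value):
    # Cube the normalized slider position so perceived opacity changes are
    # not linear (mirrors applyOpacityFunction in the module above).
    fraction = slider_value / 100.0
    return fraction * fraction * fraction

def opacity_to_slider(opacity):
    # Inverse mapping used when initializing the sliders: cube root, rescaled.
    return int(round(pow(opacity, 1.0 / 3.0) * 100.0))

# A slider at 50 yields an opacity of 0.125, which maps back to 50.
assert slider_to_opacity(50) == 0.125
assert opacity_to_slider(0.125) == 50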
""" Tests for the Course Outline view and supporting views. """ from __future__ import absolute_import import datetime import json import re import six from completion import waffle from completion.models import BlockCompletion from completion.test_utils import CompletionWaffleTestMixin from django.contrib.sites.models import Site from django.test import override_settings from django.urls import reverse from milestones.tests.utils import MilestonesTestCaseMixin from mock import Mock, patch from opaque_keys.edx.keys import CourseKey, UsageKey from pyquery import PyQuery as pq from six import text_type from waffle.models import Switch from waffle.testutils import override_switch from lms.djangoapps.courseware.tests.factories import StaffFactory from gating import api as lms_gating_api from lms.djangoapps.course_api.blocks.transformers.milestones import MilestonesAndSpecialExamsTransformer from openedx.core.lib.gating import api as gating_api from openedx.features.course_experience.views.course_outline import ( DEFAULT_COMPLETION_TRACKING_START, CourseOutlineFragmentView ) from student.models import CourseEnrollment from student.tests.factories import UserFactory from xmodule.modulestore import ModuleStoreEnum from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory from .test_course_home import course_home_url TEST_PASSWORD = 'test' GATING_NAMESPACE_QUALIFIER = '.gating' class TestCourseOutlinePage(SharedModuleStoreTestCase): """ Test the course outline view. """ @classmethod def setUpClass(cls): """ Set up an array of various courses to be tested. """ # setUpClassAndTestData() already calls setUpClass on SharedModuleStoreTestCase # pylint: disable=super-method-not-called with super(TestCourseOutlinePage, cls).setUpClassAndTestData(): cls.courses = [] course = CourseFactory.create() with cls.store.bulk_operations(course.id): chapter = ItemFactory.create(category='chapter', parent_location=course.location) sequential = ItemFactory.create(category='sequential', parent_location=chapter.location) vertical = ItemFactory.create(category='vertical', parent_location=sequential.location) course.children = [chapter] chapter.children = [sequential] sequential.children = [vertical] cls.courses.append(course) course = CourseFactory.create() with cls.store.bulk_operations(course.id): chapter = ItemFactory.create(category='chapter', parent_location=course.location) sequential = ItemFactory.create(category='sequential', parent_location=chapter.location) sequential2 = ItemFactory.create(category='sequential', parent_location=chapter.location) vertical = ItemFactory.create( category='vertical', parent_location=sequential.location, display_name="Vertical 1" ) vertical2 = ItemFactory.create( category='vertical', parent_location=sequential2.location, display_name="Vertical 2" ) course.children = [chapter] chapter.children = [sequential, sequential2] sequential.children = [vertical] sequential2.children = [vertical2] cls.courses.append(course) course = CourseFactory.create() with cls.store.bulk_operations(course.id): chapter = ItemFactory.create(category='chapter', parent_location=course.location) sequential = ItemFactory.create( category='sequential', parent_location=chapter.location, due=datetime.datetime.now(), graded=True, format='Homework', ) vertical = ItemFactory.create(category='vertical', parent_location=sequential.location) course.children = [chapter] chapter.children = [sequential] sequential.children = 
[vertical] cls.courses.append(course) @classmethod def setUpTestData(cls): """Set up and enroll our fake user in the course.""" cls.user = UserFactory(password=TEST_PASSWORD) for course in cls.courses: CourseEnrollment.enroll(cls.user, course.id) def setUp(self): """ Set up for the tests. """ super(TestCourseOutlinePage, self).setUp() self.client.login(username=self.user.username, password=TEST_PASSWORD) def test_outline_details(self): for course in self.courses: url = course_home_url(course) response = self.client.get(url) self.assertEqual(response.status_code, 200) response_content = response.content.decode("utf-8") self.assertTrue(course.children) for chapter in course.children: self.assertIn(chapter.display_name, response_content) self.assertTrue(chapter.children) for sequential in chapter.children: self.assertIn(sequential.display_name, response_content) if sequential.graded: self.assertIn(sequential.due.strftime(u'%Y-%m-%d %H:%M:%S'), response_content) self.assertIn(sequential.format, response_content) self.assertTrue(sequential.children) for vertical in sequential.children: self.assertIn(vertical.display_name, response_content) class TestCourseOutlinePageWithPrerequisites(SharedModuleStoreTestCase, MilestonesTestCaseMixin): """ Test the course outline view with prerequisites. """ TRANSFORMER_CLASS_TO_TEST = MilestonesAndSpecialExamsTransformer @classmethod def setUpClass(cls): """ Creates a test course that can be used for non-destructive tests """ # pylint: disable=super-method-not-called cls.PREREQ_REQUIRED = '(Prerequisite required)' cls.UNLOCKED = 'Unlocked' with super(TestCourseOutlinePageWithPrerequisites, cls).setUpClassAndTestData(): cls.course, cls.course_blocks = cls.create_test_course() @classmethod def setUpTestData(cls): """Set up and enroll our fake user in the course.""" cls.user = UserFactory(password=TEST_PASSWORD) CourseEnrollment.enroll(cls.user, cls.course.id) @classmethod def create_test_course(cls): """Creates a test course.""" course = CourseFactory.create() course.enable_subsection_gating = True course_blocks = {} with cls.store.bulk_operations(course.id): course_blocks['chapter'] = ItemFactory.create( category='chapter', parent_location=course.location ) course_blocks['prerequisite'] = ItemFactory.create( category='sequential', parent_location=course_blocks['chapter'].location, display_name='Prerequisite Exam' ) course_blocks['gated_content'] = ItemFactory.create( category='sequential', parent_location=course_blocks['chapter'].location, display_name='Gated Content' ) course_blocks['prerequisite_vertical'] = ItemFactory.create( category='vertical', parent_location=course_blocks['prerequisite'].location ) course_blocks['gated_content_vertical'] = ItemFactory.create( category='vertical', parent_location=course_blocks['gated_content'].location ) course.children = [course_blocks['chapter']] course_blocks['chapter'].children = [course_blocks['prerequisite'], course_blocks['gated_content']] course_blocks['prerequisite'].children = [course_blocks['prerequisite_vertical']] course_blocks['gated_content'].children = [course_blocks['gated_content_vertical']] if hasattr(cls, 'user'): CourseEnrollment.enroll(cls.user, course.id) return course, course_blocks def setUp(self): """ Set up for the tests. 
""" super(TestCourseOutlinePageWithPrerequisites, self).setUp() self.client.login(username=self.user.username, password=TEST_PASSWORD) def setup_gated_section(self, gated_block, gating_block): """ Test helper to create a gating requirement Args: gated_block: The block the that learner will not have access to until they complete the gating block gating_block: (The prerequisite) The block that must be completed to get access to the gated block """ gating_api.add_prerequisite(self.course.id, six.text_type(gating_block.location)) gating_api.set_required_content(self.course.id, gated_block.location, gating_block.location, 100) def test_content_locked(self): """ Test that a sequential/subsection with unmet prereqs correctly indicated that its content is locked """ course = self.course self.setup_gated_section(self.course_blocks['gated_content'], self.course_blocks['prerequisite']) response = self.client.get(course_home_url(course)) self.assertEqual(response.status_code, 200) response_content = pq(response.content) # check lock icon is present lock_icon = response_content('.fa-lock') self.assertTrue(lock_icon, "lock icon is not present, but should be") subsection = lock_icon.parents('.subsection-text') # check that subsection-title-name is the display name gated_subsection_title = self.course_blocks['gated_content'].display_name self.assertIn(gated_subsection_title, subsection.children('.subsection-title').html()) # check that it says prerequisite required self.assertIn("Prerequisite:", subsection.children('.details').html()) # check that there is not a screen reader message self.assertFalse(subsection.children('.sr')) def test_content_unlocked(self): """ Test that a sequential/subsection with met prereqs correctly indicated that its content is unlocked """ course = self.course self.setup_gated_section(self.course_blocks['gated_content'], self.course_blocks['prerequisite']) # complete the prerequisite to unlock the gated content # this call triggers reevaluation of prerequisites fulfilled by the gating block. with patch('openedx.core.lib.gating.api.get_subsection_completion_percentage', Mock(return_value=100)): lms_gating_api.evaluate_prerequisite( self.course, Mock(location=self.course_blocks['prerequisite'].location, percent_graded=1.0), self.user, ) response = self.client.get(course_home_url(course)) self.assertEqual(response.status_code, 200) response_content = pq(response.content) # check unlock icon is not present unlock_icon = response_content('.fa-unlock') self.assertFalse(unlock_icon, "unlock icon is present, yet shouldn't be.") gated_subsection_title = self.course_blocks['gated_content'].display_name every_subsection_on_outline = response_content('.subsection-title') subsection_has_gated_text = False says_prerequisite_required = False for subsection_contents in every_subsection_on_outline.contents(): subsection_has_gated_text = gated_subsection_title in subsection_contents says_prerequisite_required = "Prerequisite:" in subsection_contents # check that subsection-title-name is the display name of gated content section self.assertTrue(subsection_has_gated_text) self.assertFalse(says_prerequisite_required) class TestCourseOutlineResumeCourse(SharedModuleStoreTestCase, CompletionWaffleTestMixin): """ Test start course and resume course for the course outline view. Technically, this mixes course home and course outline tests, but checking the counts of start/resume course should be done together to avoid false positives. 
""" @classmethod def setUpClass(cls): """ Creates a test course that can be used for non-destructive tests """ # setUpClassAndTestData() already calls setUpClass on SharedModuleStoreTestCase # pylint: disable=super-method-not-called with super(TestCourseOutlineResumeCourse, cls).setUpClassAndTestData(): cls.course = cls.create_test_course() @classmethod def setUpTestData(cls): """Set up and enroll our fake user in the course.""" cls.user = UserFactory(password=TEST_PASSWORD) CourseEnrollment.enroll(cls.user, cls.course.id) cls.site = Site.objects.get_current() @classmethod def create_test_course(cls): """ Creates a test course. """ course = CourseFactory.create() with cls.store.bulk_operations(course.id): chapter = ItemFactory.create(category='chapter', parent_location=course.location) sequential = ItemFactory.create(category='sequential', parent_location=chapter.location) sequential2 = ItemFactory.create(category='sequential', parent_location=chapter.location) vertical = ItemFactory.create(category='vertical', parent_location=sequential.location) vertical2 = ItemFactory.create(category='vertical', parent_location=sequential2.location) course.children = [chapter] chapter.children = [sequential, sequential2] sequential.children = [vertical] sequential2.children = [vertical2] if hasattr(cls, 'user'): CourseEnrollment.enroll(cls.user, course.id) return course def setUp(self): """ Set up for the tests. """ super(TestCourseOutlineResumeCourse, self).setUp() self.client.login(username=self.user.username, password=TEST_PASSWORD) def visit_sequential(self, course, chapter, sequential): """ Navigates to the provided sequential. """ last_accessed_url = reverse( 'courseware_section', kwargs={ 'course_id': text_type(course.id), 'chapter': chapter.url_name, 'section': sequential.url_name, } ) self.assertEqual(200, self.client.get(last_accessed_url).status_code) @override_switch( '{}.{}'.format( waffle.WAFFLE_NAMESPACE, waffle.ENABLE_COMPLETION_TRACKING ), active=True ) def complete_sequential(self, course, sequential): """ Completes provided sequential. 
""" course_key = CourseKey.from_string(str(course.id)) # Fake a visit to sequence2/vertical2 block_key = UsageKey.from_string(six.text_type(sequential.location)) completion = 1.0 BlockCompletion.objects.submit_completion( user=self.user, course_key=course_key, block_key=block_key, completion=completion ) def visit_course_home(self, course, start_count=0, resume_count=0): """ Helper function to navigates to course home page, test for resume buttons :param course: course factory object :param start_count: number of times 'Start Course' should appear :param resume_count: number of times 'Resume Course' should appear :return: response object """ response = self.client.get(course_home_url(course)) self.assertEqual(response.status_code, 200) self.assertContains(response, 'Start Course', count=start_count) self.assertContains(response, 'Resume Course', count=resume_count) return response def test_course_home_completion(self): """ Test that completed blocks appear checked on course home page """ self.override_waffle_switch(True) course = self.course vertical = course.children[0].children[0].children[0] response = self.client.get(course_home_url(course)) content = pq(response.content) self.assertEqual(len(content('.fa-check')), 0) self.complete_sequential(self.course, vertical) response = self.client.get(course_home_url(course)) content = pq(response.content) # vertical and its parent should be checked self.assertEqual(len(content('.fa-check')), 2) def test_start_course(self): """ Tests that the start course button appears when the course has never been accessed. Technically, this is a course home test, and not a course outline test, but checking the counts of start/resume course should be done together to not get a false positive. """ course = self.course response = self.visit_course_home(course, start_count=1, resume_count=0) content = pq(response.content) self.assertTrue(content('.action-resume-course').attr('href').endswith('/course/' + course.url_name)) @override_settings(LMS_BASE='test_url:9999') def test_resume_course_with_completion_api(self): """ Tests completion API resume button functionality """ self.override_waffle_switch(True) # Course tree course = self.course course_key = CourseKey.from_string(str(course.id)) vertical1 = course.children[0].children[0].children[0] vertical2 = course.children[0].children[1].children[0] self.complete_sequential(self.course, vertical1) # Test for 'resume' link response = self.visit_course_home(course, resume_count=1) # Test for 'resume' link URL - should be vertical 1 content = pq(response.content) self.assertTrue(content('.action-resume-course').attr('href').endswith('/vertical/' + vertical1.url_name)) self.complete_sequential(self.course, vertical2) # Test for 'resume' link response = self.visit_course_home(course, resume_count=1) # Test for 'resume' link URL - should be vertical 2 content = pq(response.content) self.assertTrue(content('.action-resume-course').attr('href').endswith('/vertical/' + vertical2.url_name)) # visit sequential 1, make sure 'Resume Course' URL is robust against 'Last Visited' # (even though I visited seq1/vert1, 'Resume Course' still points to seq2/vert2) self.visit_sequential(course, course.children[0], course.children[0].children[0]) # Test for 'resume' link URL - should be vertical 2 (last completed block, NOT last visited) response = self.visit_course_home(course, resume_count=1) content = pq(response.content) self.assertTrue(content('.action-resume-course').attr('href').endswith('/vertical/' + vertical2.url_name)) def 

    def test_resume_course_deleted_sequential(self):
        """
        Tests resume course when the last completed sequential is deleted and
        there is another sequential left in the chapter.
        """
        course = self.create_test_course()

        # complete both sequentials so the learner has a last completed block
        chapter = course.children[0]
        self.assertGreaterEqual(len(chapter.children), 2)
        sequential = chapter.children[0]
        sequential2 = chapter.children[1]
        self.complete_sequential(course, sequential)
        self.complete_sequential(course, sequential2)

        # remove one of the sequentials from the chapter
        with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course.id):
            self.store.delete_item(sequential.location, self.user.id)

        # check resume course buttons
        response = self.visit_course_home(course, resume_count=1)

        content = pq(response.content)
        self.assertTrue(content('.action-resume-course').attr('href').endswith('/sequential/' + sequential2.url_name))

    def test_resume_course_deleted_sequentials(self):
        """
        Tests resume course when the last completed sequential is deleted and
        there are no sequentials left in the chapter.
        """
        course = self.create_test_course()

        # complete a sequential so the learner has a last completed block
        chapter = course.children[0]
        self.assertEqual(len(chapter.children), 2)
        sequential = chapter.children[0]
        self.complete_sequential(course, sequential)

        # remove all sequentials from the chapter
        with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course.id):
            for sequential in chapter.children:
                self.store.delete_item(sequential.location, self.user.id)

        # check resume course buttons
        self.visit_course_home(course, start_count=1, resume_count=0)

    def test_course_home_for_global_staff(self):
        """
        Tests that a staff user can access the course home without being enrolled
        in the course.
        """
        course = self.course
        self.user.is_staff = True
        self.user.save()

        self.override_waffle_switch(True)
        CourseEnrollment.get_enrollment(self.user, course.id).delete()

        response = self.visit_course_home(course, start_count=1, resume_count=0)
        content = pq(response.content)

        self.assertTrue(content('.action-resume-course').attr('href').endswith('/course/' + course.url_name))

    @override_switch(
        '{}.{}'.format(
            waffle.WAFFLE_NAMESPACE, waffle.ENABLE_COMPLETION_TRACKING
        ),
        active=True
    )
    def test_course_outline_auto_open(self):
        """
        Tests that the course outline auto-opens to the first unit
        in a course if a user has no completion data, and to the
        last-accessed unit if a user does have completion data.
""" def get_sequential_button(url, is_hidden): is_hidden_string = "is-hidden" if is_hidden else "" return "<olclass=\"outline-itemaccordion-panel" + is_hidden_string + "\"" \ "id=\"" + url + "_contents\"" \ "aria-labelledby=\"" + url + "\"" \ ">" # Course tree course = self.course chapter = course.children[0] sequential1 = chapter.children[0] sequential2 = chapter.children[1] response_content = self.client.get(course_home_url(course)).content stripped_response = text_type(re.sub("\\s+", "", response_content), "utf-8") self.assertTrue(get_sequential_button(text_type(sequential1.location), False) in stripped_response) self.assertTrue(get_sequential_button(text_type(sequential2.location), True) in stripped_response) content = pq(response_content) button = content('#expand-collapse-outline-all-button') self.assertEqual('Expand All', button.children()[0].text) def test_user_enrolled_after_completion_collection(self): """ Tests that the _completion_data_collection_start() method returns the created time of the waffle switch that enables completion data tracking. """ view = CourseOutlineFragmentView() switches = waffle.waffle() # pylint: disable=protected-access switch_name = switches._namespaced_name(waffle.ENABLE_COMPLETION_TRACKING) switch, _ = Switch.objects.get_or_create(name=switch_name) self.assertEqual(switch.created, view._completion_data_collection_start()) switch.delete() def test_user_enrolled_after_completion_collection_default(self): """ Tests that the _completion_data_collection_start() method returns a default constant when no Switch object exists for completion data tracking. """ view = CourseOutlineFragmentView() # pylint: disable=protected-access self.assertEqual(DEFAULT_COMPLETION_TRACKING_START, view._completion_data_collection_start()) class TestCourseOutlinePreview(SharedModuleStoreTestCase): """ Unit tests for staff preview of the course outline. """ def update_masquerade(self, course, role, group_id=None, user_name=None): """ Toggle masquerade state. """ masquerade_url = reverse( 'masquerade_update', kwargs={ 'course_key_string': six.text_type(course.id), } ) response = self.client.post( masquerade_url, json.dumps({'role': role, 'group_id': group_id, 'user_name': user_name}), 'application/json' ) self.assertEqual(response.status_code, 200) return response def test_preview(self): """ Verify the behavior of preview for the course outline. 
""" course = CourseFactory.create( start=datetime.datetime.now() - datetime.timedelta(days=30) ) staff_user = StaffFactory(course_key=course.id, password=TEST_PASSWORD) CourseEnrollment.enroll(staff_user, course.id) future_date = datetime.datetime.now() + datetime.timedelta(days=30) with self.store.bulk_operations(course.id): chapter = ItemFactory.create( category='chapter', parent_location=course.location, display_name='First Chapter', ) sequential = ItemFactory.create(category='sequential', parent_location=chapter.location) ItemFactory.create(category='vertical', parent_location=sequential.location) chapter = ItemFactory.create( category='chapter', parent_location=course.location, display_name='Future Chapter', start=future_date, ) sequential = ItemFactory.create(category='sequential', parent_location=chapter.location) ItemFactory.create(category='vertical', parent_location=sequential.location) # Verify that a staff user sees a chapter with a due date in the future self.client.login(username=staff_user.username, password='test') url = course_home_url(course) response = self.client.get(url) self.assertEqual(response.status_code, 200) self.assertContains(response, 'Future Chapter') # Verify that staff masquerading as a learner see the future chapter. self.update_masquerade(course, role='student') response = self.client.get(url) self.assertEqual(response.status_code, 200) self.assertContains(response, 'Future Chapter')
ESOedX/edx-platform
openedx/features/course_experience/tests/views/test_course_outline.py
Python
agpl-3.0
27,152
[ "VisIt" ]
90438417610fc3f96cc20f70f9f6d7d82eea68c8b04524c8d548923f64c3cf95