text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
from rdkit import Chem
from rdkit.Chem import rdConformerParser
from rdkit import RDConfig
import unittest
import os
from rdkit.RDLogger import logger
logger = logger()
class TestCase(unittest.TestCase):
    """Tests for rdConformerParser.AddConformersFromAmberTrajectory."""

    def testReadAmberTraj(self):
        """Conformers parsed from AMBER trajectory files land on the molecule.

        Covers single- and two-frame trajectories and the ``clearConfs`` /
        ``numConfs`` keyword behaviour.
        """
        # NOTE: the original used TestCase.failUnless, which was deprecated in
        # Python 2.7 and removed in 3.12; assertEqual states intent and gives
        # better failure messages.
        testData = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'test_data')
        mol = Chem.AddHs(Chem.MolFromSmiles('O'))
        # Single-frame trajectory -> exactly one conformer, id 0.
        fileN = os.path.join(testData, 'water_coords.trx')
        ids = rdConformerParser.AddConformersFromAmberTrajectory(mol, fileN)
        self.assertEqual(mol.GetNumConformers(), 1)
        self.assertEqual(len(ids), 1)
        self.assertEqual(ids[0], 0)
        # Two-frame trajectory: clearConfs=True replaces existing conformers,
        # clearConfs=False appends to them.
        fileN = os.path.join(testData, 'water_coords2.trx')
        ids = rdConformerParser.AddConformersFromAmberTrajectory(mol, fileN, clearConfs=True)
        self.assertEqual(mol.GetNumConformers(), 2)
        ids = rdConformerParser.AddConformersFromAmberTrajectory(mol, fileN, clearConfs=False)
        self.assertEqual(mol.GetNumConformers(), 4)
        # numConfs caps how many frames are read from the file.
        ids = rdConformerParser.AddConformersFromAmberTrajectory(mol, fileN, numConfs=1,
                                                                 clearConfs=True)
        self.assertEqual(mol.GetNumConformers(), 1)


if __name__ == '__main__':
    unittest.main()
|
bp-kelley/rdkit
|
Contrib/ConformerParser/Wrap/testConformerParser.py
|
Python
|
bsd-3-clause
| 1,253
|
[
"RDKit"
] |
c40a36a6f4fef3e8eddf3dd50be962b525d67e8c6e1bb9f67761822dc241b09e
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
| Database of Pulay corannulene structures. Subsumed into CFLOW.
- **cp** ``'off'`` || ``'on'``
- **rlxd** ``'off'``
"""
import re
import qcdb
# <<< CORE Database Module >>>
# Geometries and reference energies for Pulay's corannulene dimer structures
# (the literature citation was left blank in the original source).
dbse = 'CORE'  # database keyword; prefixes every dictionary key below
# <<< Database Members >>>
# Reaction labels: corannulene sandwich dimers; the suffix encodes the
# intermonomer z-separation in angstrom (dimer3_54 -> 3.54 A, etc.).
HRXN = ['dimer3_54', 'dimer3_64', 'dimer3_73', 'dimer3_74', 'dimer3_84', ]
HRXN_SM = []  # "small" subset -- none designated
HRXN_LG = []  # "large" subset -- none designated
# <<< Chemical Systems Involved >>>
RXNM = {}     # reaction matrix of reagent contributions per reaction
ACTV = {}     # order of active reagents per reaction
ACTV_CP = {}  # order of active reagents per counterpoise-corrected reaction
ACTV_SA = {}  # order of active reagents for non-supermolecular calculations
# Populate the reagent bookkeeping tables for every reaction.  Each reagent
# label has the form '<dbse>-<rxn>[-suffix]'; the suffixes distinguish the
# dimer from counterpoise (CP, ghost-bearing) and plain (unCP) monomers.
for rxn in HRXN:
    rxn_key = '%s-%s' % (dbse, rxn)
    dimer = '%s-dimer' % rxn_key
    monoA_cp = '%s-monoA-CP' % rxn_key
    monoB_cp = '%s-monoB-CP' % rxn_key
    monoA_uncp = '%s-monoA-unCP' % rxn_key
    monoB_uncp = '%s-monoB-unCP' % rxn_key
    # Stoichiometric weight of each reagent in the interaction energy.
    RXNM[rxn_key] = {dimer: +1,
                     monoA_cp: -1,
                     monoB_cp: -1,
                     monoA_uncp: -1,
                     monoB_uncp: -1}
    ACTV_SA[rxn_key] = [dimer]
    ACTV_CP[rxn_key] = [dimer, monoA_cp, monoB_cp]
    ACTV[rxn_key] = [dimer, monoA_uncp, monoB_uncp]
# <<< Reference Values [kcal/mol] >>>
# Reference interaction energies (the source citation was left blank in the
# original file).
BIND = {}
BIND['%s-%s' % (dbse, 'dimer3_54' )] = -14.8000
BIND['%s-%s' % (dbse, 'dimer3_64' )] = -15.4000
BIND['%s-%s' % (dbse, 'dimer3_73' )] = -15.6000 # Bootstrapped, Pulay does not report
BIND['%s-%s' % (dbse, 'dimer3_74' )] = -15.4000
BIND['%s-%s' % (dbse, 'dimer3_84' )] = -15.0000
# <<< Comment Lines >>>
# The descriptions are identical placeholders for every reaction, so build
# them in a loop instead of spelling out thirty assignments.  Key set,
# string values, and dict insertion order match the hand-written original
# exactly (all six entries for one reaction before the next reaction).
TAGL = {}
for rxn in HRXN:
    TAGL['%s-%s' % (dbse, rxn)] = """ """
    TAGL['%s-%s-dimer' % (dbse, rxn)] = """Dimer from """
    TAGL['%s-%s-monoA-CP' % (dbse, rxn)] = """Monomer A from """
    TAGL['%s-%s-monoB-CP' % (dbse, rxn)] = """Monomer B from """
    TAGL['%s-%s-monoA-unCP' % (dbse, rxn)] = """Monomer A from """
    TAGL['%s-%s-monoB-unCP' % (dbse, rxn)] = """Monomer B from """
# <<< Geometry Specification Strings >>>
# Five corannulene sandwich dimers.  Monomer A (first fragment) is identical
# in every entry; monomer B is the same geometry translated along +z by the
# amount encoded in the reaction name (dimer3_54 -> +3.54 angstrom, etc.),
# as the z columns below confirm.
GEOS = {}
# dimer3_54: monomer B shifted +3.54 A along z
GEOS['%s-%s-dimer' % (dbse, 'dimer3_54')] = qcdb.Molecule("""
0 1
C 0.70622800 0.97211978 0.61694803
C -0.70622800 0.97211978 0.61694803
C -1.14280400 -0.37137722 0.61681203
C 0.00000000 -1.20165922 0.61659503
C 1.14280400 -0.37137722 0.61681203
C 1.45779000 2.00650178 0.09413403
C -1.45779000 2.00650178 0.09413403
C -2.35873800 -0.76639722 0.09397203
C 0.00000000 -2.48004022 0.09366903
C 2.35873800 -0.76639722 0.09397203
C 0.69261800 3.17923978 -0.25321497
C -0.69261800 3.17923978 -0.25321497
C -2.80958100 1.64119778 -0.25292797
C -3.23765700 0.32373778 -0.25303797
C -2.42918200 -2.16498922 -0.25302597
C -1.30841500 -2.97916822 -0.25327697
C 1.30841500 -2.97916822 -0.25327697
C 2.42918200 -2.16498922 -0.25302597
C 3.23765700 0.32373778 -0.25303797
C 2.80958100 1.64119778 -0.25292797
H 1.20851300 4.06642078 -0.61418797
H -1.20851300 4.06642078 -0.61418797
H -3.49401500 2.40602178 -0.61367197
H -4.24094400 0.10729578 -0.61373997
H -3.36816400 -2.57958822 -0.61350597
H -1.41248600 -4.00024222 -0.61397997
H 1.41248600 -4.00024222 -0.61397997
H 3.36816400 -2.57958822 -0.61350597
H 4.24094400 0.10729578 -0.61373997
H 3.49401500 2.40602178 -0.61367197
--
0 1
C 0.70622800 0.97211978 4.15694803
C -0.70622800 0.97211978 4.15694803
C -1.14280400 -0.37137722 4.15681203
C 0.00000000 -1.20165922 4.15659503
C 1.14280400 -0.37137722 4.15681203
C 1.45779000 2.00650178 3.63413403
C -1.45779000 2.00650178 3.63413403
C -2.35873800 -0.76639722 3.63397203
C 0.00000000 -2.48004022 3.63366903
C 2.35873800 -0.76639722 3.63397203
C 0.69261800 3.17923978 3.28678503
C -0.69261800 3.17923978 3.28678503
C -2.80958100 1.64119778 3.28707203
C -3.23765700 0.32373778 3.28696203
C -2.42918200 -2.16498922 3.28697403
C -1.30841500 -2.97916822 3.28672303
C 1.30841500 -2.97916822 3.28672303
C 2.42918200 -2.16498922 3.28697403
C 3.23765700 0.32373778 3.28696203
C 2.80958100 1.64119778 3.28707203
H 1.20851300 4.06642078 2.92581203
H -1.20851300 4.06642078 2.92581203
H -3.49401500 2.40602178 2.92632803
H -4.24094400 0.10729578 2.92626003
H -3.36816400 -2.57958822 2.92649403
H -1.41248600 -4.00024222 2.92602003
H 1.41248600 -4.00024222 2.92602003
H 3.36816400 -2.57958822 2.92649403
H 4.24094400 0.10729578 2.92626003
H 3.49401500 2.40602178 2.92632803
units angstrom
""")
# dimer3_64: monomer B shifted +3.64 A along z
GEOS['%s-%s-dimer' % (dbse, 'dimer3_64')] = qcdb.Molecule("""
0 1
C 0.70622800 0.97211978 0.61694803
C -0.70622800 0.97211978 0.61694803
C -1.14280400 -0.37137722 0.61681203
C 0.00000000 -1.20165922 0.61659503
C 1.14280400 -0.37137722 0.61681203
C 1.45779000 2.00650178 0.09413403
C -1.45779000 2.00650178 0.09413403
C -2.35873800 -0.76639722 0.09397203
C 0.00000000 -2.48004022 0.09366903
C 2.35873800 -0.76639722 0.09397203
C 0.69261800 3.17923978 -0.25321497
C -0.69261800 3.17923978 -0.25321497
C -2.80958100 1.64119778 -0.25292797
C -3.23765700 0.32373778 -0.25303797
C -2.42918200 -2.16498922 -0.25302597
C -1.30841500 -2.97916822 -0.25327697
C 1.30841500 -2.97916822 -0.25327697
C 2.42918200 -2.16498922 -0.25302597
C 3.23765700 0.32373778 -0.25303797
C 2.80958100 1.64119778 -0.25292797
H 1.20851300 4.06642078 -0.61418797
H -1.20851300 4.06642078 -0.61418797
H -3.49401500 2.40602178 -0.61367197
H -4.24094400 0.10729578 -0.61373997
H -3.36816400 -2.57958822 -0.61350597
H -1.41248600 -4.00024222 -0.61397997
H 1.41248600 -4.00024222 -0.61397997
H 3.36816400 -2.57958822 -0.61350597
H 4.24094400 0.10729578 -0.61373997
H 3.49401500 2.40602178 -0.61367197
--
0 1
C 0.70622800 0.97211978 4.25694803
C -0.70622800 0.97211978 4.25694803
C -1.14280400 -0.37137722 4.25681203
C 0.00000000 -1.20165922 4.25659503
C 1.14280400 -0.37137722 4.25681203
C 1.45779000 2.00650178 3.73413403
C -1.45779000 2.00650178 3.73413403
C -2.35873800 -0.76639722 3.73397203
C 0.00000000 -2.48004022 3.73366903
C 2.35873800 -0.76639722 3.73397203
C 0.69261800 3.17923978 3.38678503
C -0.69261800 3.17923978 3.38678503
C -2.80958100 1.64119778 3.38707203
C -3.23765700 0.32373778 3.38696203
C -2.42918200 -2.16498922 3.38697403
C -1.30841500 -2.97916822 3.38672303
C 1.30841500 -2.97916822 3.38672303
C 2.42918200 -2.16498922 3.38697403
C 3.23765700 0.32373778 3.38696203
C 2.80958100 1.64119778 3.38707203
H 1.20851300 4.06642078 3.02581203
H -1.20851300 4.06642078 3.02581203
H -3.49401500 2.40602178 3.02632803
H -4.24094400 0.10729578 3.02626003
H -3.36816400 -2.57958822 3.02649403
H -1.41248600 -4.00024222 3.02602003
H 1.41248600 -4.00024222 3.02602003
H 3.36816400 -2.57958822 3.02649403
H 4.24094400 0.10729578 3.02626003
H 3.49401500 2.40602178 3.02632803
units angstrom
""")
# dimer3_73: monomer B shifted +3.73 A along z
GEOS['%s-%s-dimer' % (dbse, 'dimer3_73')] = qcdb.Molecule("""
0 1
C 0.70622800 0.97211978 0.61694803
C -0.70622800 0.97211978 0.61694803
C -1.14280400 -0.37137722 0.61681203
C 0.00000000 -1.20165922 0.61659503
C 1.14280400 -0.37137722 0.61681203
C 1.45779000 2.00650178 0.09413403
C -1.45779000 2.00650178 0.09413403
C -2.35873800 -0.76639722 0.09397203
C 0.00000000 -2.48004022 0.09366903
C 2.35873800 -0.76639722 0.09397203
C 0.69261800 3.17923978 -0.25321497
C -0.69261800 3.17923978 -0.25321497
C -2.80958100 1.64119778 -0.25292797
C -3.23765700 0.32373778 -0.25303797
C -2.42918200 -2.16498922 -0.25302597
C -1.30841500 -2.97916822 -0.25327697
C 1.30841500 -2.97916822 -0.25327697
C 2.42918200 -2.16498922 -0.25302597
C 3.23765700 0.32373778 -0.25303797
C 2.80958100 1.64119778 -0.25292797
H 1.20851300 4.06642078 -0.61418797
H -1.20851300 4.06642078 -0.61418797
H -3.49401500 2.40602178 -0.61367197
H -4.24094400 0.10729578 -0.61373997
H -3.36816400 -2.57958822 -0.61350597
H -1.41248600 -4.00024222 -0.61397997
H 1.41248600 -4.00024222 -0.61397997
H 3.36816400 -2.57958822 -0.61350597
H 4.24094400 0.10729578 -0.61373997
H 3.49401500 2.40602178 -0.61367197
--
0 1
C 0.70622800 0.97211978 4.34694803
C -0.70622800 0.97211978 4.34694803
C -1.14280400 -0.37137722 4.34681203
C 0.00000000 -1.20165922 4.34659503
C 1.14280400 -0.37137722 4.34681203
C 1.45779000 2.00650178 3.82413403
C -1.45779000 2.00650178 3.82413403
C -2.35873800 -0.76639722 3.82397203
C 0.00000000 -2.48004022 3.82366903
C 2.35873800 -0.76639722 3.82397203
C 0.69261800 3.17923978 3.47678503
C -0.69261800 3.17923978 3.47678503
C -2.80958100 1.64119778 3.47707203
C -3.23765700 0.32373778 3.47696203
C -2.42918200 -2.16498922 3.47697403
C -1.30841500 -2.97916822 3.47672303
C 1.30841500 -2.97916822 3.47672303
C 2.42918200 -2.16498922 3.47697403
C 3.23765700 0.32373778 3.47696203
C 2.80958100 1.64119778 3.47707203
H 1.20851300 4.06642078 3.11581203
H -1.20851300 4.06642078 3.11581203
H -3.49401500 2.40602178 3.11632803
H -4.24094400 0.10729578 3.11626003
H -3.36816400 -2.57958822 3.11649403
H -1.41248600 -4.00024222 3.11602003
H 1.41248600 -4.00024222 3.11602003
H 3.36816400 -2.57958822 3.11649403
H 4.24094400 0.10729578 3.11626003
H 3.49401500 2.40602178 3.11632803
units angstrom
""")
# dimer3_74: monomer B shifted +3.74 A along z
GEOS['%s-%s-dimer' % (dbse, 'dimer3_74')] = qcdb.Molecule("""
0 1
C 0.70622800 0.97211978 0.61694803
C -0.70622800 0.97211978 0.61694803
C -1.14280400 -0.37137722 0.61681203
C 0.00000000 -1.20165922 0.61659503
C 1.14280400 -0.37137722 0.61681203
C 1.45779000 2.00650178 0.09413403
C -1.45779000 2.00650178 0.09413403
C -2.35873800 -0.76639722 0.09397203
C 0.00000000 -2.48004022 0.09366903
C 2.35873800 -0.76639722 0.09397203
C 0.69261800 3.17923978 -0.25321497
C -0.69261800 3.17923978 -0.25321497
C -2.80958100 1.64119778 -0.25292797
C -3.23765700 0.32373778 -0.25303797
C -2.42918200 -2.16498922 -0.25302597
C -1.30841500 -2.97916822 -0.25327697
C 1.30841500 -2.97916822 -0.25327697
C 2.42918200 -2.16498922 -0.25302597
C 3.23765700 0.32373778 -0.25303797
C 2.80958100 1.64119778 -0.25292797
H 1.20851300 4.06642078 -0.61418797
H -1.20851300 4.06642078 -0.61418797
H -3.49401500 2.40602178 -0.61367197
H -4.24094400 0.10729578 -0.61373997
H -3.36816400 -2.57958822 -0.61350597
H -1.41248600 -4.00024222 -0.61397997
H 1.41248600 -4.00024222 -0.61397997
H 3.36816400 -2.57958822 -0.61350597
H 4.24094400 0.10729578 -0.61373997
H 3.49401500 2.40602178 -0.61367197
--
0 1
C 0.70622800 0.97211978 4.35694803
C -0.70622800 0.97211978 4.35694803
C -1.14280400 -0.37137722 4.35681203
C 0.00000000 -1.20165922 4.35659503
C 1.14280400 -0.37137722 4.35681203
C 1.45779000 2.00650178 3.83413403
C -1.45779000 2.00650178 3.83413403
C -2.35873800 -0.76639722 3.83397203
C 0.00000000 -2.48004022 3.83366903
C 2.35873800 -0.76639722 3.83397203
C 0.69261800 3.17923978 3.48678503
C -0.69261800 3.17923978 3.48678503
C -2.80958100 1.64119778 3.48707203
C -3.23765700 0.32373778 3.48696203
C -2.42918200 -2.16498922 3.48697403
C -1.30841500 -2.97916822 3.48672303
C 1.30841500 -2.97916822 3.48672303
C 2.42918200 -2.16498922 3.48697403
C 3.23765700 0.32373778 3.48696203
C 2.80958100 1.64119778 3.48707203
H 1.20851300 4.06642078 3.12581203
H -1.20851300 4.06642078 3.12581203
H -3.49401500 2.40602178 3.12632803
H -4.24094400 0.10729578 3.12626003
H -3.36816400 -2.57958822 3.12649403
H -1.41248600 -4.00024222 3.12602003
H 1.41248600 -4.00024222 3.12602003
H 3.36816400 -2.57958822 3.12649403
H 4.24094400 0.10729578 3.12626003
H 3.49401500 2.40602178 3.12632803
units angstrom
""")
# dimer3_84: monomer B shifted +3.84 A along z
GEOS['%s-%s-dimer' % (dbse, 'dimer3_84')] = qcdb.Molecule("""
0 1
C 0.70622800 0.97211978 0.61694803
C -0.70622800 0.97211978 0.61694803
C -1.14280400 -0.37137722 0.61681203
C 0.00000000 -1.20165922 0.61659503
C 1.14280400 -0.37137722 0.61681203
C 1.45779000 2.00650178 0.09413403
C -1.45779000 2.00650178 0.09413403
C -2.35873800 -0.76639722 0.09397203
C 0.00000000 -2.48004022 0.09366903
C 2.35873800 -0.76639722 0.09397203
C 0.69261800 3.17923978 -0.25321497
C -0.69261800 3.17923978 -0.25321497
C -2.80958100 1.64119778 -0.25292797
C -3.23765700 0.32373778 -0.25303797
C -2.42918200 -2.16498922 -0.25302597
C -1.30841500 -2.97916822 -0.25327697
C 1.30841500 -2.97916822 -0.25327697
C 2.42918200 -2.16498922 -0.25302597
C 3.23765700 0.32373778 -0.25303797
C 2.80958100 1.64119778 -0.25292797
H 1.20851300 4.06642078 -0.61418797
H -1.20851300 4.06642078 -0.61418797
H -3.49401500 2.40602178 -0.61367197
H -4.24094400 0.10729578 -0.61373997
H -3.36816400 -2.57958822 -0.61350597
H -1.41248600 -4.00024222 -0.61397997
H 1.41248600 -4.00024222 -0.61397997
H 3.36816400 -2.57958822 -0.61350597
H 4.24094400 0.10729578 -0.61373997
H 3.49401500 2.40602178 -0.61367197
--
0 1
C 0.70622800 0.97211978 4.45694803
C -0.70622800 0.97211978 4.45694803
C -1.14280400 -0.37137722 4.45681203
C 0.00000000 -1.20165922 4.45659503
C 1.14280400 -0.37137722 4.45681203
C 1.45779000 2.00650178 3.93413403
C -1.45779000 2.00650178 3.93413403
C -2.35873800 -0.76639722 3.93397203
C 0.00000000 -2.48004022 3.93366903
C 2.35873800 -0.76639722 3.93397203
C 0.69261800 3.17923978 3.58678503
C -0.69261800 3.17923978 3.58678503
C -2.80958100 1.64119778 3.58707203
C -3.23765700 0.32373778 3.58696203
C -2.42918200 -2.16498922 3.58697403
C -1.30841500 -2.97916822 3.58672303
C 1.30841500 -2.97916822 3.58672303
C 2.42918200 -2.16498922 3.58697403
C 3.23765700 0.32373778 3.58696203
C 2.80958100 1.64119778 3.58707203
H 1.20851300 4.06642078 3.22581203
H -1.20851300 4.06642078 3.22581203
H -3.49401500 2.40602178 3.22632803
H -4.24094400 0.10729578 3.22626003
H -3.36816400 -2.57958822 3.22649403
H -1.41248600 -4.00024222 3.22602003
H 1.41248600 -4.00024222 3.22602003
H 3.36816400 -2.57958822 3.22649403
H 4.24094400 0.10729578 3.22626003
H 3.49401500 2.40602178 3.22632803
units angstrom
""")
# <<< Derived Geometry Strings >>>
# Carve the monomer systems out of each dimer.  unCP keeps only the named
# fragment; the two-argument form presumably retains the partner fragment
# as ghost atoms for counterpoise corrections (qcdb convention -- confirm).
for rxn in HRXN:
    dimer = GEOS['%s-%s-dimer' % (dbse, rxn)]
    GEOS['%s-%s-monoA-unCP' % (dbse, rxn)] = dimer.extract_fragments(1)
    GEOS['%s-%s-monoB-unCP' % (dbse, rxn)] = dimer.extract_fragments(2)
    GEOS['%s-%s-monoA-CP' % (dbse, rxn)] = dimer.extract_fragments(1, 2)
    GEOS['%s-%s-monoB-CP' % (dbse, rxn)] = dimer.extract_fragments(2, 1)
#########################################################################
# <<< Supplementary Quantum Chemical Results >>>
# Precomputed nuclear repulsion energies for every reagent (presumably in
# hartree -- units not stated in the original; confirm against qcdb).  All
# monomer entries share one value because monomers A and B are the same
# corannulene geometry, merely translated.
DATA = {}
DATA['NUCLEAR REPULSION ENERGY'] = {}
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_54-dimer' ] = 4584.11459289
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_54-monoA-unCP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_54-monoB-unCP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_64-dimer' ] = 4555.01239979
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_64-monoA-unCP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_64-monoB-unCP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_73-dimer' ] = 4529.48976988
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_73-monoA-unCP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_73-monoB-unCP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_74-dimer' ] = 4526.69216135
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_74-monoA-unCP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_74-monoB-unCP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_84-dimer' ] = 4499.12706628
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_84-monoA-unCP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_84-monoB-unCP' ] = 1387.77369315
# CP monomers: ghost atoms carry no nuclear charge, so the value equals the
# unCP monomer value.
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_54-monoA-CP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_54-monoB-CP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_64-monoA-CP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_64-monoB-CP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_73-monoA-CP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_73-monoB-CP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_74-monoA-CP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_74-monoB-CP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_84-monoA-CP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_84-monoB-CP' ] = 1387.77369315
|
psi4/psi4
|
psi4/share/psi4/databases/CORE.py
|
Python
|
lgpl-3.0
| 23,803
|
[
"Psi4"
] |
7e494d245ae960f5881d2a42efe681828c49063dfbbc79e26e0f4e49974edd33
|
"""
CBMPy: CBWx module
===================
PySCeS Constraint Based Modelling (http://cbmpy.sourceforge.net)
Copyright (C) 2009-2022 Brett G. Olivier, VU University Amsterdam, Amsterdam, The Netherlands
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
Author: Brett G. Olivier
Contact email: bgoli@users.sourceforge.net
Last edit: $Author: bgoli $ ($Id: CBWx.py 710 2020-04-27 14:22:34Z bgoli $)
"""
# preparing for Python 3 port
from __future__ import division, print_function
from __future__ import absolute_import
# from __future__ import unicode_literals
import os, time, random, math, re, webbrowser, locale, sys
# this is a hack that needs to be streamlined a bit
try:
import cStringIO as csio
except ImportError:
import io as csio
HAVE_WX = False
try:
import wx
import wx.grid
import wx.html
import wx.lib.ClickableHtmlWindow
import wx.aui
import wx.lib.colourdb
from wx.lib.floatcanvas import NavCanvas, FloatCanvas, Resources
from wx.lib import scrolledpanel
import wx.lib.mixins.listctrl as listmix
from wx import ImageFromStream, BitmapFromImage
HAVE_WX = True
print('WX GUI tools available.')
except ImportError as ex:
print('\nWX GUI tools not available.')
print(ex)
HAVE_URLLIB2 = True
try:
import urllib2
except:
HAVE_URLLIB2 = False
if HAVE_WX:
class ModelEditor(wx.Frame):
# --- class-level defaults for ModelEditor (instance state is assigned in
# __init__ and the panel-building methods) ---
# Layout: overall panel size and relative widths/heights of the left (grid)
# and right (notebook) panels.
PanelSize = (1100, 600)
RPwidth = 0.5
RPheight = 1.0
LPwidth = 0.5
LPheight = 1.0
mainSizer = None
# Reaction grid dimensions: column count derives from rlabels; rows are the
# model's reaction ids.
RGridCol = None
RGridRow = None
# Column headers of the reaction grid.
rlabels = (
    'Reaction',
    'Name',
    'Flux',
    'd',
    'LB',
    'UB',
    'RCost',
    'Exch',
    'Balanced',
)
# Values treated as "empty" when reading grid cells.
NOVAL = (None, '', 'None')
RGrid = None
pybox = None
# Notebook and its pages.
NoteB1 = None
NoteB1_Psession = None
NoteB1_Preaction = None
NoteB1_Prelate = None
NoteB1_Pgene = None
# Float canvases for the metabolite and gene graphs.
FCanvas_met = None
FCanvas_gen = None
Colour = None
Rinfbox = None
# _cmod_: the CBM model being edited; _cbm_: the cbmpy module (imported lazily
# in __init__).
_cmod_ = None
_cbm_ = None
MainPanel = None
StatusBar = None
# Output location for saved models/sessions.
OUT_dir = None
OUT_file = None
# Status-row text controls (objective value/sense/condition).
STS_OBJ_FUNC = None
STS_OBJ_SENSE = None
STS_OBJ_COND = None
BUT_minsum = None
BUT_optimise = None
RGridOK = True
__ActiveReaction = None
__ScaledReducedCost = False
__BoundaryDetection = True
WX_COLOURS = None
FC_LineStyles = None
# Session command buffer (list of executed commands, see __init__).
__C_BUFF__ = None
# Lookup dictionaries: reaction/species/gene grids and maps between them.
RGdict = None
SGdict = None
GGdict = None
Rmap = None
Smap = None
GPRmap = None
PRGmap = None
OnlyActiveReactions = False
# Magnitudes below this are displayed/treated as zero.
ZERO_TOL = 1.0e-11
# 0 = autocomplete search, 1 = substring search (see EVT_SEARCH_MODE_SELECT).
SEARCH_MODE = 0
RTreeCache = None
ReactionBalanceInfo = None
FontCache = None
# REST client for semantic SBML queries (may stay None if unavailable).
SemanticSBMLClient = None
def __init__(self, cmod):
    """Build the editor frame for CBM model *cmod* and populate its maps."""
    # NOTE(review): the first assignment is a dead store -- mystyle is
    # immediately overwritten with DEFAULT_FRAME_STYLE.
    mystyle = wx.DEFAULT_DIALOG_STYLE
    mystyle = wx.DEFAULT_FRAME_STYLE
    # Size the frame to the display, minus 50 px for the taskbar.
    fSize = wx.DisplaySize()
    fSize = fSize[0], fSize[1] - 50
    wx.Frame.__init__(
        self, None, style=mystyle, name='frame1', pos=(0, 0), size=fSize
    )
    # self.SetSize(self.PanelSize)
    # self.SetMinSize(self.PanelSize)
    # self.ShowFullScreen(True)
    self.SetTitle(
        'PySCeS-CBM Model Editor - editing: %s (%s)'
        % (cmod.getId(), cmod.getName())
    )
    # Main panel is slightly shorter than the frame (status bar allowance).
    fSize = self.GetSize()
    fSize = fSize[0], fSize[1] - 20
    self.MainPanel = wx.Panel(self, -1, size=fSize)
    self.MainPanel.SetSize(fSize)
    self.MainPanel.SetMinSize(fSize)
    self.PanelSize = self.MainPanel.GetSize()
    # Attach the model and reset all per-instance caches/maps (the class
    # attributes are only shared defaults).
    self._cmod_ = cmod
    self.__C_BUFF__ = []
    self.RGridRow = cmod.getReactionIds()
    self.RGdict = {}
    self.SGdict = {}
    self.GGdict = {}
    self.Rmap = {}
    self.Smap = {}
    self.GPRmap = {}
    self.PRGmap = {}
    self.RTreeCache = []
    self.ReactionBalanceInfo = {}
    self.FontCache = {}
    # Construct widgets, then fill the reaction grid.
    self.BuildMainPanel()
    self.CreateRGrid()
    # self.LeftPanel.Refresh()
    # self.MainPanel.UpdateWindowUI()
    # Deferred import avoids a circular import between cbmpy and this module
    # at load time -- TODO confirm.
    import cbmpy as cbm
    self._cbm_ = cbm
    self.OUT_dir = os.getcwd()
    self.UpdateModelStatus()
    self.CreateMaps()
    try:
        # self.RESTClient = cbm.CBNetDB.RESTClient()
        self.SemanticSBMLClient = cbm.CBNetDB.SemanticSBML()
    except:
        # NOTE(review): bare except deliberately keeps the editor usable
        # offline, but it also hides programming errors; the message text
        # contains a typo ('serices') left as-is since it is runtime output.
        print('REST web serices not available')
def EVT_SEARCH_MODE_SELECT(self, event):
    """Menu handler: switch between autocomplete (id 211) and substring
    (id 212) reaction search.  Other ids leave the state untouched."""
    eid = event.GetId()
    if eid in (211, 212):
        autocomplete = eid == 211
        # Mode 0 = autocomplete, mode 1 = subsearch; the menu checks mirror
        # the choice and the search box is only active in autocomplete mode.
        self.SEARCH_MODE = 0 if autocomplete else 1
        self.searchSelectmenu.Check(211, autocomplete)
        self.searchSelectmenu.Check(212, not autocomplete)
        self.SEARCH_OBJ.Enable(autocomplete)
    print('Search mode selected:', self.SEARCH_MODE, event.GetId())
def BuildMainPanel(self):
# build some frame stuff
# Setting up the menu.
filemenu = wx.Menu()
menuSave = filemenu.Append(
101, "&Save Model", " Save to SBML", kind=wx.ID_SAVE
)
menuExport = filemenu.Append(
102,
"Save S&ession",
" Export session as Python script",
kind=wx.ID_SAVE,
)
menuAbout = filemenu.Append(
103, "&About", " Information about this program", kind=wx.ID_ABOUT
)
menuExit = filemenu.Append(
104, "E&xit", " Terminate the program", kind=wx.ID_EXIT
)
self.searchSelectmenu = wx.Menu()
self.CHECK_AUTOCOMP = self.searchSelectmenu.Append(
211, 'AutoComplete', kind=wx.ITEM_CHECK
)
self.CHECK_SUBSEARCH = self.searchSelectmenu.Append(
212, 'Subsearch', kind=wx.ITEM_CHECK
)
self.Bind(wx.EVT_MENU, self.EVT_SEARCH_MODE_SELECT, self.CHECK_AUTOCOMP)
self.Bind(wx.EVT_MENU, self.EVT_SEARCH_MODE_SELECT, self.CHECK_SUBSEARCH)
self.searchSelectmenu.Check(self.CHECK_AUTOCOMP.GetId(), True)
optionsMenu = wx.Menu()
optionsMenu.AppendMenu(201, '&Search mode', self.searchSelectmenu)
actionMenu = wx.Menu()
menuAnalyseBalances = actionMenu.Append(
301, 'Run &Balance Checker', kind=wx.ID_DEFAULT
)
SemSBMLMenu = wx.Menu()
menuSemSBMLMenu_name = SemSBMLMenu.Append(311, 'Name', kind=wx.ID_DEFAULT)
menuSemSBMLMenu_id = SemSBMLMenu.Append(312, 'Id', kind=wx.ID_DEFAULT)
actionMenu.AppendMenu(302, 'Run Semantic &SBML', SemSBMLMenu)
self.Bind(wx.EVT_MENU, self.MENUAnalyseBalances, menuAnalyseBalances)
self.Bind(wx.EVT_MENU, self.SemSBML_name, menuSemSBMLMenu_name)
self.Bind(wx.EVT_MENU, self.SemSBML_id, menuSemSBMLMenu_id)
# Creating the menubar and statusbar
menuBar = wx.MenuBar()
menuBar.Append(filemenu, "&File") # Adding the "filemenu" to the MenuBar
menuBar.Append(
optionsMenu, "&Options"
) # Adding the "filemenu" to the MenuBar
menuBar.Append(
actionMenu, "&Action"
) # Adding the "filemenu" to the MenuBar
self.SetMenuBar(menuBar) # Adding the MenuBar to the Frame content.
# Menu Events.
self.Bind(wx.EVT_MENU, self.MENUOnSave, menuSave)
self.Bind(wx.EVT_MENU, self.MENUExport, menuExport)
self.Bind(wx.EVT_MENU, self.MENUOnExit, menuExit)
self.Bind(wx.EVT_MENU, self.MENUOnAbout, menuAbout)
self.StatusBar = (
self.CreateStatusBar()
) # A StatusBar in the bottom of the window
self.SetStatusText("Ready!")
# create the sizers
self.mainSizer = wx.BoxSizer(wx.HORIZONTAL)
# Checkbox
## self.insure = wx.CheckBox(self, label="Do you want Insured Shipment ?")
## grid.Add(self.insure, pos=(4,0), span=(1,2), flag=wx.BOTTOM, border=5)
## self.Bind(wx.EVT_CHECKBOX, self.EvtCheckBox, self.insure)
# Radio Boxes
## radioList = ['blue', 'red', 'yellow', 'orange', 'green', 'purple', 'navy blue', 'black', 'gray']
## rb = wx.RadioBox(self, label="What color would you like ?", pos=(20, 210), choices=radioList, majorDimension=3,
## style=wx.RA_SPECIFY_COLS)
## grid.Add(rb, pos=(5,0), span=(1,2))
## self.Bind(wx.EVT_RADIOBOX, self.EvtRadioBox, rb)
# create the Lpanel
leftSizer = wx.BoxSizer(wx.VERTICAL)
self.LeftPanel = wx.Panel(self.MainPanel, -1)
self.RGridGap = 80
gridSize = (
self.PanelSize[0] * self.LPwidth,
self.PanelSize[1] - self.RGridGap,
)
self.RGridInitSize = gridSize
self.LeftPanel.SetMinSize(gridSize)
# Define a dynamically updatable grid
self.RGrid_scrollwindow = wx.ScrolledWindow(
id=-1,
size=gridSize,
name='RGrid_scrollwindow',
parent=self.LeftPanel,
style=wx.HSCROLL | wx.VSCROLL,
)
self.RGrid = wx.grid.Grid(
id=-1,
name='RGrid',
size=gridSize,
parent=self.RGrid_scrollwindow,
style=0,
)
self.RGridCol = len(self.rlabels)
self.RGrid.CreateGrid(len(self._cmod_.reactions), self.RGridCol)
# self.RGrid.SetSize(gridSize)
# self.RGrid.SetMinSize(gridSize)
## self.RGrid.SetMargins(5,5)
self.RGrid.EnableScrolling(True, True)
self.RGrid.SetScrollbars(10, 10, 10, 10)
self.RGrid.Bind(wx.grid.EVT_GRID_SELECT_CELL, self.EvtRGridCellSelect)
self.RGrid.Bind(wx.grid.EVT_GRID_CELL_CHANGE, self.EvtRGridCellChange)
# Some buttons
buttonSizer = wx.BoxSizer(wx.HORIZONTAL)
ButtonPanel = wx.Panel(self.LeftPanel, -1)
# ButtonPanel.SetSize((20,30))
# ButtonPanel.SetMinSize((20,30))
self.BUT_optimise = wx.Button(ButtonPanel, label="Optimize")
self.BUT_minsum = wx.Button(ButtonPanel, label="Min. SumAbsFlux")
self.Bind(wx.EVT_BUTTON, self.EVT_BUT_optimise, self.BUT_optimise)
self.Bind(wx.EVT_BUTTON, self.EVT_BUT_minsum, self.BUT_minsum)
buttonSizer.Add(self.BUT_optimise, 1, wx.CENTER)
buttonSizer.Add(self.BUT_minsum, 1, wx.CENTER)
ButtonPanel.SetSizer(buttonSizer)
# add status text controls
statusSizer = wx.GridSizer(2, 4, 1, 1)
StatusPanel = wx.Panel(
self.LeftPanel, -1, size=wx.Size(self.PanelSize[0] * self.LPwidth, -1)
)
STS_OBJ_FUNC_LBL = wx.TextCtrl(
StatusPanel, -1, style=wx.TE_READONLY | wx.TE_CENTER
)
STS_OBJ_FUNC_LBL.write('ObjValue')
STS_OBJ_FUNC_LBL.SetBackgroundColour(wx.Colour(255, 255, 153))
statusSizer.Add(STS_OBJ_FUNC_LBL)
STS_OBJ_SENSE_LBL = wx.TextCtrl(
StatusPanel, -1, style=wx.TE_READONLY | wx.TE_CENTER
)
STS_OBJ_SENSE_LBL.write('ObjSense')
STS_OBJ_SENSE_LBL.SetBackgroundColour(wx.Colour(255, 255, 153))
statusSizer.Add(STS_OBJ_SENSE_LBL)
STS_OBJ_COND_LBL = wx.TextCtrl(
StatusPanel, -1, style=wx.TE_READONLY | wx.TE_CENTER
)
STS_OBJ_COND_LBL.write('ObjStatus')
STS_OBJ_COND_LBL.SetBackgroundColour(wx.Colour(255, 255, 153))
statusSizer.Add(STS_OBJ_COND_LBL)
SEARCH_OBJ_LBL = wx.TextCtrl(
StatusPanel, -1, style=wx.TE_READONLY | wx.TE_CENTER
)
SEARCH_OBJ_LBL.write('SearchReactions')
SEARCH_OBJ_LBL.SetMinSize(wx.Size(130, -1))
SEARCH_OBJ_LBL.SetBackgroundColour(wx.Colour(102, 255, 255))
statusSizer.Add(SEARCH_OBJ_LBL)
## the combobox Control
# self.SEARCH_SELECT_COMB = wx.ComboBox(StatusPanel, size=wx.Size(130, -1), choices=['AutoComplete', 'SubString'], style=wx.CB_READONLY)
# self.SEARCH_SELECT_COMB.SetStringSelection('AutoComplete')
# statusSizer.Add(self.SEARCH_SELECT_COMB)
## self.Bind(wx.EVT_COMBOBOX, self.EvtComboBox, self.edithear)
self.STS_OBJ_FUNC = wx.TextCtrl(
StatusPanel, -1, style=wx.TE_READONLY | wx.TE_CENTER
)
statusSizer.Add(self.STS_OBJ_FUNC)
self.STS_OBJ_SENSE = wx.TextCtrl(
StatusPanel, -1, style=wx.TE_READONLY | wx.TE_CENTER
)
statusSizer.Add(self.STS_OBJ_SENSE)
self.STS_OBJ_COND = wx.TextCtrl(
StatusPanel, -1, style=wx.TE_READONLY | wx.TE_CENTER
)
statusSizer.Add(self.STS_OBJ_COND)
def SearchSelectCallback(sList):
print('Selection list', sList)
self.UpdateReactionGraph(sList[0])
self.updateInfoFromReactionName(sList[0])
self.SelectGridRow(sList[0])
try:
self.SEARCH_OBJ = TextCtrlAutoComplete(
StatusPanel,
choices=self._cmod_.getReactionIds(),
selectCallback=SearchSelectCallback,
style=wx.TE_LEFT,
)
self.SEARCH_OBJ.SetMinSize(wx.Size(130, -1))
self.SEARCH_OBJ.Enable(True)
except:
self.SEARCH_OBJ = wx.TextCtrl(
StatusPanel,
-1,
choices=self._cmod_.getReactionIds(),
style=wx.TE_READONLY | wx.TE_CENTER,
)
self.SEARCH_OBJ.SetMinSize(wx.Size(130, -1))
self.SEARCH_OBJ.Enable(True)
statusSizer.Add(self.SEARCH_OBJ)
StatusPanel.SetSizer(statusSizer)
self.StatusPanel = StatusPanel
leftSizer.Add(StatusPanel)
leftSizer.AddSpacer(2)
leftSizer.Add(ButtonPanel)
leftSizer.AddSpacer(2)
leftSizer.Add(self.RGrid_scrollwindow)
self.LeftPanel.SetSizer(leftSizer)
# create the Rpanel notebook
rightSizer = wx.BoxSizer(wx.VERTICAL)
self.RightPanel = wx.Panel(self.MainPanel, -1)
NoteB1size = (
self.PanelSize[0] * self.RPwidth,
self.PanelSize[1] * self.RPheight,
)
self.NoteB1 = wx.Notebook(self.RightPanel, size=NoteB1size)
self.NoteB1.SetSize(NoteB1size)
self.NoteB1.SetMinSize(NoteB1size)
## self.NoteB1.SetPadding((10,-1))
# create panels
RinfSize = wx.Size(NoteB1size[0] - 8, NoteB1size[1] - 25)
self.NoteB1_Psession = wx.Panel(self.NoteB1, -1)
self.NoteB1_Preaction = wx.Panel(self.NoteB1, -1)
self.NoteB1_Pspecies = wx.Panel(self.NoteB1, -1)
self.NoteB1_Prelate = wx.Panel(self.NoteB1, -1)
self.NoteB1_Pgene = wx.Panel(self.NoteB1, -1)
# self.NoteB1_PRedit = wx.Panel(self.NoteB1, -1)
self.NoteB1_PRedit = wx.lib.scrolledpanel.ScrolledPanel(
self.NoteB1,
-1,
style=wx.TAB_TRAVERSAL | wx.SUNKEN_BORDER | wx.HSCROLL | wx.VSCROLL,
)
self.NoteB1_PRedit.SetAutoLayout(1)
self.NoteB1_PRedit.SetupScrolling()
# create pybox: python script window
self.PyBox = wx.TextCtrl(
self.NoteB1_Psession,
-1,
size=self.NoteB1.GetVirtualSize(),
style=wx.TE_MULTILINE | wx.HSCROLL,
)
PyBox_sizer = wx.BoxSizer(wx.VERTICAL)
PyBox_sizer.Add(self.PyBox, -1, wx.EXPAND)
# self.NoteB1_Psession.Fit(PyBox_sizer)
self.PyBox.SetEditable(False)
self.writeCmd("#############################\n# ")
self.writeCmd("# PySCeS-CBM GUI generated command file ")
self.writeCmd("# Please note that the model instance is: cmod ")
self.writeCmd("#\n#############################\n")
self.writeCmd("import cbmpy\n")
self.writeCmd("# cmod = cbmpy.CBRead.readSBML2FBA(ModelFile, ModelDir)\n")
self.PyBox.SetMinSize(self.NoteB1.GetVirtualSize())
# create html panel for reaction information
self.Rinfbox = wx.html.HtmlWindow(
self.NoteB1_Preaction,
-1,
size=self.NoteB1.GetVirtualSize(),
style=wx.html.HW_SCROLLBAR_AUTO,
)
self.Rinfbox.SetBorders(0)
Rinfbox_sizer = wx.BoxSizer(wx.VERTICAL)
Rinfbox_sizer.Add(self.Rinfbox, 0, wx.EXPAND)
self.Rinfbox.SetMinSize(self.NoteB1.GetVirtualSize())
# create reaction editor
self.FontCache.update(
{
'SanSer12CS': wx.Font(
12, wx.SWISS, wx.NORMAL, wx.NORMAL, False, u'Comic Sans MS'
)
}
)
self.FontCache.update(
{'SanSer12': wx.Font(12, wx.SWISS, wx.NORMAL, wx.NORMAL, False)}
)
self.FontCache.update(
{'SanSer10': wx.Font(10, wx.SWISS, wx.NORMAL, wx.NORMAL, False)}
)
# create the box sizer
Reditbox_sizer = wx.BoxSizer(wx.VERTICAL)
# create the edit button
# self.BUT_EnableEdit = wx.Button(self.NoteB1_PRedit, label="Edit Reaction")
# self.Bind(wx.EVT_BUTTON, self.EVT_BUT_EnableEdit, self.BUT_EnableEdit)
# Reditbox_sizer.Add(self.BUT_EnableEdit)
# create the component grid
# Reditgrid_sizer = wx.GridSizer(3,2,1,1)
# grow = 10
# gcol = 2
# cntr = True
# self.TEXTBOXES_Redit = []
# for tp in range(grow*gcol):
# cellHeight = -1
# if cntr:
# tx = wx.TextCtrl(self.NoteB1_PRedit, style=wx.DEFAULT | wx.TE_READONLY | wx.TE_CENTER)
# tx.SetSize(wx.Size(RinfSize[1]/4, cellHeight))
# tx.SetMinSize(wx.Size(RinfSize[1]/4, cellHeight))
# tx.SetFont(self.FontCache['SanSer10'])
# cntr = False
# else:
# tx = wx.TextCtrl(self.NoteB1_PRedit, style=wx.DEFAULT | wx.TE_LEFT | wx.EXPAND )
# tx.SetSize(wx.Size(RinfSize[1]/2, cellHeight))
# tx.SetMinSize(wx.Size(RinfSize[1]/2, cellHeight))
# tx.SetFont(self.FontCache['SanSer10'])
# cntr = True
# tx.Disable()
# tx.WriteText(str(tp+1))
# tx.PID = str(tp+1)
# Reditgrid_sizer.Add(tx, wx.EXPAND)
# self.TEXTBOXES_Redit.append(tx)
# Reditbox_sizer.Add(Reditgrid_sizer, wx.EXPAND) # add to boxsizer
# create the annotate button
# self.BUT_Annotate = wx.Button(self.NoteB1_PRedit, label="SemanticSBML")
# self.Bind(wx.EVT_BUTTON, self.EVT_BUT_Annotate, self.BUT_Annotate)
# Reditbox_sizer.Add(self.BUT_Annotate)
# the big text box
# self.TEXTBOX_Annotate = wx.TextCtrl(self.NoteB1_PRedit, style=wx.TE_MULTILINE | wx.EXPAND )
# self.TEXTBOX_Annotate.SetMinSize(wx.Size(self.NoteB1.GetVirtualSize()[1], -1))
# self.TEXTBOX_Annotate.SetFont(self.FontCache['SanSer10'])
# self.TEXTBOX_Annotate.Disable()
# Reditbox_sizer.Add(self.TEXTBOX_Annotate, wx.EXPAND) # add to boxsizer
# add boxSizer to Panel
self.NoteB1_PRedit.SetSizer(Reditbox_sizer, wx.DEFAULT)
self.NoteB1_PRedit.SetMinSize(self.NoteB1.GetVirtualSize())
# create html panel for reagent information
self.Sinfbox = wx.lib.ClickableHtmlWindow.PyClickableHtmlWindow(
self.NoteB1_Pspecies,
-1,
size=self.NoteB1.GetVirtualSize(),
style=wx.html.HW_SCROLLBAR_AUTO,
)
self.Sinfbox.SetBorders(0)
Sinfbox_sizer = wx.BoxSizer(wx.VERTICAL)
Sinfbox_sizer.Add(self.Sinfbox, -1, wx.EXPAND)
self.Sinfbox.SetMinSize(self.NoteB1.GetVirtualSize())
# Create canvas panels for graphical representations
wx.lib.colourdb.updateColourDB()
self.WX_COLOURS = wx.lib.colourdb.getColourList()
self.FC_LineStyles = list(FloatCanvas.DrawObject.LineStyleList)
# create a met/gen canvas and do voodoo to get it to fill the notebook panel
self.MetCanvas = NavCanvas.NavCanvas(
self.NoteB1_Prelate,
-1,
size=self.NoteB1.GetVirtualSize(),
Debug=0,
ProjectionFun=None,
BackgroundColor="DARK SLATE BLUE",
)
self.FCanvas_met = self.MetCanvas.Canvas
self.FCanvas_met.SetProjectionFun(None)
self.FCanvas_met.InitAll()
self.GenCanvas = NavCanvas.NavCanvas(
self.NoteB1_Pgene,
-1,
size=self.NoteB1.GetVirtualSize(),
Debug=0,
ProjectionFun=None,
BackgroundColor="DARK SLATE BLUE",
)
self.FCanvas_gen = self.GenCanvas.Canvas
self.FCanvas_gen.SetProjectionFun(None)
self.FCanvas_gen.InitAll()
NC1_sizer = wx.BoxSizer(wx.VERTICAL)
NC1_sizer.Add(self.MetCanvas, 1, wx.EXPAND)
NC2_sizer = wx.BoxSizer(wx.VERTICAL)
NC2_sizer.Add(self.GenCanvas, 1, wx.EXPAND)
self.FCanvas_met.SetMinSize(self.NoteB1.GetVirtualSize())
self.FCanvas_gen.SetMinSize(self.NoteB1.GetVirtualSize())
## self.FCanvas_met.Bind(FloatCanvas.EVT_LEFT_DOWN, self.EVT_FC1_onClick)
## self.FCanvas_gen.Bind(FloatCanvas.EVT_LEFT_DOWN, self.EVT_FC2_onClick)
# add the pages to the notebook with the label to show on the tab
self.NoteB1.AddPage(self.NoteB1_Psession, "Session")
self.NoteB1.AddPage(self.NoteB1_Preaction, "Reaction")
self.NoteB1.AddPage(self.NoteB1_Prelate, "Metabolism")
# TODO
self.NoteB1.AddPage(self.NoteB1_Pgene, "Genes")
self.NoteB1.AddPage(self.NoteB1_PRedit, "ReacEdt")
self.NoteB1.AddPage(self.NoteB1_Pspecies, "MIRIAM")
rightSizer.Add(self.NoteB1)
# self.RightPanel.Fit(rightSizer)
# Add components to the main panel
self.mainSizer.Add(self.LeftPanel, 1, wx.EXPAND)
self.mainSizer.Add(self.RightPanel, 1, wx.EXPAND)
self.MainPanel.SetSizerAndFit(self.mainSizer)
## Resize control
self.Bind(wx.EVT_SIZE, self.EVT_FRAME_resize, self)
##GENERIC click event
# self.Bind(wx.EVT_SIZE, self.EVT_onClick, self.MainPanel)
def UpdateModelStatus(self):
self.STS_OBJ_SENSE.Clear()
self.STS_OBJ_COND.Clear()
self.STS_OBJ_FUNC.Clear()
self.STS_OBJ_COND.write(str(self._cmod_.SOLUTION_STATUS))
if self._cmod_.SOLUTION_STATUS == 'LPS_OPT':
self.STS_OBJ_COND.SetBackgroundColour(wx.Colour(255, 255, 255))
else:
self.STS_OBJ_COND.SetBackgroundColour(wx.Colour(255, 0, 51))
if self._cmod_.getActiveObjective() != None:
self.STS_OBJ_FUNC.write(
str(self._cmod_.getActiveObjective().getValue())
)
self.STS_OBJ_SENSE.write(self._cmod_.getActiveObjective().operation)
else:
self.STS_OBJ_FUNC.write('None')
self.STS_OBJ_SENSE.write('None')
    def UpdateReactionInfo(self, rid):
        """Render an HTML summary of reaction *rid* into the 'Reaction' tab.

        The page contains the reaction name and equation, a properties /
        annotations table, optional mass/charge balancing information (only
        when ``self.ReactionBalanceInfo`` has been populated, e.g. by
        MENUAnalyseBalances) and one table per reagent species.
        """
        print('Updating reaction info')
        r = self._cmod_.getReaction(rid)
        # page header: id, name and the human readable equation
        rs = '<h2>%s</h2><p>%s</p>' % (rid, r.getName())
        rs += '<h3>Equation</h3><p>%s</p>' % self.GetEquation(r)
        rs += '<p><br/></p>'
        ## rs += '<h3>Details</h3>'
        # properties + annotations table for the reaction itself
        props = '<tr><th colspan="2"><strong>Properties</strong></th></tr>'
        props += (
            '<tr><td>%s</td><td>%s</td></tr><tr><td>%s</td><td>%s</td></tr>'
            % ('Reversible', r.reversible, 'Exchange', r.is_exchange)
        )
        props += '<tr><th colspan="2"><strong>Annotations</strong></th></tr>'
        RA = r.getAnnotations()
        for a in RA:
            props += '<tr><td>%s</td><td>%s</td></tr>' % (a, RA[a])
        ## rs += '<table border="1" cellpadding="5" width="70%s">%s</table>' % ('\\%', props)
        rs += '<table border="1" cellpadding="5">%s</table>' % (props)
        # balancing block is only shown once a balance analysis has been run
        if self.ReactionBalanceInfo != None:
            if rid in self.ReactionBalanceInfo:
                rs += '<h3>Balancing information</h3>'
                bi = self.ReactionBalanceInfo[rid]
                # print bi
                bal = (
                    '<tr><td colspan="2">Charge balanced: %s {\'charge\', %s}</td></tr>'
                    % (bi['charge_balanced'], bi['charge'])
                )
                bal += '<tr><td colspan="2">Element balanced: %s %s</td></tr>' % (
                    bi['element_balanced'],
                    bi['elements'],
                )
                # bi['stuff'] rows: index 0 = label, 1 = coefficient,
                # 3 = element composition list; index 2 flags unknown formulae
                # (inferred from the indexing below — TODO confirm against
                # CBTools.checkReactionBalanceElemental)
                for rre in bi['stuff']:
                    sbal = ''
                    coeff = None
                    for det in range(len(rre)):
                        out = ''
                        if det == 0:
                            out = rre[det]
                        if det == 1:
                            coeff = rre[det]
                            # out = re[det]
                        elif det == 3:
                            out = ''
                            if rre[2] == '' or rre[3] == None:
                                out = 'Unknown'
                            else:
                                # scale each element count by the coefficient
                                for e in rre[det]:
                                    out += '%.1f %s, ' % (coeff * e[1], e[0])
                                out = out[:-2]
                        if out != '':
                            sbal += '<td>%s</td>' % out
                    bal += '<tr>%s</tr>' % (sbal)
                rs += '<table border="1" cellpadding="5">%s</table>' % (bal)
        # one properties table per reagent species
        props = ''
        rs += '<h3>Reagents</h3>'
        for sid in r.getSpeciesIds():
            s = self._cmod_.getSpecies(sid)
            props += '<h4>%s</h4><table border="1" cellpadding="5">' % sid
            ## props += '<tr><th colspan="2"><strong>%s</strong></th></tr>' % sid
            props += (
                '<tr><td>%s</td><td>%s</td></tr><tr><td>%s</td><td>%s</td></tr>'
                % ('Name', s.getName(), 'Compartment', s.compartment)
            )
            props += '<tr><td>%s</td><td>%s</td></tr>' % ('Fixed', s.is_boundary)
            props += '<tr><td>%s</td><td>%s</td></tr>' % (
                'Coefficient',
                r.getReagentWithSpeciesRef(sid).coefficient,
            )
            ## props += '<tr><th colspan="2">Annotations</th></tr>'
            props += (
                '<tr><td>%s</td><td>%s</td></tr><tr><td>%s</td><td>%s</td></tr>'
                % ('ChemFormula', s.chemFormula, 'Charge', s.charge)
            )
            ## SA = s.getAnnotations()
            ## for a in RA:
            ## props += '<tr><td>%s</td><td>%s</td></tr>' % (a, SA[a])
            ## rs += '<table border="1" cellpadding="5" width="70%s">%s</table>' % ('\\%', props)
            props += '</table>'
            # NOTE(review): `props` is never reset inside this loop, so each
            # iteration re-appends all earlier species tables — looks
            # unintended; verify before changing.
            rs += props
        ## rs += '<table border="1" cellpadding="3" width="80%s"><tr><th>Property</th><th>Value</th></tr>%s</table>' % ('\\%', props)
        self.Rinfbox.SetPage("<html><body>%s</body></html>" % rs)
def SelectGridRow(self, rid):
self.RGrid.SelectRow(self.RGridRow.index(rid))
self.RGrid.MakeCellVisible(self.RGridRow.index(rid), 0)
    def UpdateReactionGraph(self, rid):
        """Draw reaction *rid* on the 'Metabolism' canvas: the reaction id in
        the centre, its substrates (green boxes) and products (yellow boxes)
        arranged on a circle around it.  Boundary species get a red outline.

        Layouts are cached in ``self.RGdict`` keyed by reaction id, so each
        reaction is only laid out once; later calls redraw the cached objects.
        """
        R = self._cmod_.getReaction(rid)
        subs = []
        prods = []
        self.FCanvas_met.ClearAll()
        # shared text-box styling for all nodes
        STB_LW = 2
        STB_PAD = 0.15
        STB_SZ = 0.25
        STB_LC = 'Black'
        GC = (0, 0)
        FC_OBJ = []
        if rid not in self.RGdict:
            # central node: the reaction itself
            STB = FloatCanvas.ScaledTextBox(
                rid,
                GC,
                Size=STB_SZ,
                PadSize=STB_PAD,
                LineWidth=STB_LW,
                LineColor=STB_LC,
                Family=wx.FONTFAMILY_DEFAULT,
                Weight=wx.BOLD,
                BackgroundColor='White',
            )
            self.__STB_center__(STB)
            STB.Name = rid
            STB.HitFill = True
            STB.HitLineWidth = 5
            FC_OBJ.append(STB)
            # split reagents by coefficient sign: negative = substrate,
            # positive = product (zero-coefficient reagents are skipped)
            for rr in R.reagents:
                if rr.coefficient < 0:
                    subs.append(rr)
                elif rr.coefficient > 0:
                    prods.append(rr)
            # pick node positions on a circle whose radius grows with the
            # number of reagents
            radius = None
            if len(R.reagents) == 1:
                radius = 2
                cxy = [(0, radius)]
            elif len(R.reagents) == 2:
                radius = 2
                cxy = circlePoints(
                    totalPoints=len(R.reagents),
                    startAngle=0,
                    arc=360,
                    circleradius=radius,
                    centerxy=GC,
                    direction='forward',
                    evenDistribution=True,
                )
            else:
                if len(R.reagents) < 12:
                    radius = 3
                else:
                    radius = 10
                cxy = circlePoints(
                    totalPoints=len(R.reagents),
                    startAngle=270,
                    arc=360,
                    circleradius=radius,
                    centerxy=GC,
                    direction='forward',
                    evenDistribution=True,
                )
            # rcntr indexes circle positions across both loops; scntr/pcntr
            # count substrates/products (kept although unused elsewhere)
            scntr = 0
            rcntr = 0
            for s in range(len(subs)):
                # red outline marks boundary (fixed) species
                if self._cmod_.getSpecies(subs[s].species_ref).is_boundary:
                    STB_LC = 'Red'
                else:
                    STB_LC = 'Black'
                # show non-unit stoichiometry as a '{coeff} ' prefix
                if abs(subs[s].coefficient) != 1:
                    cf = '{%.1f} ' % abs(subs[s].coefficient)
                else:
                    cf = ''
                STB = FloatCanvas.ScaledTextBox(
                    '%s%s' % (cf, subs[s].species_ref),
                    cxy[rcntr],
                    Size=STB_SZ,
                    PadSize=STB_PAD,
                    LineWidth=STB_LW,
                    LineColor=STB_LC,
                    Family=wx.SWISS,
                    BackgroundColor='Green',
                )
                STB.Name = subs[s].species_ref
                self.__STB_center__(STB)
                STB.HitFill = True
                STB.HitLineWidth = 5
                FC_OBJ.append(STB)
                scntr += 1
                rcntr += 1
            pcntr = 0
            for p in range(len(prods)):
                if self._cmod_.getSpecies(prods[p].species_ref).is_boundary:
                    STB_LC = 'Red'
                else:
                    STB_LC = 'Black'
                if abs(prods[p].coefficient) != 1:
                    cf = '{%.1f} ' % abs(prods[p].coefficient)
                else:
                    cf = ''
                STB = FloatCanvas.ScaledTextBox(
                    '%s%s' % (cf, prods[p].species_ref),
                    cxy[rcntr],
                    Size=STB_SZ,
                    PadSize=STB_PAD,
                    LineWidth=STB_LW,
                    LineColor=STB_LC,
                    Family=wx.SWISS,
                    BackgroundColor='Yellow',
                )
                STB.Name = prods[p].species_ref
                self.__STB_center__(STB)
                STB.HitFill = True
                STB.HitLineWidth = 5
                FC_OBJ.append(STB)
                pcntr += 1
                rcntr += 1
            # cache the laid-out objects for subsequent redraws
            self.RGdict.update({rid: {'obj': FC_OBJ}})
        ## print 'Drawing from scratch'
        else:
            ## print 'Using cached graph'
            FC_OBJ = self.RGdict[rid]['obj']
        self.FCanvas_met.AddObjects(FC_OBJ)
        # every node is clickable and routed to the shared hit handler
        for o in FC_OBJ:
            o.Bind(FloatCanvas.EVT_FC_LEFT_DOWN, self.HIT_STB_onClick)
        self.FCanvas_met.Draw(True)
        self.FCanvas_met.ZoomToBB()
def __STB_center__(self, stb):
box = stb.GetBoxRect()
stb.SetPoint(
(box[0][0] - abs(box[1][0]) / 2.0, box[0][1] - abs(box[1][1]) / 2.0)
)
return stb.GetBoxRect()
def UpdateSpeciesGraph(self, sid):
# print sid, self.Smap[sid]
STB_LW = 2
STB_PAD = 0.15
STB_SZ = 0.25
STB_LC = 'Black'
GC = (0, 0)
radius = None
rList = self.Smap[sid]
if self.OnlyActiveReactions:
rList = [
j
for j in self.Smap[sid]
if round(abs(self._cmod_.getReaction(j).getValue()), 6) != 0.0
]
self.FCanvas_met.ClearAll()
FC_OBJ = []
if sid not in self.SGdict:
orphan = False
print('ReactionList', rList)
if len(rList) == 0:
radius = 2
cxz = GC
elif len(rList) == 1:
radius = 2
cxy = [(0, radius)]
elif len(rList) == 2:
radius = 2
cxy = circlePoints(
totalPoints=len(rList),
startAngle=0,
arc=360,
circleradius=radius,
centerxy=GC,
direction='forward',
evenDistribution=True,
)
else:
if len(rList) < 12:
radius = 3
elif len(rList) < 24:
radius = 6
elif len(rList) < 36:
radius = 9
else:
radius = 15
cxy = circlePoints(
totalPoints=len(rList),
startAngle=270,
arc=360,
circleradius=radius,
centerxy=GC,
direction='forward',
evenDistribution=True,
)
print(cxy)
STB = FloatCanvas.ScaledTextBox(
sid,
GC,
Size=STB_SZ,
PadSize=STB_PAD,
LineWidth=STB_LW,
LineColor=STB_LC,
Family=wx.NORMAL,
Weight=wx.BOLD,
BackgroundColor='White',
)
self.__STB_center__(STB)
STB.Name = sid
STB.HitFill = True
STB.HitLineWidth = 5
FC_OBJ.append(STB)
rcntr = 0
for R in range(len(rList)):
cf = ''
STB = FloatCanvas.ScaledTextBox(
'%s%s' % (cf, rList[R]),
cxy[R],
Size=STB_SZ,
PadSize=STB_PAD,
LineWidth=STB_LW,
LineColor=STB_LC,
Family=wx.SWISS,
BackgroundColor='Yellow',
)
STB.Name = self.Smap[sid][R]
self.__STB_center__(STB)
STB.HitFill = True
STB.HitLineWidth = 5
FC_OBJ.append(STB)
rcntr += 1
self.SGdict.update({sid: {'obj': FC_OBJ}})
## print 'Drawing from scratch'
else:
## print 'Using cached graph'
FC_OBJ = self.SGdict[sid]['obj']
self.FCanvas_met.AddObjects(FC_OBJ)
for o in FC_OBJ:
o.Bind(FloatCanvas.EVT_FC_LEFT_DOWN, self.HIT_STB_onClick)
self.FCanvas_met.Draw(True)
self.FCanvas_met.ZoomToBB()
def CreateMaps(self):
for S in self._cmod_.species:
self.Smap.update({S.getId(): S.isReagentOf()})
for R in self._cmod_.reactions:
self.Rmap.update({R.getId(): R.getSpeciesIds()})
self.GPRmap = self._cmod_.getAllGeneProteinAssociations()
self.PRGmap = self._cmod_.getAllProteinGeneAssociations()
def GetEquation(self, R):
sub = ''
prod = ''
for r in R.reagents:
coeff = abs(r.coefficient)
if r.role == 'substrate':
if coeff == 1.0:
sub += '%s + ' % (r.species_ref)
else:
sub += '{%s} %s + ' % (coeff, r.species_ref)
else:
if coeff == 1.0:
prod += '%s + ' % (r.species_ref)
else:
prod += '{%s} %s + ' % (coeff, r.species_ref)
if R.reversible:
eq = '%s = %s' % (sub[:-3], prod[:-2])
else:
eq = '%s > %s' % (sub[:-3], prod[:-2])
return eq
    def CreateRGrid(self):
        """Populate the reaction grid from scratch: column labels plus one row
        per reaction with bounds, flux value, balance status, exchange flag,
        reduced cost, id and name.  Rows belonging to flux objectives are
        highlighted in yellow.
        """
        assert len(self.rlabels) == self.RGridCol, '\nlabels != #col'
        grid = self.RGrid
        reactions = self._cmod_.reactions
        # reactions that take part in the active objective get highlighted
        fluxObjs = []
        if self._cmod_.getActiveObjective() != None:
            fluxObjs = self._cmod_.getActiveObjective().getFluxObjectiveReactions()
        for c in range(self.RGridCol):
            grid.SetColLabelValue(c, self.rlabels[c])
        for r in range(len(reactions)):
            for c in range(self.RGridCol):
                ## print r,c
                ## print reactions[r].getId()
                if self.rlabels[c] == 'LB':
                    ## grid.SetColFormatFloat(c, 10, 3)
                    grid.SetCellValue(
                        r,
                        c,
                        '%s'
                        % self._cmod_.getReactionLowerBound(reactions[r].getId()),
                    )
                elif self.rlabels[c] == 'UB':
                    ## grid.SetColFormatFloat(c, 10, 3)
                    grid.SetCellValue(
                        r,
                        c,
                        '%s'
                        % self._cmod_.getReactionUpperBound(reactions[r].getId()),
                    )
                elif self.rlabels[c] == 'd':
                    # 'd' is the flux-delta indicator column, filled in later
                    # by UpdateRGridData
                    grid.SetCellValue(r, c, '%s' % ' ')
                elif self.rlabels[c] == 'Balanced':
                    grid.SetReadOnly(r, c, True)
                    txt = reactions[r].is_balanced
                    grid.SetCellValue(r, c, ' %s' % str(txt))
                    if txt:
                        grid.SetCellBackgroundColour(r, c, wx.Colour(255, 193, 96))
                elif self.rlabels[c] == 'Flux':
                    Rval = reactions[r].getValue()
                    if Rval != None:
                        Rval = round(Rval, 10)
                    else:
                        Rval = 'None'
                    grid.SetCellValue(r, c, ' %s' % Rval)
                    grid.SetReadOnly(r, c, True)
                    # colour by sign
                    if Rval == None or Rval == '' or Rval == 'None' or Rval == 0.0:
                        grid.SetCellBackgroundColour(r, c, wx.Colour(255, 255, 255))
                    elif Rval < 0.0:
                        grid.SetCellBackgroundColour(r, c, wx.Colour(255, 204, 204))
                    elif Rval > 0.0:
                        grid.SetCellBackgroundColour(r, c, wx.Colour(153, 255, 153))
                # boundary detection
                elif self.rlabels[c] == 'Exch':
                    grid.SetReadOnly(r, c, True)
                    grid.SetCellValue(r, c, str(reactions[r].is_exchange))
                elif self.rlabels[c] == 'RCost':
                    grid.SetReadOnly(r, c, True)
                    if not self.__ScaledReducedCost:
                        if reactions[r].reduced_cost != None:
                            rcval = '%2.3e' % reactions[r].reduced_cost
                        else:
                            rcval = 'None'
                        grid.SetCellValue(r, c, rcval)
                elif self.rlabels[c] == 'Reaction':
                    grid.SetCellBackgroundColour(r, c, wx.Colour(198, 226, 255))
                    grid.SetCellValue(r, c, str(reactions[r].getId()))
                    grid.SetReadOnly(r, c, True)
                elif self.rlabels[c] == 'Name':
                    ## grid.SetCellBackgroundColour(r,c,wx.Colour(255,255,153))
                    grid.SetCellValue(r, c, str(reactions[r].getName()))
                else:
                    grid.SetCellValue(r, c, '')
                if reactions[r].getId() in fluxObjs:
                    grid.SetCellBackgroundColour(r, c, wx.Colour(255, 255, 153))
        for c in range(self.RGridCol):
            # NOTE(review): ('Name') is a plain string, so this is a substring
            # test, not a tuple membership test — it behaves correctly for the
            # current labels but ('Name',) was probably intended.
            if self.rlabels[c] not in ('Name'):
                grid.AutoSizeColumn(c, True)
            else:
                grid.SetColMinimalWidth(c, 40)
    def UpdateRGridData(self):
        """
        Updates Rgrid numeric data.

        Refreshes the 'Flux', 'RCost' and 'Balanced' columns in place after a
        solver run: writes the new flux values, marks increases/decreases in
        the 'd' indicator column, colours cells by flux sign, and (when
        boundary detection is enabled) highlights bounds the flux sits on.
        """
        grid = self.RGrid
        reactions = self._cmod_.reactions
        # reactions in the active objective keep their yellow highlight
        if self._cmod_.getActiveObjective() != None:
            fluxObjs = self._cmod_.getActiveObjective().getFluxObjectiveReactions()
        else:
            fluxObjs = []
        for r in range(len(reactions)):
            for c in range(self.RGridCol):
                ## if self.rlabels[c] == 'LB':
                ## grid.SetCellValue(r, c, '%s' % self._cmod_.getFluxBoundByReactionID(reactions[r].getId(), 'lower').getValue())
                ## elif self.rlabels[c] == 'UB':
                ## grid.SetCellValue(r, c, '%s' % self._cmod_.getFluxBoundByReactionID(reactions[r].getId(), 'upper').getValue())
                if self.rlabels[c] == 'Flux':
                    Rval = reactions[r].getValue()
                    # previous displayed value, used for the delta indicator
                    try:
                        Rval_curr = float(grid.GetCellValue(r, c))
                    except:
                        Rval_curr = None
                    if Rval != None:
                        Rval = round(Rval, 10)
                    else:
                        Rval = 'None'
                    grid.SetCellValue(r, c, ' %s' % Rval)
                    # colour delta
                    D_col_idx = self.rlabels.index('d')
                    if (
                        Rval in self.NOVAL
                        or Rval_curr in self.NOVAL
                        or abs(Rval - Rval_curr) <= self.ZERO_TOL
                    ):
                        # unchanged (or unknown): blank the indicator
                        grid.SetCellValue(r, D_col_idx, ' ')
                        grid.SetCellTextColour(
                            r, D_col_idx, wx.Colour(255, 255, 255)
                        )
                    elif abs(round(Rval, 10)) < abs(round(Rval_curr, 10)):
                        # magnitude decreased: red down-arrow
                        grid.SetCellValue(r, D_col_idx, ' \/ ')
                        grid.SetCellTextColour(r, D_col_idx, wx.Colour(255, 0, 0))
                    elif abs(round(Rval, 10)) > abs(round(Rval_curr, 10)):
                        # magnitude increased: green up-arrow
                        grid.SetCellValue(r, D_col_idx, ' /\ ')
                        grid.SetCellTextColour(r, D_col_idx, wx.Colour(0, 204, 0))
                    # colour by sign J
                    if Rval in self.NOVAL or Rval == 0.0:
                        grid.SetCellBackgroundColour(r, c, wx.Colour(255, 255, 255))
                    elif Rval < 0.0:
                        grid.SetCellBackgroundColour(r, c, wx.Colour(255, 204, 204))
                    elif Rval > 0.0:
                        grid.SetCellBackgroundColour(r, c, wx.Colour(153, 255, 153))
                    # boundary detection
                    if self.__BoundaryDetection:
                        LB = self._cmod_.getReactionLowerBound(reactions[r].getId())
                        UB = self._cmod_.getReactionUpperBound(reactions[r].getId())
                        # best-effort: comparisons can fail when Rval is the
                        # string 'None' or a bound is missing, hence the
                        # deliberate broad excepts
                        try:
                            if abs(Rval - round(LB, 10)) <= self.ZERO_TOL:
                                grid.SetCellBackgroundColour(
                                    r,
                                    self.rlabels.index('LB'),
                                    wx.Colour(255, 204, 204),
                                )
                            else:
                                grid.SetCellBackgroundColour(
                                    r,
                                    self.rlabels.index('LB'),
                                    wx.Colour(255, 255, 255),
                                )
                        except:
                            print('INFO: LowerBound detector failed')
                        try:
                            if abs(Rval - round(UB, 10)) <= self.ZERO_TOL:
                                grid.SetCellBackgroundColour(
                                    r,
                                    self.rlabels.index('UB'),
                                    wx.Colour(153, 255, 153),
                                )
                            else:
                                grid.SetCellBackgroundColour(
                                    r,
                                    self.rlabels.index('UB'),
                                    wx.Colour(255, 255, 255),
                                )
                        except:
                            print('INFO: UpperBound detector failed')
                elif self.rlabels[c] == 'RCost':
                    if not self.__ScaledReducedCost:
                        if reactions[r].reduced_cost != None:
                            rcval = '%2.3e' % reactions[r].reduced_cost
                        else:
                            rcval = 'None'
                        grid.SetCellValue(r, c, rcval)
                elif self.rlabels[c] == 'Balanced':
                    bval = reactions[r].is_balanced
                    grid.SetCellValue(r, c, str(bval))
                    if bval != None and not bval:
                        grid.SetCellBackgroundColour(r, c, wx.Colour(255, 193, 96))
                if reactions[r].getId() in fluxObjs:
                    grid.SetCellBackgroundColour(r, c, wx.Colour(255, 255, 153))
        grid.ForceRefresh()
## def EvtGridCellSelect(self, event):
## row = event.GetRow()
## col = event.GetCol()
## self.cell_selected_value = self.RGrid.GetCellValue(row, col)
## print 'selected value', self.cell_selected_value
## event.Skip()
def writeCmd(self, txt):
print('cmd: ' + txt)
self.PyBox.write(txt + '\n')
def MENUOnAbout(self, e):
# Create a message dialog box
dlg = wx.MessageDialog(
self,
" PySCes-CBM model editor\n(C) Brett G. Olivier, Amsterdam 2012",
"About PySCes-CBM model editor",
wx.OK,
)
dlg.ShowModal() # Shows it
dlg.Destroy() # finally destroy it when finished.
    def MENUOnExit(self, e):
        # Menu handler: close the main application frame (True forces close).
        self.Close(True)  # Close the frame.
def MENUExport(self, e):
dlg = wx.FileDialog(
self,
"Enter filename",
self.OUT_dir,
"",
"Python files (*.py)|*.py|All files (*.*)|*.*",
wx.FD_SAVE,
)
if dlg.ShowModal() == wx.ID_OK:
OUT_file = dlg.GetFilename()
self.OUT_dir = dlg.GetDirectory()
if OUT_file[-3:] != '.py':
OUT_file += '.py'
F = file(os.path.join(self.OUT_dir, OUT_file), 'w')
F.write(self.PyBox.GetValue())
F.flush()
F.close()
dlg.Destroy()
    def MENUOnSave(self, e):
        """Menu handler: save the current model as an SBML (FBA) file chosen
        via a save dialog; a '.xml' extension is appended if missing."""
        dlg = wx.FileDialog(
            self,
            "Enter filename",
            self.OUT_dir,
            "",
            "SBML files (*.xml)|*.xml|All files (*.*)|*.*",
            wx.FD_SAVE,
        )
        if dlg.ShowModal() == wx.ID_OK:
            # remember the chosen file/directory for subsequent saves
            self.OUT_file = dlg.GetFilename()
            self.OUT_dir = dlg.GetDirectory()
            if self.OUT_file[-4:] != '.xml':
                self.OUT_file += '.xml'
            self._cmod_.inputfile_id = self.OUT_file
            self._cbm_.CBWrite.writeSBML2FBA(
                self._cmod_,
                self.OUT_file,
                directory=self.OUT_dir,
                sbml_level_version=None,
            )
        dlg.Destroy()
    def MENUAnalyseBalances(self, e):
        '''Check the reaction balances: run an elemental/charge balance
        analysis on all reactions, store the result in
        ``self.ReactionBalanceInfo`` and flag unbalanced rows in the grid.'''
        # busy cursor for the duration of the (potentially slow) analysis
        wait = wx.BusyCursor()
        rids = self._cmod_.getReactionIds()
        self.ReactionBalanceInfo = self._cbm_.CBTools.checkReactionBalanceElemental(
            self._cmod_, Rid=rids
        )
        # refresh the 'Balanced' column; unbalanced reactions get orange cells
        for r in range(len(self._cmod_.reactions)):
            for c in range(self.RGridCol):
                if self.rlabels[c] == 'Balanced':
                    bval = self._cmod_.reactions[r].is_balanced
                    self.RGrid.SetCellValue(r, c, str(bval))
                    if bval != None and not bval:
                        self.RGrid.SetCellBackgroundColour(
                            r, c, wx.Colour(255, 193, 96)
                        )
        del wait
        wx.MessageBox('Balance Check Complete', 'Info', wx.OK | wx.ICON_INFORMATION)
def chkFloat(self, val):
try:
x = float(val)
self.StatusBar.SetStatusText('')
return True
except:
self.StatusBar.SetStatusText('Invalid input %s was not a float!' % val)
return False
def EvtRGridCellSelect(self, event):
print("Cell select event")
row = event.GetRow()
if row != self.__ActiveReaction:
cval = self.RGrid.GetCellValue(row, 0)
self.updateInfoFromReactionName(cval)
event.Skip()
    def updateInfoFromReactionName(self, cval):
        """Refresh the reaction HTML panel and graph for reaction id *cval*
        and remember it as the active reaction."""
        self.UpdateReactionInfo(cval)
        # self.UpdateSpeciesInfoForReaction(cval)
        self.UpdateReactionGraph(cval)
        self.__ActiveReaction = cval
    def EvtRGridCellChange(self, event):
        """Grid edit handler: push LB/UB/Name edits through to the model.

        Invalid bound values (not parseable as float) are rejected and the
        cell is restored from the model.  Each accepted change is logged to
        the session script via writeCmd.
        """
        row = event.GetRow()
        col = event.GetCol()
        clbl = self.RGrid.GetColLabelValue(col)
        # All cells have a value, regardless of the editor.
        print('Changed cell: ({}, {})'.format(row, col))
        # column 0 always holds the reaction id
        rid = self.RGrid.GetCellValue(row, 0)
        cell_val = self.RGrid.GetCellValue(row, col)
        print('Row/Col: {} | {} | {}'.format(rid, clbl, cell_val))
        # update reaction info
        self.UpdateReactionInfo(rid)
        print(cell_val, type(cell_val))
        cell_val = str(cell_val)
        if clbl == 'LB':
            if self.chkFloat(cell_val):
                print(cell_val, type(cell_val))
                print('\nLB old', self._cmod_.getReactionLowerBound(rid))
                self._cmod_.setReactionLowerBound(rid, cell_val)
                print('LB new', self._cmod_.getReactionLowerBound(rid))
                # infinities must be quoted in the generated script
                if cell_val == 'inf' or cell_val == '-inf':
                    self.writeCmd(
                        "cmod.setReactionLowerBound('%s', '%s')" % (rid, cell_val)
                    )
                else:
                    self.writeCmd(
                        "cmod.setReactionLowerBound('%s', %s)" % (rid, cell_val)
                    )
            else:
                # invalid input: restore the cell from the model value
                self.RGrid.SetCellValue(
                    row, col, str(self._cmod_.getReactionLowerBound(rid))
                )
        elif clbl == 'UB':
            if self.chkFloat(cell_val):
                print('\nUB old', self._cmod_.getReactionUpperBound(rid))
                self._cmod_.setReactionUpperBound(rid, cell_val)
                print('UB new', self._cmod_.getReactionUpperBound(rid))
                if cell_val == 'inf' or cell_val == '-inf':
                    self.writeCmd(
                        "cmod.setReactionUpperBound('%s', '%s')" % (rid, cell_val)
                    )
                else:
                    self.writeCmd(
                        "cmod.setReactionUpperBound('%s', %s)" % (rid, cell_val)
                    )
            else:
                self.RGrid.SetCellValue(
                    row, col, str(self._cmod_.getReactionUpperBound(rid))
                )
        elif clbl == 'Reaction':
            # reaction ids are read-only; nothing to do
            pass
        elif clbl == 'Name':
            print('\nName old', self._cmod_.getReaction(rid).getName())
            self._cmod_.getReaction(rid).setName(cell_val)
            print('\nName new', self._cmod_.getReaction(rid).getName())
            self.writeCmd("cmod.getReaction('%s').setName('%s')" % (rid, cell_val))
        event.Skip()
# def EVT_BUT_EnableEdit(self,event):
# print 'EVT_BUT_EnableEdit'
# for tb in self.TEXTBOXES_Redit:
# tb.Enable()
# event.Skip()
def SemSBML_id(self, event):
if self.__ActiveReaction != None:
R = self._cmod_.getReaction(self.__ActiveReaction)
self.SelectGridRow(R.getId())
searchString = R.getId()
self.CallSemanticSBML(searchString)
else:
self.Sinfbox.SetPage(
"<html><body>'<h1>Please select a reaction!</h1>'</body></html>"
)
self.NoteB1.ChangeSelection(5)
event.Skip()
def SemSBML_name(self, event):
if self.__ActiveReaction != None:
R = self._cmod_.getReaction(self.__ActiveReaction)
self.SelectGridRow(R.getId())
searchString = R.getName()
self.CallSemanticSBML(searchString)
else:
self.Sinfbox.SetPage(
"<html><body>'<h1>Please select a reaction!</h1>'</body></html>"
)
self.NoteB1.ChangeSelection(5)
event.Skip()
    def CallSemanticSBML(self, searchString):
        """Query the semanticSBML REST annotation service for *searchString*
        and render the matching MIRIAM items as an HTML table in the MIRIAM
        panel.  Falls back to an error page when the service is unreachable
        or HTTP support is missing.
        """
        print('MENUAnalyseSemanticSBML')
        # self.TEXTBOX_Annotate.Enable()
        rs = ''
        if HAVE_URLLIB2:
            wait = wx.BusyCursor()
            # self.TEXTBOX_Annotate.Clear()
            rs = '<h2>SemanticSBML query</h2><br/><h3>%s</h3>' % searchString
            # semanticSBML REST web services
            site_root = "www.semanticsbml.org"
            reply_mode = '.xml'
            try:
                self.SemanticSBMLClient.Connect(site_root)
                searchString = self.SemanticSBMLClient.URLEncode(searchString)
                RESTquery = "/semanticSBML/annotate/search%s?q=%s" % (
                    reply_mode,
                    searchString.strip().replace(' ', '+'),
                )
                print(RESTquery)
                data1 = self.SemanticSBMLClient.Get(RESTquery)
                data1 = self.SemanticSBMLClient.URLDecode(data1)
                self.SemanticSBMLClient.Close()
                # mirror the performed query into the session script
                self.writeCmd('SemanticSBMLClient = cbm.CBNetDB.SemanticSBML()')
                self.writeCmd('SemanticSBMLClient.Connect(\"%s\")' % (site_root))
                self.writeCmd(
                    'data1 = SemanticSBMLClient.Get(\"%s\")' % (RESTquery)
                )
                self.writeCmd('SemanticSBMLClient.Close()')
                print(self.SemanticSBMLClient.GetLog())
                # extract the <item>...</item> payloads from the XML reply
                item_re = re.compile('<item>.+?</item>')
                items = re.findall(item_re, data1)
                items = [
                    i.replace('<item>', '').replace('</item>', '').strip()
                    for i in items
                ]
                # self.TEXTBOX_Annotate.write('SemanticSBML results for: %s\n\n' % (urllib2.unquote(searchString)))
                rs += '<table border="1" cellpadding="5">'
                for i in items:
                    # url = self.SemanticSBMLClient.MiriamURN2IdentifiersURL(i)
                    url = i
                    rs += '<tr><td>%s</td><td><a href=%s>%s</a></td></tr>' % (
                        i,
                        url,
                        url,
                    )
                    print(i)
                    # self.TEXTBOX_Annotate.write('%s\n' % i)<a href="/Education/">NCBI Education</a>
                rs += '</table border="1" cellpadding="5">'
                del wait
            except Exception as ex:
                rs += '<h1>Error connecting to: %s</h1>' % (site_root)
                # self.TEXTBOX_Annotate.WriteText('\n******************************************************\n')
                # self.TEXTBOX_Annotate.WriteText('\n* Error connecting to: %s *\n\n' % (site_root))
                # self.TEXTBOX_Annotate.WriteText('******************************************************\n\n')
                print(ex)
        # NOTE(review): this branch repeats the condition above and is
        # therefore unreachable dead code — the intended condition is
        # unclear; verify before changing.
        elif HAVE_URLLIB2:
            # self.TEXTBOX_Annotate.WriteText('Please select a reaction!')
            rs += '<h1>Please select a reaction!</h1>'
        else:
            # self.TEXTBOX_Annotate.WriteText('HTTPLIB not available')
            rs += '<h1>HTTPLIB not available!</h1>'
        self.Sinfbox.SetPage("<html><body>%s</body></html>" % rs)
def EVT_BUT_optimise(self, event):
wait = wx.BusyCursor()
self._cbm_.CBSolver.analyzeModel(self._cmod_)
self.UpdateRGridData()
self.UpdateModelStatus()
self.writeCmd("cbmpy.CBSolver.analyzeModel(cmod)")
del wait
event.Skip()
    def EVT_BUT_minsum(self, event):
        """'MinSum' button handler: minimize the sum of absolute flux values
        (at 100% of the current objective), then refresh the grid and status
        displays and log the command to the session script."""
        # busy cursor for the duration of the solve
        wait = wx.BusyCursor()
        self._cbm_.CBSolver.cplx_MinimizeSumOfAbsFluxes(
            self._cmod_,
            selected_reactions=None,
            pre_opt=True,
            tol=None,
            objF2constr=True,
            rhs_sense='lower',
            optPercentage=100.0,
            work_dir=None,
            quiet=False,
            debug=False,
            objective_coefficients={},
            return_lp_obj=False,
        )
        self.UpdateRGridData()
        self.UpdateModelStatus()
        self.writeCmd(
            "cbmpy.CBSolver.cplx_MinimizeSumOfAbsFluxes(cmod, selected_reactions=None, pre_opt=True, tol=None, objF2constr=True, rhs_sense='lower', optPercentage=100.0, objective_coefficients={}, return_lp_obj=False)"
        )
        del wait
        event.Skip()
## cbmpy.CBSolver.cplx_MinimizeSumOfAbsFluxes
def PrintCoords(self, event):
print("coords are: {}".format(event.Coords), end=" ")
print("pixel coords are: {}\n".format(event.GetPosition()), end=" ")
def EVT_FRAME_resize(self, event):
# print "Pixel coords are: %s"%(event.GetPosition(),)
print('Panel size', self.MainPanel.GetSize())
# print 'Frame size', self.GetSize()
# print 'Panel minsize', self.MainPanel.GetMinSize()
# print 'Frame minsize', self.GetMinSize()
# print 'Panel virtualsize', self.MainPanel.GetVirtualSize()
# print 'Frame virtualsize', self.GetVirtualSize()
# print 'mainSizer minsize', self.mainSizer.GetMinSize()
self.FRAME_resize()
# print '\n'
    def FRAME_resize(self):
        """Manually re-layout the main panels to track the frame size: the
        left panel (grid) and right panel (notebook pages) are resized using
        the LPwidth/RPwidth/RPheight fractions of the frame's virtual size.
        """
        # get the frame size and adjust the main panel to it
        self.PanelSize = self.GetVirtualSize()
        self.MainPanel.SetSize(self.GetVirtualSize())
        # LPsize = wx.Size(self.PanelSize[0]*self.LPwidth, self.PanelSize[1]*self.LPheight)
        # RPsize = wx.Size(self.PanelSize[0]*self.RPwidth, self.PanelSize[1]*self.RPheight)
        # LPsize = wx.Size(self.PanelSize[0]*self.LPwidth, self.RGridInitSize[1])
        # left panel height leaves RGridGap pixels for the controls above it
        LPsize = wx.Size(
            self.PanelSize[0] * self.LPwidth, self.PanelSize[1] - self.RGridGap
        )
        RPsize = wx.Size(self.PanelSize[0] * self.RPwidth, self.PanelSize[1])
        # using the new panels to resize the components
        # self.StatusPanel.SetMinSize(wx.Size(LPsize[0],self.StatusPanel.GetVirtualSize()[1]))
        self.RGrid_scrollwindow.SetSize(LPsize)
        self.RGrid.SetSize(LPsize)
        self.NoteB1.SetSize(RPsize)
        # notebook page interiors: subtract the border (8px) and tab row (25px)
        RinfSize = wx.Size(
            self.PanelSize[0] * self.RPwidth - 8,
            self.PanelSize[1] * self.RPheight - 25,
        )
        self.PyBox.SetSize(RinfSize)
        self.Rinfbox.SetSize(RinfSize)
        self.Sinfbox.SetSize(RinfSize)
        self.MetCanvas.SetSize(RinfSize)
        self.GenCanvas.SetSize(RinfSize)
        self.RightPanel.SetSize(RPsize)
        # self.LeftPanel.Layout()
        self.MainPanel.CenterOnParent()
def EVT_onClick(self, event):
print("Pixel coords are: {}".format(event.GetPosition()), end=" ")
# print 'Panel size', self.MainPanel.GetSize()
# print 'Frame size', self.GetSize()
# print 'Panel minsize', self.MainPanel.GetMinSize()
# print 'Frame minsize', self.GetMinSize()
# print 'Panel virtualsize', self.MainPanel.GetVirtualSize()
# print 'Frame virtualsize', self.GetVirtualSize()
# print '\nmainSizer minsize', self.mainSizer.GetMinSize()
# print '\n'
def __C_BUFF_ADD__(self, obj):
self.__C_BUFF__.append(obj)
if len(self.__C_BUFF__) > 10:
return self.__C_BUFF__.pop(0)
else:
return None
def HIT_STB_onClick(self, Object):
print('')
print(repr(Object))
print(Object.Name + " got Hit with Left")
if Object.Name in self.Smap:
self.UpdateSpeciesGraph(Object.Name)
if Object.Name in self.Rmap:
self.UpdateReactionGraph(Object.Name)
self.updateInfoFromReactionName(Object.Name)
self.RGrid.SelectRow(self.RGridRow.index(Object.Name))
self.RGrid.MakeCellVisible(self.RGridRow.index(Object.Name), 0)
## Object.SetLineColor('Green')
## self.FCanvas_met.Draw(True)
## self.FCanvas_met.ZoomToBB()
## wx.CallAfter(self.FCanvas_met.ZoomToBB)
## wx.CallAfter(self.FCanvas_met.Draw)
class HtmlWindowMod(wx.html.HtmlWindow):
    """
    wx.html.HtmlWindow subclass that opens clicked links in the user's
    default external browser instead of navigating in-place.
    """

    def __init__(self, *args, **kwargs):
        # Fix: pass self explicitly. The original unbound call
        # ``wx.html.HtmlWindow.__init__(*args, **kwargs)`` dropped self,
        # so the base class would have been initialised on the first
        # positional argument (the parent window) instead of this instance.
        wx.html.HtmlWindow.__init__(self, *args, **kwargs)
        if "gtk2" in wx.PlatformInfo:
            self.SetStandardFonts()

    def OnLinkClicked(self, link):
        """Open *link* externally rather than rendering it in this window."""
        wx.LaunchDefaultBrowser(link.GetHref())
def circlePoints(
    totalPoints=4,
    startAngle=0,
    arc=360,
    circleradius=1,
    centerxy=(0, 0),
    direction='forward',
    evenDistribution=True,
):
    """
    Returns a list of (x, y) points evenly spread around a circle:

    - *totalPoints* how many points (0 returns an empty list)
    - *startAngle* where to start (degrees)
    - *arc* how far to go (degrees)
    - *circleradius* radius
    - *centerxy* origin as an (x, y) tuple
    - *direction* 'forward' or 'backward'
    - *evenDistribution* if True and arc < 360, include both endpoints of
      the arc by spacing points over (totalPoints - 1) intervals

    Coordinates are rounded to 10 decimal places. The angle is measured
    from the positive y-axis (x uses sin, y uses cos), matching the
    original Flash example this was adapted from:
    http://www.lextalkington.com/blog/2009/12/generate-points-around-a-circles-circumference/
    """
    # Robustness: no points requested -> nothing to compute (the original
    # raised ZeroDivisionError here).
    if totalPoints < 1:
        return []
    roundfact = 10
    mpi = math.pi / 180.0
    startRadians = startAngle * mpi
    # For a partial arc with even distribution, spread the points so that
    # the first and last point sit exactly on the arc's two endpoints.
    # Guard totalPoints == 1: the original divided by (totalPoints - 1)
    # and crashed; a single point simply sits at startAngle.
    if arc < 360 and evenDistribution and totalPoints > 1:
        incrementAngle = float(arc) / float(totalPoints - 1)
    else:
        incrementAngle = float(arc) / float(totalPoints)
    incrementRadians = incrementAngle * mpi
    cxy = []
    for _ in range(totalPoints):
        xp = centerxy[0] + math.sin(startRadians) * circleradius
        yp = centerxy[1] + math.cos(startRadians) * circleradius
        if direction == 'forward':
            startRadians += incrementRadians
        else:
            startRadians -= incrementRadians
        cxy.append((round(xp, roundfact), round(yp, roundfact)))
    return cxy
'''
wxPython Custom Widget Collection 20060207
Written By: Edward Flick (eddy -=at=- cdf-imaging -=dot=- com)
Michele Petrazzo (michele -=dot=- petrazzo -=at=- unipex -=dot=- it)
Will Sadkin (wsadkin-=at=- nameconnector -=dot=- com)
Copyright 2006 (c) CDF Inc. ( http://www.cdf-imaging.com )
Contributed to the wxPython project under the wxPython project's license.
'''
# ----------------------------------------------------------------------
def getSmallUpArrowData():
    """Return the raw PNG data for a small (16x16) up-arrow icon,
    embedded as an escaped byte string; used for column-sort indicators."""
    return '\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x10\x00\x00\x00\x10\x08\x06\
\x00\x00\x00\x1f\xf3\xffa\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\
\x00\x00<IDAT8\x8dcddbf\xa0\x040Q\xa4{h\x18\xf0\xff\xdf\xdf\xffd\x1b\x00\xd3\
\x8c\xcf\x10\x9c\x06\xa0k\xc2e\x08m\xc2\x00\x97m\xd8\xc41\x0c \x14h\xe8\xf2\
\x8c\xa3)q\x10\x18\x00\x00R\xd8#\xec\xb2\xcd\xc1Y\x00\x00\x00\x00IEND\xaeB`\
\x82'
def getSmallUpArrowBitmap():
    """Return the embedded up-arrow icon as a wx bitmap."""
    arrow_img = getSmallUpArrowImage()
    return BitmapFromImage(arrow_img)
def getSmallUpArrowImage():
    """Decode the embedded up-arrow PNG data into a wx image."""
    raw = getSmallUpArrowData()
    return ImageFromStream(csio.StringIO(raw))
def getSmallDnArrowData():
    """Return the raw PNG data for a small (16x16) down-arrow icon,
    embedded as an escaped byte string; used for column-sort indicators."""
    return "\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x10\x00\x00\x00\x10\x08\x06\
\x00\x00\x00\x1f\xf3\xffa\x00\x00\x00\x04sBIT\x08\x08\x08\x08|\x08d\x88\x00\
\x00\x00HIDAT8\x8dcddbf\xa0\x040Q\xa4{\xd4\x00\x06\x06\x06\x06\x06\x16t\x81\
\xff\xff\xfe\xfe'\xa4\x89\x91\x89\x99\x11\xa7\x0b\x90%\ti\xc6j\x00>C\xb0\x89\
\xd3.\x10\xd1m\xc3\xe5*\xbc.\x80i\xc2\x17.\x8c\xa3y\x81\x01\x00\xa1\x0e\x04e\
?\x84B\xef\x00\x00\x00\x00IEND\xaeB`\x82"
def getSmallDnArrowBitmap():
    """Return the embedded down-arrow icon as a wx bitmap."""
    arrow_img = getSmallDnArrowImage()
    return BitmapFromImage(arrow_img)
def getSmallDnArrowImage():
    """Decode the embedded down-arrow PNG data into a wx image."""
    raw = getSmallDnArrowData()
    return ImageFromStream(csio.StringIO(raw))
# ----------------------------------------------------------------------
class myListCtrl(wx.ListCtrl, listmix.ListCtrlAutoWidthMixin):
    """wx.ListCtrl whose last column auto-stretches to fill the control
    (via ListCtrlAutoWidthMixin); used as the drop-down list of
    TextCtrlAutoComplete."""
    def __init__(
        self, parent, ID=-1, pos=wx.DefaultPosition, size=wx.DefaultSize, style=0
    ):
        """Initialise the list control, then enable the auto-width mixin."""
        wx.ListCtrl.__init__(self, parent, ID, pos, size, style)
        listmix.ListCtrlAutoWidthMixin.__init__(self)
class TextCtrlAutoComplete(wx.TextCtrl, listmix.ColumnSorterMixin):
    """
    wx.TextCtrl with a popup auto-completion list.

    The drop-down is a wx.PopupWindow containing a sortable wx.ListCtrl.
    Choices may be a flat list (*choices*) or a multi-column list
    (*multiChoices*); optional callbacks fire on selection and on text
    entry.

    NOTE(review): this class still contains Python-2-only names
    (``xrange``, ``sys.maxint``, ``cmp``) even though the surrounding file
    uses Python-3 ``print()`` — verify the intended target runtime.
    """
    def __init__(
        self,
        parent,
        colNames=None,
        choices=None,
        multiChoices=None,
        showHead=True,
        dropDownClick=True,
        colFetch=-1,
        colSearch=0,
        hideOnNoMatch=True,
        selectCallback=None,
        entryCallback=None,
        matchFunction=None,
        **therest,
    ):
        '''
        Constructor works just like wx.TextCtrl except you can pass in a
        list of choices. You can also change the choice list at any time
        by calling setChoices.
        '''
        # The control must see Enter keypresses itself to confirm a choice.
        if 'style' in therest:
            therest['style'] = wx.TE_PROCESS_ENTER | therest['style']
        else:
            therest['style'] = wx.TE_PROCESS_ENTER
        wx.TextCtrl.__init__(self, parent, **therest)
        # Some variables
        self._dropDownClick = dropDownClick
        self._colNames = colNames
        self._multiChoices = multiChoices
        self._showHead = showHead
        self._choices = choices
        self._lastinsertionpoint = 0
        self._hideOnNoMatch = hideOnNoMatch
        self._selectCallback = selectCallback
        self._entryCallback = entryCallback
        self._matchFunction = matchFunction
        self._screenheight = wx.SystemSettings.GetMetric(wx.SYS_SCREEN_Y)
        # sort variable needed by listmix
        self.itemDataMap = dict()
        # Load and sort data
        if not (self._multiChoices or self._choices):
            raise ValueError("Pass me at least one of multiChoices OR choices")
        # widgets
        self.dropdown = wx.PopupWindow(self)
        # Control the style
        flags = wx.LC_REPORT | wx.LC_SINGLE_SEL | wx.LC_SORT_ASCENDING
        if not (showHead and multiChoices):
            flags = flags | wx.LC_NO_HEADER
        # Create the list and bind the events
        self.dropdownlistbox = myListCtrl(
            self.dropdown, style=flags, pos=wx.Point(0, 0)
        )
        # initialize the parent
        if multiChoices:
            ln = len(multiChoices)
        else:
            ln = 1
        # else: ln = len(choices)
        listmix.ColumnSorterMixin.__init__(self, ln)
        # load the data
        if multiChoices:
            self.SetMultipleChoices(
                multiChoices, colSearch=colSearch, colFetch=colFetch
            )
        else:
            self.SetChoices(choices)
        # Reposition/hide the popup whenever this control or any ancestor
        # moves, resizes, or loses focus.
        gp = self
        while gp != None:
            gp.Bind(wx.EVT_MOVE, self.onControlChanged, gp)
            gp.Bind(wx.EVT_SIZE, self.onControlChanged, gp)
            gp = gp.GetParent()
        self.Bind(wx.EVT_KILL_FOCUS, self.onControlChanged, self)
        self.Bind(wx.EVT_TEXT, self.onEnteredText, self)
        self.Bind(wx.EVT_KEY_DOWN, self.onKeyDown, self)
        # If need drop down on left click
        if dropDownClick:
            self.Bind(wx.EVT_LEFT_DOWN, self.onClickToggleDown, self)
            self.Bind(wx.EVT_LEFT_UP, self.onClickToggleUp, self)
        self.dropdown.Bind(
            wx.EVT_LISTBOX, self.onListItemSelected, self.dropdownlistbox
        )
        self.dropdownlistbox.Bind(wx.EVT_LEFT_DOWN, self.onListClick)
        self.dropdownlistbox.Bind(wx.EVT_LEFT_DCLICK, self.onListDClick)
        self.dropdownlistbox.Bind(wx.EVT_LIST_COL_CLICK, self.onListColClick)
        # self.il = wx.ImageList(16, 16)
        # self.sm_dn = self.il.Add(getSmallDnArrowBitmap())
        # self.sm_up = self.il.Add(getSmallUpArrowBitmap())
        # self.dropdownlistbox.SetImageList(self.il, wx.IMAGE_LIST_SMALL)
        # self._ascending = True
    # -- methods called from mixin class
    def GetSortImages(self):
        """Sort-arrow images for ColumnSorterMixin.
        NOTE(review): self.sm_dn / self.sm_up are only assigned in
        commented-out code above — this would raise AttributeError if the
        mixin ever calls it. Confirm whether sorting images are wanted."""
        return (self.sm_dn, self.sm_up)
    def GetListCtrl(self):
        """Return the list control the sorter mixin operates on."""
        return self.dropdownlistbox
    # -- event methods
    def onListClick(self, evt):
        """Select the drop-down row under the mouse, if any."""
        toSel, flag = self.dropdownlistbox.HitTest(evt.GetPosition())
        # no values on position, return
        if toSel == -1:
            return
        self.dropdownlistbox.Select(toSel)
    def onListDClick(self, evt):
        """Double-click confirms the highlighted choice."""
        self._setValueFromSelected()
    def onListColClick(self, evt):
        """Sort the drop-down by the clicked column; a second click on the
        same column reverses the order."""
        col = evt.GetColumn()
        # reverse the sort
        if col == self._colSearch:
            self._ascending = not self._ascending
        self.SortListItems(evt.GetColumn(), ascending=self._ascending)
        self._colSearch = evt.GetColumn()
        evt.Skip()
    def onEnteredText(self, event):
        """Filter the drop-down as the user types, selecting the first
        matching row, or hide the popup when nothing matches."""
        text = event.GetString()
        if self._entryCallback:
            self._entryCallback()
        if not text:
            # control is empty; hide dropdown if shown:
            if self.dropdown.IsShown():
                self._showDropDown(False)
            event.Skip()
            return
        found = False
        if self._multiChoices:
            # load the sorted data into the listbox
            dd = self.dropdownlistbox
            choices = [
                dd.GetItem(x, self._colSearch).GetText()
                for x in xrange(dd.GetItemCount())
            ]
        else:
            choices = self._choices
        for numCh, choice in enumerate(choices):
            if self._matchFunction and self._matchFunction(text, choice):
                found = True
            elif choice.lower().startswith(text.lower()):
                found = True
            if found:
                self._showDropDown(True)
                item = self.dropdownlistbox.GetItem(numCh)
                toSel = item.GetId()
                self.dropdownlistbox.Select(toSel)
                break
        if not found:
            self.dropdownlistbox.Select(
                self.dropdownlistbox.GetFirstSelected(), False
            )
            if self._hideOnNoMatch:
                self._showDropDown(False)
        self._listItemVisible()
        event.Skip()
    def onKeyDown(self, event):
        """ Do some work when the user press on the keys:
            up and down: move the cursor
            left and right: move the search
        """
        skip = True
        sel = self.dropdownlistbox.GetFirstSelected()
        visible = self.dropdown.IsShown()
        KC = event.GetKeyCode()
        if KC == wx.WXK_DOWN:
            if sel < (self.dropdownlistbox.GetItemCount() - 1):
                self.dropdownlistbox.Select(sel + 1)
                self._listItemVisible()
            self._showDropDown()
            skip = False
        elif KC == wx.WXK_UP:
            if sel > 0:
                self.dropdownlistbox.Select(sel - 1)
                self._listItemVisible()
            self._showDropDown()
            skip = False
        elif KC == wx.WXK_LEFT:
            if not self._multiChoices:
                return
            if self._colSearch > 0:
                self._colSearch -= 1
            self._showDropDown()
        elif KC == wx.WXK_RIGHT:
            if not self._multiChoices:
                return
            if self._colSearch < self.dropdownlistbox.GetColumnCount() - 1:
                self._colSearch += 1
            self._showDropDown()
        if visible:
            if event.GetKeyCode() == wx.WXK_RETURN:
                self._setValueFromSelected()
                skip = False
            if event.GetKeyCode() == wx.WXK_ESCAPE:
                self._showDropDown(False)
                skip = False
        if skip:
            event.Skip()
    def onListItemSelected(self, event):
        """Confirm the selected row and propagate the event."""
        self._setValueFromSelected()
        event.Skip()
    def onClickToggleDown(self, event):
        """Remember the caret position so onClickToggleUp can detect a
        click (as opposed to a drag/selection)."""
        self._lastinsertionpoint = self.GetInsertionPoint()
        event.Skip()
    def onClickToggleUp(self, event):
        """A plain click (caret did not move) toggles the drop-down."""
        if self.GetInsertionPoint() == self._lastinsertionpoint:
            self._showDropDown(not self.dropdown.IsShown())
        event.Skip()
    def onControlChanged(self, event):
        """Hide the popup whenever the control moves/resizes/loses focus."""
        self._showDropDown(False)
        event.Skip()
    # -- Interfaces methods
    def SetMultipleChoices(self, choices, colSearch=0, colFetch=-1):
        ''' Set multi-column choice
        '''
        self._multiChoices = choices
        self._choices = None
        if not isinstance(self._multiChoices, list):
            self._multiChoices = [x for x in self._multiChoices]
        flags = wx.LC_REPORT | wx.LC_SINGLE_SEL | wx.LC_SORT_ASCENDING
        if not self._showHead:
            flags |= wx.LC_NO_HEADER
        self.dropdownlistbox.SetWindowStyleFlag(flags)
        # prevent errors on "old" systems
        if sys.version.startswith("2.3"):
            self._multiChoices.sort(lambda x, y: cmp(x[0].lower(), y[0].lower()))
        else:
            self._multiChoices.sort(key=lambda x: locale.strxfrm(x[0]).lower())
        self._updateDataList(self._multiChoices)
        lChoices = len(choices)
        if lChoices < 2:
            # Fix: was "raise ValuError(...)" — a NameError typo that would
            # have masked the intended ValueError.
            raise ValueError("You have to pass me a multi-dimension list")
        for numCol, rowValues in enumerate(choices[0]):
            if self._colNames:
                colName = self._colNames[numCol]
            else:
                colName = "Select %i" % numCol
            self.dropdownlistbox.InsertColumn(numCol, colName)
        for numRow, valRow in enumerate(choices):
            for numCol, colVal in enumerate(valRow):
                if numCol == 0:
                    index = self.dropdownlistbox.InsertImageStringItem(
                        sys.maxint, colVal, -1
                    )
                self.dropdownlistbox.SetStringItem(index, numCol, colVal)
                self.dropdownlistbox.SetItemData(index, numRow)
        self._setListSize()
        self._colSearch = colSearch
        self._colFetch = colFetch
    def SetChoices(self, choices):
        '''
        Sets the choices available in the popup wx.ListBox.
        The items will be sorted case insensitively.
        '''
        self._choices = choices
        self._multiChoices = None
        flags = (
            wx.LC_REPORT | wx.LC_SINGLE_SEL | wx.LC_SORT_ASCENDING | wx.LC_NO_HEADER
        )
        self.dropdownlistbox.SetWindowStyleFlag(flags)
        if not isinstance(choices, list):
            self._choices = [x for x in choices]
        # prevent errors on "old" systems
        if sys.version.startswith("2.3"):
            self._choices.sort(lambda x, y: cmp(x.lower(), y.lower()))
        else:
            self._choices.sort(key=lambda x: locale.strxfrm(x).lower())
        self._updateDataList(self._choices)
        self.dropdownlistbox.InsertColumn(0, "")
        for num, colVal in enumerate(self._choices):
            index = self.dropdownlistbox.InsertImageStringItem(
                sys.maxint, colVal, -1
            )
            self.dropdownlistbox.SetStringItem(index, 0, colVal)
            self.dropdownlistbox.SetItemData(index, num)
        self._setListSize()
        # there is only one choice for both search and fetch if setting a single column:
        self._colSearch = 0
        self._colFetch = -1
    def GetChoices(self):
        """Return whichever choice list is currently active."""
        if self._choices:
            return self._choices
        else:
            return self._multiChoices
    def SetSelectCallback(self, cb=None):
        """Set the callable invoked with the row values on selection."""
        self._selectCallback = cb
    def SetEntryCallback(self, cb=None):
        """Set the callable invoked on every text change."""
        self._entryCallback = cb
    def SetMatchFunction(self, mf=None):
        """Set a custom (text, choice) -> bool match predicate."""
        self._matchFunction = mf
    # -- Internal methods
    def _setValueFromSelected(self):
        '''
        Sets the wx.TextCtrl value from the selected wx.ListCtrl item.
        Will do nothing if no item is selected in the wx.ListCtrl.
        '''
        sel = self.dropdownlistbox.GetFirstSelected()
        if sel > -1:
            if self._colFetch != -1:
                col = self._colFetch
            else:
                col = self._colSearch
            itemtext = self.dropdownlistbox.GetItem(sel, col).GetText()
            if self._selectCallback:
                dd = self.dropdownlistbox
                values = [
                    dd.GetItem(sel, x).GetText()
                    for x in xrange(dd.GetColumnCount())
                ]
                self._selectCallback(values)
            self.SetValue(itemtext)
            self.SetInsertionPointEnd()
            self.SetSelection(-1, -1)
            self._showDropDown(False)
    def _showDropDown(self, show=True):
        '''
        Either display the drop down list (show = True) or hide it (show = False).
        '''
        if show:
            size = self.dropdown.GetSize()
            width, height = self.GetSizeTuple()
            x, y = self.ClientToScreenXY(0, height)
            if size.GetWidth() != width:
                size.SetWidth(width)
                self.dropdown.SetSize(size)
                self.dropdownlistbox.SetSize(self.dropdown.GetClientSize())
            # Open the popup below the control, or above it when it would
            # run off the bottom of the screen.
            if (y + size.GetHeight()) < self._screenheight:
                self.dropdown.SetPosition(wx.Point(x, y))
            else:
                self.dropdown.SetPosition(
                    wx.Point(x, y - height - size.GetHeight())
                )
        self.dropdown.Show(show)
    def _listItemVisible(self):
        '''
        Moves the selected item to the top of the list ensuring it is always visible.
        '''
        toSel = self.dropdownlistbox.GetFirstSelected()
        if toSel == -1:
            return
        self.dropdownlistbox.EnsureVisible(toSel)
    def _updateDataList(self, choices):
        """Rebuild the sorter mixin's itemDataMap from *choices*."""
        # delete, if need, all the previous data
        if self.dropdownlistbox.GetColumnCount() != 0:
            self.dropdownlistbox.DeleteAllColumns()
            self.dropdownlistbox.DeleteAllItems()
        # and update the dict
        if choices:
            for numVal, data in enumerate(choices):
                self.itemDataMap[numVal] = data
        else:
            numVal = 0
        # NOTE(review): for non-empty choices this passes the LAST ROW
        # INDEX (len - 1), not a column count, to SetColumnCount — looks
        # suspicious; confirm against ColumnSorterMixin's expectations.
        self.SetColumnCount(numVal)
    def _setListSize(self):
        """Size the popup to fit the longest choice and up to 20 rows."""
        if self._multiChoices:
            choices = self._multiChoices
        else:
            choices = self._choices
        longest = 0
        for choice in choices:
            longest = max(len(choice), longest)
        longest += 6
        itemcount = min(len(choices), 20) + 2
        charheight = self.dropdownlistbox.GetCharHeight()
        charwidth = self.dropdownlistbox.GetCharWidth()
        self.popupsize = wx.Size(charwidth * longest, charheight * itemcount)
        self.dropdownlistbox.SetSize(self.popupsize)
        self.dropdown.SetClientSize(self.popupsize)
class MyAUIFrame(wx.Frame):
    """Demo frame showing a five-pane wx.aui docking layout."""

    def __init__(self, *args, **kwargs):
        wx.Frame.__init__(self, *args, **kwargs)
        self.mgr = wx.aui.AuiManager(self)
        # One dummy panel per docking position (creation order preserved).
        pane_left = wx.Panel(self, -1, size=(200, 150))
        pane_right = wx.Panel(self, -1, size=(200, 150))
        pane_bottom = wx.Panel(self, -1, size=(200, 150))
        pane_top = wx.Panel(self, -1, size=(200, 150))
        pane_center = wx.Panel(self, -1, size=(200, 150))
        # A labelled text control inside each pane (same order as before).
        wx.TextCtrl(pane_right, -1, 'rightpanel')
        wx.TextCtrl(pane_left, -1, 'leftpanel')
        wx.TextCtrl(pane_top, -1, 'toppanel')
        wx.TextCtrl(pane_bottom, -1, 'bottompanel')
        wx.TextCtrl(pane_center, -1, 'centerpanel')
        # Dock each pane and commit the layout.
        self.mgr.AddPane(pane_left, wx.aui.AuiPaneInfo().Left().Layer(1))
        self.mgr.AddPane(pane_right, wx.aui.AuiPaneInfo().Right().Layer(1))
        self.mgr.AddPane(pane_bottom, wx.aui.AuiPaneInfo().Bottom().Layer(2))
        self.mgr.AddPane(pane_top, wx.aui.AuiPaneInfo().Top().Layer(2))
        self.mgr.AddPane(pane_center, wx.aui.AuiPaneInfo().Center().Layer(1))
        self.Maximize()
        self.mgr.Update()
class MyAUIApp(wx.App):
    """wx.App that bootstraps the AUI demo frame."""

    def OnInit(self):
        main_frame = MyAUIFrame(None, -1, 'CBMPy Gen2 GUI')
        main_frame.Show()
        self.SetTopWindow(main_frame)
        return 1
def runModelEditor(mod):
    """Launch the ModelEditor GUI for *mod* and block until it closes."""
    editor_app = wx.App(False)
    editor_frame = ModelEditor(mod)
    editor_frame.Show(True)
    editor_app.SetTopWindow(editor_frame)
    editor_app.MainLoop()
def runMyAUIApp():
    """Launch the AUI demo application and enter its main loop."""
    demo_app = MyAUIApp(0)
    demo_app.MainLoop()
|
SystemsBioinformatics/cbmpy
|
cbmpy/CBWx.py
|
Python
|
gpl-3.0
| 89,141
|
[
"PySCeS"
] |
bd12b897be4c94f48144ab23b7bc2047a34890a032857f1c170c1da08bb663b6
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2008,2010 Gary Burton
# Copyright (C) 2008 Craig J. Anderson
# Copyright (C) 2009 Nick Hall
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2011 Adam Stein <adam@csh.rit.edu>
# Copyright (C) 2011-2013 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Specific option handling for a GUI.
"""
from __future__ import unicode_literals
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
import os
import sys
#-------------------------------------------------------------------------
#
# gtk modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GObject
#-------------------------------------------------------------------------
#
# gramps modules
#
#-------------------------------------------------------------------------
from ..utils import ProgressMeter
from ..pluginmanager import GuiPluginManager
from .. import widgets
from ..managedwindow import ManagedWindow
from ..dialog import OptionDialog
from ..selectors import SelectorFactory
from gramps.gen.display.name import displayer as _nd
from gramps.gen.filters import GenericFilterFactory, GenericFilter, rules
from gramps.gen.constfunc import (conv_to_unicode, uni_to_gui, get_curr_dir,
STRTYPE, cuni)
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#------------------------------------------------------------------------
#
# Dialog window used to select a surname
#
#------------------------------------------------------------------------
class LastNameDialog(ManagedWindow):
    """
    A dialog that allows the selection of one or more surnames from the
    database.
    """

    def __init__(self, database, uistate, track, surnames, skip_list=None):
        """
        @param surnames: dict mapping surname -> count. Filled in on first
            use (a full database scan) and reused on later invocations.
        @param skip_list: surnames to exclude from the dialog. Defaults to
            an empty set. (Was a mutable default argument ``set()``;
            replaced with a None sentinel per best practice — behaviour is
            unchanged since the set was only read, never mutated.)
        """
        if skip_list is None:
            skip_list = set()
        ManagedWindow.__init__(self, uistate, track, self)
        flags = Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT
        buttons = (Gtk.STOCK_CANCEL, Gtk.ResponseType.REJECT, Gtk.STOCK_OK,
                   Gtk.ResponseType.ACCEPT)
        self.__dlg = Gtk.Dialog(None, uistate.window, flags, buttons)
        self.__dlg.set_position(Gtk.WindowPosition.CENTER_ON_PARENT)
        self.set_window(self.__dlg, None, _('Select surname'))
        self.window.set_default_size(400, 400)

        # build up a container to display all of the people of interest
        self.__model = Gtk.ListStore(GObject.TYPE_STRING, GObject.TYPE_INT)
        self.__tree_view = Gtk.TreeView(self.__model)
        col1 = Gtk.TreeViewColumn(_('Surname'), Gtk.CellRendererText(), text=0)
        col2 = Gtk.TreeViewColumn(_('Count'), Gtk.CellRendererText(), text=1)
        col1.set_resizable(True)
        col2.set_resizable(True)
        col1.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
        col2.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
        col1.set_sort_column_id(0)
        col2.set_sort_column_id(1)
        self.__tree_view.append_column(col1)
        self.__tree_view.append_column(col2)
        scrolled_window = Gtk.ScrolledWindow()
        scrolled_window.add(self.__tree_view)
        scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        scrolled_window.set_shadow_type(Gtk.ShadowType.OUT)
        self.__dlg.vbox.pack_start(scrolled_window, True, True, 0)
        scrolled_window.show_all()

        if len(surnames) == 0:
            # we could use database.get_surname_list(), but if we do that
            # all we get is a list of names without a count...therefore
            # we'll traverse the entire database ourself and build up a
            # list that we can use
            # for name in database.get_surname_list():
            #     self.__model.append([name, 0])
            # build up the list of surnames, keeping track of the count for each
            # name (this can be a lengthy process, so by passing in the
            # dictionary we can be certain we only do this once)
            progress = ProgressMeter(_('Finding Surnames'))
            progress.set_pass(_('Finding surnames'),
                              database.get_number_of_people())
            for person in database.iter_people():
                progress.step()
                key = person.get_primary_name().get_surname()
                count = 0
                if key in surnames:
                    count = surnames[key]
                surnames[key] = count + 1
            progress.close()

        # insert the names and count into the model
        for key in surnames:
            # NOTE(review): skip_list membership is tested against the
            # iso-8859-1-encoded form of the surname — confirm callers
            # populate skip_list with the same encoding.
            if key.encode('iso-8859-1', 'xmlcharrefreplace') not in skip_list:
                self.__model.append([key, surnames[key]])
        # keep the list sorted starting with the most popular last name
        self.__model.set_sort_column_id(1, Gtk.SortType.DESCENDING)
        # the "OK" button should be enabled/disabled based on the selection of
        # a row
        self.__tree_selection = self.__tree_view.get_selection()
        self.__tree_selection.set_mode(Gtk.SelectionMode.MULTIPLE)
        self.__tree_selection.select_path(0)

    def run(self):
        """
        Display the dialog and return the selected surnames when done.
        """
        response = self.__dlg.run()
        surname_set = set()
        if response == Gtk.ResponseType.ACCEPT:
            (mode, paths) = self.__tree_selection.get_selected_rows()
            for path in paths:
                i = self.__model.get_iter(path)
                surname = self.__model.get_value(i, 0)
                surname_set.add(surname)
        self.__dlg.destroy()
        return surname_set
#-------------------------------------------------------------------------
#
# GuiStringOption class
#
#-------------------------------------------------------------------------
class GuiStringOption(Gtk.Entry):
    """
    This class displays an option that is a simple one-line string.
    """
    def __init__(self, option, dbstate, uistate, track, override):
        """
        @param option: The option to display.
        @type option: gen.plug.menu.StringOption
        @return: nothing
        """
        GObject.GObject.__init__(self)
        self.__option = option
        self.set_text( self.__option.get_value() )
        # Set up signal handlers when the widget value is changed
        # from user interaction or programmatically. When handling
        # a specific signal, we need to temporarily block the signal
        # that would call the other signal handler.
        self.changekey = self.connect('changed', self.__text_changed)
        self.valuekey = self.__option.connect('value-changed', self.__value_changed)
        self.conkey = self.__option.connect('avail-changed', self.__update_avail)
        self.__update_avail()
        self.set_tooltip_text(self.__option.get_help())
    def __text_changed(self, obj): # IGNORE:W0613 - obj is unused
        """
        Handle the change of the value made by the user.
        """
        # disable/enable around set_value so the option's own
        # 'value-changed' signal does not echo back into this widget.
        self.__option.disable_signals()
        self.__option.set_value( self.get_text() )
        self.__option.enable_signals()
    def __update_avail(self):
        """
        Update the availability (sensitivity) of this widget.
        """
        avail = self.__option.get_available()
        self.set_sensitive(avail)
    def __value_changed(self):
        """
        Handle the change made programmatically
        """
        # block our own 'changed' handler while mirroring the option's
        # new value into the entry, to avoid a feedback loop.
        self.handler_block(self.changekey)
        self.set_text(self.__option.get_value())
        self.handler_unblock(self.changekey)
    def clean_up(self):
        """
        remove stuff that blocks garbage collection
        """
        self.__option.disconnect(self.valuekey)
        self.__option.disconnect(self.conkey)
        self.__option = None
#-------------------------------------------------------------------------
#
# GuiColorOption class
#
#-------------------------------------------------------------------------
class GuiColorOption(Gtk.ColorButton):
    """
    This class displays an option that allows the selection of a colour.
    """
    def __init__(self, option, dbstate, uistate, track, override):
        """
        @param option: The option to display (provides a '#rrggbb' string).
        @return: nothing
        """
        self.__option = option
        # (Removed an unused local that cached get_value(); the colour is
        # read directly from the option below.)
        GObject.GObject.__init__(self)
        self.set_color(Gdk.color_parse(self.__option.get_value()))
        # Set up signal handlers when the widget value is changed
        # from user interaction or programmatically. When handling
        # a specific signal, we need to temporarily block the signal
        # that would call the other signal handler.
        self.changekey = self.connect('color-set', self.__color_changed)
        self.valuekey = self.__option.connect('value-changed', self.__value_changed)
        self.set_tooltip_text(self.__option.get_help())
    def __color_changed(self, obj): # IGNORE:W0613 - obj is unused
        """
        Handle the change of color made by the user.
        """
        colour = self.get_color()
        # Scale each channel down to 0-255 and format as '#rrggbb'
        # (Gdk colour channels appear to be 16-bit here — the /65536
        # divisor assumes so; confirm against the Gdk.Color docs).
        value = '#%02x%02x%02x' % (
            int(colour.red * 256 / 65536),
            int(colour.green * 256 / 65536),
            int(colour.blue * 256 / 65536))
        self.__option.disable_signals()
        self.__option.set_value(value)
        self.__option.enable_signals()
    def __value_changed(self):
        """
        Handle the change made programmatically
        """
        self.handler_block(self.changekey)
        self.set_color(Gdk.color_parse(self.__option.get_value()))
        self.handler_unblock(self.changekey)
    def clean_up(self):
        """
        remove stuff that blocks garbage collection
        """
        self.__option.disconnect(self.valuekey)
        self.__option = None
#-------------------------------------------------------------------------
#
# GuiNumberOption class
#
#-------------------------------------------------------------------------
class GuiNumberOption(Gtk.SpinButton):
    """
    This class displays an option that is a simple number with defined maximum
    and minimum values.
    """
    def __init__(self, option, dbstate, uistate, track, override):
        """
        @param option: The option to display (provides value/min/max/step).
        @return: nothing
        """
        self.__option = option
        decimals = 0
        step = self.__option.get_step()
        # Initial adjustment value 1 is a placeholder; the real value is
        # applied via set_value() below.
        adj = Gtk.Adjustment(1,
                             self.__option.get_min(),
                             self.__option.get_max(),
                             step)
        # Calculate the number of decimal places if necessary
        if step < 1:
            import math
            decimals = int(math.log10(step) * -1)
        GObject.GObject.__init__(self, adjustment=adj, climb_rate=1, digits=decimals)
        Gtk.SpinButton.set_numeric(self, True)
        self.set_value(self.__option.get_value())
        # Set up signal handlers when the widget value is changed
        # from user interaction or programmatically. When handling
        # a specific signal, we need to temporarily block the signal
        # that would call the other signal handler.
        self.changekey = self.connect('value_changed', self.__number_changed)
        self.valuekey = self.__option.connect('value-changed', self.__value_changed)
        self.conkey = self.__option.connect('avail-changed', self.__update_avail)
        self.__update_avail()
        self.set_tooltip_text(self.__option.get_help())
    def __number_changed(self, obj): # IGNORE:W0613 - obj is unused
        """
        Handle the change of the value made by the user.
        """
        # Coerce the widget's float back to the option's own numeric type
        # (int options stay int, float options stay float).
        vtype = type(self.__option.get_value())
        self.__option.set_value( vtype(self.get_value()) )
    def __update_avail(self):
        """
        Update the availability (sensitivity) of this widget.
        """
        avail = self.__option.get_available()
        self.set_sensitive(avail)
    def __value_changed(self):
        """
        Handle the change made programmatically
        """
        self.handler_block(self.changekey)
        self.set_value(self.__option.get_value())
        self.handler_unblock(self.changekey)
    def clean_up(self):
        """
        remove stuff that blocks garbage collection
        """
        self.__option.disconnect(self.valuekey)
        self.__option.disconnect(self.conkey)
        self.__option = None
#-------------------------------------------------------------------------
#
# GuiTextOption class
#
#-------------------------------------------------------------------------
class GuiTextOption(Gtk.ScrolledWindow):
    """
    This class displays an option that is a multi-line string.

    The option's value is a list of lines; the widget joins them with
    newlines for display and splits on newlines when writing back.
    """
    def __init__(self, option, dbstate, uistate, track, override):
        """
        @param option: The option to display (value is a list of strings).
        @return: nothing
        """
        self.__option = option
        GObject.GObject.__init__(self)
        self.set_shadow_type(Gtk.ShadowType.IN)
        self.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        # Add a TextView
        value = self.__option.get_value()
        gtext = Gtk.TextView()
        gtext.set_size_request(-1, 70)
        gtext.get_buffer().set_text("\n".join(value))
        gtext.set_editable(1)
        gtext.set_wrap_mode(Gtk.WrapMode.WORD_CHAR)
        self.add(gtext)
        self.__buff = gtext.get_buffer()
        # Set up signal handlers when the widget value is changed
        # from user interaction or programmatically. When handling
        # a specific signal, we need to temporarily block the signal
        # that would call the other signal handler.
        self.bufcon = self.__buff.connect('changed', self.__text_changed)
        self.valuekey = self.__option.connect('value-changed', self.__value_changed)
        # Required for tooltip
        gtext.add_events(Gdk.EventMask.ENTER_NOTIFY_MASK)
        gtext.add_events(Gdk.EventMask.LEAVE_NOTIFY_MASK)
        gtext.set_tooltip_text(self.__option.get_help())
    def __text_changed(self, obj): # IGNORE:W0613 - obj is unused
        """
        Handle the change of the value made by the user.
        """
        text_val = cuni( self.__buff.get_text( self.__buff.get_start_iter(),
                                               self.__buff.get_end_iter(),
                                               False) )
        self.__option.disable_signals()
        self.__option.set_value( text_val.split('\n') )
        self.__option.enable_signals()
    def __value_changed(self):
        """
        Handle the change made programmatically
        """
        self.__buff.handler_block(self.bufcon)
        value = self.__option.get_value()
        # The buffer can only be set from a string. If we have a string
        # value, use it directly. Otherwise assume a list and join the
        # elements with newlines.
        if isinstance(value, STRTYPE):
            self.__buff.set_text(value)
            # Need to manually call the other handler so that the option
            # value is normalised back into a list. If the option were
            # left holding a plain string, later list-style handling
            # would iterate it character by character -- not what we want.
            self.__text_changed(None)
        else:
            self.__buff.set_text("\n".join(value))
        self.__buff.handler_unblock(self.bufcon)
    def clean_up(self):
        """
        remove stuff that blocks garbage collection
        """
        self.__option.disconnect(self.valuekey)
        self.__option = None
        self.__buff.disconnect(self.bufcon)
        self.__buff = None
#-------------------------------------------------------------------------
#
# GuiBooleanOption class
#
#-------------------------------------------------------------------------
class GuiBooleanOption(Gtk.CheckButton):
    """
    This class displays an option that is a boolean (True or False).
    """
    def __init__(self, option, dbstate, uistate, track, override):
        """
        @param option: The option to display (provides label and value).
        @return: nothing
        """
        self.__option = option
        GObject.GObject.__init__(self)
        self.set_label(self.__option.get_label())
        self.set_active(self.__option.get_value())
        # Set up signal handlers when the widget value is changed
        # from user interaction or programmatically. When handling
        # a specific signal, we need to temporarily block the signal
        # that would call the other signal handler.
        self.changekey = self.connect('toggled', self.__state_changed)
        self.valuekey = self.__option.connect('value-changed', self.__value_changed)
        self.conkey = self.__option.connect('avail-changed', self.__update_avail)
        self.__update_avail()
        self.set_tooltip_text(self.__option.get_help())
    def __state_changed(self, obj): # IGNORE:W0613 - obj is unused
        """
        Handle the change of the value made by the user.
        """
        self.__option.set_value( self.get_active() )
    def __update_avail(self):
        """
        Update the availability (sensitivity) of this widget.
        """
        avail = self.__option.get_available()
        self.set_sensitive(avail)
    def __value_changed(self):
        """
        Handle the change made programmatically
        """
        self.handler_block(self.changekey)
        self.set_active(self.__option.get_value())
        self.handler_unblock(self.changekey)
    def clean_up(self):
        """
        remove stuff that blocks garbage collection
        """
        self.__option.disconnect(self.valuekey)
        self.__option.disconnect(self.conkey)
        self.__option = None
#-------------------------------------------------------------------------
#
# GuiEnumeratedListOption class
#
#-------------------------------------------------------------------------
class GuiEnumeratedListOption(Gtk.HBox):
    """
    This class displays an option that provides a finite number of values.
    Each possible value is assigned a value and a description.
    """
    def __init__(self, option, dbstate, uistate, track, override):
        GObject.GObject.__init__(self)
        evtBox = Gtk.EventBox()
        self.__option = option
        self.__combo = Gtk.ComboBoxText()
        if len(option.get_items()) > 18:
            # Long item lists are wrapped into three columns so the
            # popup stays a manageable height.
            self.__combo.set_popup_fixed_width(False)
            self.__combo.set_wrap_width(3)
        evtBox.add(self.__combo)
        self.pack_start(evtBox, True, True, 0)
        self.__update_options()
        # Set up signal handlers when the widget value is changed
        # from user interaction or programmatically. When handling
        # a specific signal, we need to temporarily block the signal
        # that would call the other signal handler.
        self.changekey = self.__combo.connect('changed', self.__selection_changed)
        self.valuekey = self.__option.connect('value-changed', self.__value_changed)
        self.conkey1 = self.__option.connect('options-changed', self.__update_options)
        self.conkey2 = self.__option.connect('avail-changed', self.__update_avail)
        self.__update_avail()
        self.set_tooltip_text(self.__option.get_help())
    def __selection_changed(self, obj): # IGNORE:W0613 - obj is unused
        """
        Handle the change of the value made by the user.
        """
        index = self.__combo.get_active()
        if index < 0:
            # Nothing selected (e.g. while the combo is being rebuilt).
            return
        items = self.__option.get_items()
        value, description = items[index] # IGNORE:W0612 - description is unused
        # Don't disable the __option signals as is normally done for
        # the other widgets or bad things happen (like other needed
        # signals don't fire)
        self.__option.set_value( value )
        self.value_changed()  # Allow overriding so that another class
        # can add functionality
    def value_changed(self):
        """
        Hook for subclasses: called after the user changes the
        selection.  The base implementation does nothing.
        """
        pass
    def __update_options(self):
        """
        Handle the change of the available options: rebuild the combo
        entries and re-select the row matching the current value.
        """
        self.__combo.remove_all()
        #self.__combo.get_model().clear()
        cur_val = self.__option.get_value()
        active_index = 0
        current_index = 0
        for (value, description) in self.__option.get_items():
            self.__combo.append_text(description)
            if value == cur_val:
                active_index = current_index
            current_index += 1
        self.__combo.set_active( active_index )
    def __update_avail(self):
        """
        Update the availability (sensitivity) of this widget.
        """
        avail = self.__option.get_available()
        self.set_sensitive(avail)
    def __value_changed(self):
        """
        Handle the change made programmatically: rebuild the options
        with the user-selection handler blocked to avoid re-entry.
        """
        self.__combo.handler_block(self.changekey)
        self.__update_options()
        self.__combo.handler_unblock(self.changekey)
    def clean_up(self):
        """
        remove stuff that blocks garbage collection
        """
        self.__option.disconnect(self.valuekey)
        self.__option.disconnect(self.conkey1)
        self.__option.disconnect(self.conkey2)
        self.__option = None
#-------------------------------------------------------------------------
#
# GuiPersonOption class
#
#-------------------------------------------------------------------------
class GuiPersonOption(Gtk.HBox):
    """
    This class displays an option that allows a person from the
    database to be selected.
    """
    def __init__(self, option, dbstate, uistate, track, override):
        """
        @param option: The option to display.
        @type option: gen.plug.menu.PersonOption
        @return: nothing
        """
        GObject.GObject.__init__(self)
        self.__option = option
        self.__dbstate = dbstate
        self.__db = dbstate.get_database()
        self.__uistate = uistate
        self.__track = track
        self.__person_label = Gtk.Label()
        self.__person_label.set_alignment(0.0, 0.5)
        pevt = Gtk.EventBox()
        pevt.add(self.__person_label)
        person_button = widgets.SimpleButton(Gtk.STOCK_INDEX,
                                             self.__get_person_clicked)
        person_button.set_relief(Gtk.ReliefStyle.NORMAL)
        self.pack_start(pevt, False, True, 0)
        self.pack_end(person_button, False, True, 0)
        gid = self.__option.get_value()
        # Initial person, tried in order: the active person, the stored
        # option value, the database default person, and finally the
        # database's initial person.
        # Pick up the active person
        person_handle = self.__uistate.get_active('Person')
        person = self.__dbstate.db.get_person_from_handle(person_handle)
        if override or not person:
            # Pick up the stored option value if there is one
            person = self.__db.get_person_from_gramps_id(gid)
        if not person:
            # If all else fails, get the default person to avoid bad values
            person = self.__db.get_default_person()
        if not person:
            person = self.__db.find_initial_person()
        self.__update_person(person)
        self.valuekey = self.__option.connect('value-changed', self.__value_changed)
        self.conkey = self.__option.connect('avail-changed', self.__update_avail)
        self.__update_avail()
        pevt.set_tooltip_text(self.__option.get_help())
        person_button.set_tooltip_text(_('Select a different person'))
    def __get_person_clicked(self, obj): # IGNORE:W0613 - obj is unused
        """
        Handle the button to choose a different person: open a person
        selector pre-filtered to likely candidates.
        """
        # Create a filter for the person selector.
        rfilter = GenericFilter()
        rfilter.set_logical_op('or')
        rfilter.add_rule(rules.person.IsBookmarked([]))
        rfilter.add_rule(rules.person.HasIdOf([self.__option.get_value()]))
        # Add the database home person if one exists.
        default_person = self.__db.get_default_person()
        if default_person:
            gid = default_person.get_gramps_id()
            rfilter.add_rule(rules.person.HasIdOf([gid]))
        # Add the selected person if one exists.
        person_handle = self.__uistate.get_active('Person')
        active_person = self.__dbstate.db.get_person_from_handle(person_handle)
        if active_person:
            gid = active_person.get_gramps_id()
            rfilter.add_rule(rules.person.HasIdOf([gid]))
        select_class = SelectorFactory('Person')
        sel = select_class(self.__dbstate, self.__uistate, self.__track,
                           title=_('Select a person for the report'),
                           filter=rfilter )
        person = sel.run()
        self.__update_person(person)
    def __update_person(self, person):
        """
        Update the currently selected person: refresh the label and
        store the person's Gramps ID into the option.  A None person
        (e.g. a cancelled selector) leaves everything unchanged.
        """
        if person:
            name = _nd.display(person)
            gid = person.get_gramps_id()
            self.__person_label.set_text( "%s (%s)" % (name, gid) )
            self.__option.set_value(gid)
    def __update_avail(self):
        """
        Update the availability (sensitivity) of this widget.
        """
        avail = self.__option.get_available()
        self.set_sensitive(avail)
    def __value_changed(self):
        """
        Handle the change made programmatically: refresh the label from
        the option's stored Gramps ID.
        """
        gid = self.__option.get_value()
        # NOTE(review): get_person_from_gramps_id() may return None for a
        # stale/unknown id, which would make _nd.display() fail -- confirm
        # callers only set valid ids.
        name = _nd.display(self.__db.get_person_from_gramps_id(gid))
        self.__person_label.set_text("%s (%s)" % (name, gid))
    def clean_up(self):
        """
        remove stuff that blocks garbage collection
        """
        self.__option.disconnect(self.valuekey)
        self.__option.disconnect(self.conkey)
        self.__option = None
#-------------------------------------------------------------------------
#
# GuiFamilyOption class
#
#-------------------------------------------------------------------------
class GuiFamilyOption(Gtk.HBox):
    """
    This class displays an option that allows a family from the
    database to be selected.
    """
    def __init__(self, option, dbstate, uistate, track, override):
        """
        @param option: The option to display.
        @type option: gen.plug.menu.FamilyOption
        @return: nothing
        """
        GObject.GObject.__init__(self)
        self.__option = option
        self.__dbstate = dbstate
        self.__db = dbstate.get_database()
        self.__uistate = uistate
        self.__track = track
        self.__family_label = Gtk.Label()
        self.__family_label.set_alignment(0.0, 0.5)
        pevt = Gtk.EventBox()
        pevt.add(self.__family_label)
        family_button = widgets.SimpleButton(Gtk.STOCK_INDEX,
                                             self.__get_family_clicked)
        family_button.set_relief(Gtk.ReliefStyle.NORMAL)
        self.pack_start(pevt, False, True, 0)
        self.pack_end(family_button, False, True, 0)
        self.__initialize_family(override)
        self.valuekey = self.__option.connect('value-changed', self.__value_changed)
        self.conkey = self.__option.connect('avail-changed', self.__update_avail)
        self.__update_avail()
        pevt.set_tooltip_text(self.__option.get_help())
        family_button.set_tooltip_text(_('Select a different family'))
    def __initialize_family(self, override):
        """
        Find a family to initialize the option with. If there is no specified
        family, try to find a family that the user is likely interested in.

        Fallback order: stored option value (when override), the active
        family, the active person's families, the stored option value,
        the default person's families, and finally any family at all.
        """
        family_list = []
        fid = self.__option.get_value()
        fid_family = self.__db.get_family_from_gramps_id(fid)
        active_family = self.__uistate.get_active('Family')
        if override and fid_family:
            # Use the stored option value if there is one
            family_list = [fid_family.get_handle()]
        if active_family and not family_list:
            # Use the active family if one is selected
            family_list = [active_family]
        if not family_list:
            # Next try the family of the active person
            person_handle = self.__uistate.get_active('Person')
            person = self.__dbstate.db.get_person_from_handle(person_handle)
            if person:
                family_list = person.get_family_handle_list()
        if fid_family and not family_list:
            # Next try the stored option value if there is one
            family_list = [fid_family.get_handle()]
        if not family_list:
            # Next try the family of the default person in the database.
            person = self.__db.get_default_person()
            if person:
                family_list = person.get_family_handle_list()
        if not family_list:
            # Finally, take any family you can find.
            for family in self.__db.iter_family_handles():
                self.__update_family(family)
                break
        else:
            self.__update_family(family_list[0])
    def __get_family_clicked(self, obj): # IGNORE:W0613 - obj is unused
        """
        Handle the button to choose a different family: open a family
        selector pre-filtered to likely candidates.
        """
        # Create a filter for the person selector.
        rfilter = GenericFilterFactory('Family')()
        rfilter.set_logical_op('or')
        # Add the current family
        rfilter.add_rule(rules.family.HasIdOf([self.__option.get_value()]))
        # Add all bookmarked families
        rfilter.add_rule(rules.family.IsBookmarked([]))
        # Add the families of the database home person if one exists.
        default_person = self.__db.get_default_person()
        if default_person:
            family_list = default_person.get_family_handle_list()
            for family_handle in family_list:
                family = self.__db.get_family_from_handle(family_handle)
                gid = family.get_gramps_id()
                rfilter.add_rule(rules.family.HasIdOf([gid]))
        # Add the families of the selected person if one exists.
        # Same code as above one ! See bug #5032 feature request #5038
        ### active_person = self.__uistate.get_active('Person') ###
        #active_person = self.__db.get_default_person()
        #if active_person:
            #family_list = active_person.get_family_handle_list()
            #for family_handle in family_list:
                #family = self.__db.get_family_from_handle(family_handle)
                #gid = family.get_gramps_id()
                #rfilter.add_rule(rules.family.HasIdOf([gid]))
        select_class = SelectorFactory('Family')
        sel = select_class(self.__dbstate, self.__uistate, self.__track,
                           filter=rfilter )
        family = sel.run()
        if family:
            self.__update_family(family.get_handle())
    def __update_family(self, handle):
        """
        Update the currently selected family: build a "father and mother
        (id)" label and store the family's Gramps ID into the option.
        """
        if handle:
            family = self.__dbstate.db.get_family_from_handle(handle)
            family_id = family.get_gramps_id()
            fhandle = family.get_father_handle()
            mhandle = family.get_mother_handle()
            if fhandle:
                father = self.__db.get_person_from_handle(fhandle)
                father_name = _nd.display(father)
            else:
                father_name = _("unknown father")
            if mhandle:
                mother = self.__db.get_person_from_handle(mhandle)
                mother_name = _nd.display(mother)
            else:
                mother_name = _("unknown mother")
            name = _("%(father_name)s and %(mother_name)s (%(family_id)s)") % {
                        'father_name': father_name,
                        'mother_name': mother_name,
                        'family_id': family_id}
            self.__family_label.set_text( name )
            self.__option.set_value(family_id)
    def __update_avail(self):
        """
        Update the availability (sensitivity) of this widget.
        """
        avail = self.__option.get_available()
        self.set_sensitive(avail)
    def __value_changed(self):
        """
        Handle the change made programmatically
        """
        fid = self.__option.get_value()
        # NOTE(review): get_family_from_gramps_id() may return None for a
        # stale/unknown id, which would raise here -- confirm callers only
        # set valid ids.
        handle = self.__db.get_family_from_gramps_id(fid).get_handle()
        # Need to disable signals as __update_family() calls set_value()
        # which would launch the 'value-changed' signal which is what
        # we are reacting to here in the first place (don't need the
        # signal repeated)
        self.__option.disable_signals()
        self.__update_family(handle)
        self.__option.enable_signals()
    def clean_up(self):
        """
        remove stuff that blocks garbage collection
        """
        self.__option.disconnect(self.valuekey)
        self.__option.disconnect(self.conkey)
        self.__option = None
#-------------------------------------------------------------------------
#
# GuiNoteOption class
#
#-------------------------------------------------------------------------
class GuiNoteOption(Gtk.HBox):
    """
    This class displays an option that allows a note from the
    database to be selected.
    """
    def __init__(self, option, dbstate, uistate, track, override):
        """
        @param option: The option to display.
        @type option: gen.plug.menu.NoteOption
        @return: nothing
        """
        GObject.GObject.__init__(self)
        self.__option = option
        self.__dbstate = dbstate
        self.__db = dbstate.get_database()
        self.__uistate = uistate
        self.__track = track
        self.__note_label = Gtk.Label()
        self.__note_label.set_alignment(0.0, 0.5)
        pevt = Gtk.EventBox()
        pevt.add(self.__note_label)
        note_button = widgets.SimpleButton(Gtk.STOCK_INDEX,
                                           self.__get_note_clicked)
        note_button.set_relief(Gtk.ReliefStyle.NORMAL)
        self.pack_start(pevt, False, True, 0)
        self.pack_end(note_button, False, True, 0)
        # Initialize to the current value
        nid = self.__option.get_value()
        note = self.__db.get_note_from_gramps_id(nid)
        self.__update_note(note)
        self.valuekey = self.__option.connect('value-changed',
                                              self.__value_changed)
        # Keep the handler id so clean_up() can disconnect it; it was
        # previously discarded, leaving a connection that blocked
        # garbage collection (cf. GuiPersonOption/GuiFamilyOption).
        self.conkey = self.__option.connect('avail-changed',
                                            self.__update_avail)
        self.__update_avail()
        pevt.set_tooltip_text(self.__option.get_help())
        note_button.set_tooltip_text(_('Select an existing note'))
    def __get_note_clicked(self, obj): # IGNORE:W0613 - obj is unused
        """
        Handle the button to choose a different note: open the note
        selector and store whatever the user picks.
        """
        select_class = SelectorFactory('Note')
        sel = select_class(self.__dbstate, self.__uistate, self.__track)
        note = sel.run()
        self.__update_note(note)
    def __update_note(self, note):
        """
        Update the currently selected note.  The label shows a preview
        (truncated to 35 characters) plus the note's Gramps ID; a None
        note resets the option value to the empty string.
        """
        if note:
            note_id = note.get_gramps_id()
            # Collapse all whitespace for a one-line preview.
            txt = " ".join(note.get().split())
            if len(txt) > 35:
                txt = txt[:35] + "..."
            txt = "%s [%s]" % (txt, note_id)
            self.__note_label.set_text( txt )
            self.__option.set_value(note_id)
        else:
            txt = "<i>%s</i>" % _('No note given, click button to select one')
            self.__note_label.set_text( txt )
            self.__note_label.set_use_markup(True)
            self.__option.set_value("")
    def __update_avail(self):
        """
        Update the availability (sensitivity) of this widget.
        """
        avail = self.__option.get_available()
        self.set_sensitive(avail)
    def __value_changed(self):
        """
        Handle the change made programmatically
        """
        nid = self.__option.get_value()
        note = self.__db.get_note_from_gramps_id(nid)
        # Need to disable signals as __update_note() calls set_value()
        # which would launch the 'value-changed' signal which is what
        # we are reacting to here in the first place (don't need the
        # signal repeated)
        self.__option.disable_signals()
        self.__update_note(note)
        self.__option.enable_signals()
    def clean_up(self):
        """
        remove stuff that blocks garbage collection
        """
        self.__option.disconnect(self.valuekey)
        self.__option.disconnect(self.conkey)
        self.__option = None
#-------------------------------------------------------------------------
#
# GuiMediaOption class
#
#-------------------------------------------------------------------------
class GuiMediaOption(Gtk.HBox):
    """
    This class displays an option that allows a media object from the
    database to be selected.
    """
    def __init__(self, option, dbstate, uistate, track, override):
        """
        @param option: The option to display.
        @type option: gen.plug.menu.MediaOption
        @return: nothing
        """
        GObject.GObject.__init__(self)
        self.__option = option
        self.__dbstate = dbstate
        self.__db = dbstate.get_database()
        self.__uistate = uistate
        self.__track = track
        self.__media_label = Gtk.Label()
        self.__media_label.set_alignment(0.0, 0.5)
        pevt = Gtk.EventBox()
        pevt.add(self.__media_label)
        media_button = widgets.SimpleButton(Gtk.STOCK_INDEX,
                                            self.__get_media_clicked)
        media_button.set_relief(Gtk.ReliefStyle.NORMAL)
        self.pack_start(pevt, False, True, 0)
        self.pack_end(media_button, False, True, 0)
        # Initialize to the current value
        mid = self.__option.get_value()
        media = self.__db.get_object_from_gramps_id(mid)
        self.__update_media(media)
        self.valuekey = self.__option.connect('value-changed',
                                              self.__value_changed)
        # Keep the handler id so clean_up() can disconnect it; it was
        # previously discarded, leaving a connection that blocked
        # garbage collection (cf. GuiPersonOption/GuiFamilyOption).
        self.conkey = self.__option.connect('avail-changed',
                                            self.__update_avail)
        self.__update_avail()
        pevt.set_tooltip_text(self.__option.get_help())
        media_button.set_tooltip_text(_('Select an existing media object'))
    def __get_media_clicked(self, obj): # IGNORE:W0613 - obj is unused
        """
        Handle the button to choose a different media object: open the
        media selector and store whatever the user picks.
        """
        select_class = SelectorFactory('MediaObject')
        sel = select_class(self.__dbstate, self.__uistate, self.__track)
        media = sel.run()
        self.__update_media(media)
    def __update_media(self, media):
        """
        Update the currently selected media object.  The label shows the
        description plus the Gramps ID; a None media resets the option
        value to the empty string.
        """
        if media:
            media_id = media.get_gramps_id()
            txt = "%s [%s]" % (media.get_description(), media_id)
            self.__media_label.set_text( txt )
            self.__option.set_value(media_id)
        else:
            txt = "<i>%s</i>" % _('No image given, click button to select one')
            self.__media_label.set_text( txt )
            self.__media_label.set_use_markup(True)
            self.__option.set_value("")
    def __update_avail(self):
        """
        Update the availability (sensitivity) of this widget.
        """
        avail = self.__option.get_available()
        self.set_sensitive(avail)
    def __value_changed(self):
        """
        Handle the change made programmatically
        """
        mid = self.__option.get_value()
        media = self.__db.get_object_from_gramps_id(mid)
        # Need to disable signals as __update_media() calls set_value()
        # which would launch the 'value-changed' signal which is what
        # we are reacting to here in the first place (don't need the
        # signal repeated)
        self.__option.disable_signals()
        self.__update_media(media)
        self.__option.enable_signals()
    def clean_up(self):
        """
        remove stuff that blocks garbage collection
        """
        self.__option.disconnect(self.valuekey)
        self.__option.disconnect(self.conkey)
        self.__option = None
#-------------------------------------------------------------------------
#
# GuiPersonListOption class
#
#-------------------------------------------------------------------------
class GuiPersonListOption(Gtk.HBox):
    """
    This class displays a widget that allows multiple people from the
    database to be selected.  The option value is a single string of
    whitespace-separated Gramps IDs.
    """
    def __init__(self, option, dbstate, uistate, track, override):
        """
        @param option: The option to display.
        @type option: gen.plug.menu.PersonListOption
        @return: nothing
        """
        GObject.GObject.__init__(self)
        self.__option = option
        self.__dbstate = dbstate
        self.__db = dbstate.get_database()
        self.__uistate = uistate
        self.__track = track
        self.set_size_request(150, 150)
        # Model columns: 0 = display name, 1 = Gramps ID.
        self.__model = Gtk.ListStore(GObject.TYPE_STRING, GObject.TYPE_STRING)
        self.__tree_view = Gtk.TreeView(self.__model)
        col1 = Gtk.TreeViewColumn(_('Name' ), Gtk.CellRendererText(), text=0)
        col2 = Gtk.TreeViewColumn(_('ID' ), Gtk.CellRendererText(), text=1)
        col1.set_resizable(True)
        col2.set_resizable(True)
        col1.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
        col2.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
        col1.set_sort_column_id(0)
        col2.set_sort_column_id(1)
        self.__tree_view.append_column(col1)
        self.__tree_view.append_column(col2)
        self.__scrolled_window = Gtk.ScrolledWindow()
        self.__scrolled_window.add(self.__tree_view)
        self.__scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC,
                                          Gtk.PolicyType.AUTOMATIC)
        self.__scrolled_window.set_shadow_type(Gtk.ShadowType.OUT)
        self.pack_start(self.__scrolled_window, True, True, 0)
        # Populate the model from the option's current value.
        self.__value_changed()
        # now setup the '+' and '-' pushbutton for adding/removing people from
        # the container
        self.__add_person = widgets.SimpleButton(Gtk.STOCK_ADD,
                                                 self.__add_person_clicked)
        self.__del_person = widgets.SimpleButton(Gtk.STOCK_REMOVE,
                                                 self.__del_person_clicked)
        self.__vbbox = Gtk.VButtonBox()
        self.__vbbox.add(self.__add_person)
        self.__vbbox.add(self.__del_person)
        self.__vbbox.set_layout(Gtk.ButtonBoxStyle.SPREAD)
        self.pack_end(self.__vbbox, False, False, 0)
        self.valuekey = self.__option.connect('value-changed', self.__value_changed)
        self.__tree_view.set_tooltip_text(self.__option.get_help())
    def __add_person_clicked(self, obj): # IGNORE:W0613 - obj is unused
        """
        Handle the add person button: run a person selector (excluding
        people already listed), append the choice, and offer to add
        the person's spouse(s) as well.
        """
        # people we already have must be excluded
        # so we don't list them multiple times
        skip_list = set()
        i = self.__model.get_iter_first()
        while (i):
            gid = self.__model.get_value(i, 1) # get the GID stored in column #1
            person = self.__db.get_person_from_gramps_id(gid)
            skip_list.add(person.get_handle())
            i = self.__model.iter_next(i)
        select_class = SelectorFactory('Person')
        sel = select_class(self.__dbstate, self.__uistate,
                           self.__track, skip=skip_list)
        person = sel.run()
        if person:
            name = _nd.display(person)
            gid = person.get_gramps_id()
            self.__model.append([name, gid])
            # if this person has a spouse, ask if we should include the spouse
            # in the list of "people of interest"
            #
            # NOTE: we may want to make this an optional thing, determined
            # by the use of a parameter at the time this class is instatiated
            family_list = person.get_family_handle_list()
            for family_handle in family_list:
                family = self.__db.get_family_from_handle(family_handle)
                if person.get_handle() == family.get_father_handle():
                    spouse_handle = family.get_mother_handle()
                else:
                    spouse_handle = family.get_father_handle()
                if spouse_handle and (spouse_handle not in skip_list):
                    spouse = self.__db.get_person_from_handle(
                                                              spouse_handle)
                    spouse_name = _nd.display(spouse)
                    text = _('Also include %s?') % spouse_name
                    prompt = OptionDialog(_('Select Person'),
                                          text,
                                          _('No'), None,
                                          _('Yes'), None)
                    if prompt.get_response() == Gtk.ResponseType.YES:
                        gid = spouse.get_gramps_id()
                        self.__model.append([spouse_name, gid])
            self.__update_value()
    def __del_person_clicked(self, obj): # IGNORE:W0613 - obj is unused
        """
        Handle the delete person button: remove the row under the
        cursor, if any, and refresh the option value.
        """
        (path, column) = self.__tree_view.get_cursor()
        if (path):
            i = self.__model.get_iter(path)
            self.__model.remove(i)
            self.__update_value()
    def __update_value(self):
        """
        Rebuild the option's space-separated GID string from the rows
        currently in the model.
        """
        gidlist = ''
        i = self.__model.get_iter_first()
        while (i):
            gid = self.__model.get_value(i, 1)
            gidlist = gidlist + gid + ' '
            i = self.__model.iter_next(i)
        # Supress signals so that the set_value() handler
        # (__value_changed()) doesn't get called
        self.__option.disable_signals()
        self.__option.set_value(gidlist)
        self.__option.enable_signals()
    def __value_changed(self):
        """
        Handle the change made programmatically
        """
        value = self.__option.get_value()
        if not isinstance(value, STRTYPE):
            # Convert array into a string
            # (convienence so that programmers can
            # set value using a list)
            value = " ".join(value)
            # Need to change __option value to be the string
            self.__option.disable_signals()
            self.__option.set_value(value)
            self.__option.enable_signals()
        # Remove all entries (the new values will REPLACE
        # rather than APPEND)
        self.__model.clear()
        for gid in value.split():
            person = self.__db.get_person_from_gramps_id(gid)
            if person:
                name = _nd.display(person)
                self.__model.append([name, gid])
    def clean_up(self):
        """
        remove stuff that blocks garbage collection
        """
        self.__option.disconnect(self.valuekey)
        self.__option = None
#-------------------------------------------------------------------------
#
# GuiPlaceListOption class
#
#-------------------------------------------------------------------------
class GuiPlaceListOption(Gtk.HBox):
    """
    This class displays a widget that allows multiple places from the
    database to be selected.  The option value is a single string of
    whitespace-separated Gramps IDs.
    """
    def __init__(self, option, dbstate, uistate, track, override):
        """
        @param option: The option to display.
        @type option: gen.plug.menu.PlaceListOption
        @return: nothing
        """
        GObject.GObject.__init__(self)
        self.__option = option
        self.__dbstate = dbstate
        self.__db = dbstate.get_database()
        self.__uistate = uistate
        self.__track = track
        self.set_size_request(150, 150)
        # Model columns: 0 = place title, 1 = Gramps ID.
        self.__model = Gtk.ListStore(GObject.TYPE_STRING, GObject.TYPE_STRING)
        self.__tree_view = Gtk.TreeView(self.__model)
        col1 = Gtk.TreeViewColumn(_('Place' ), Gtk.CellRendererText(), text=0)
        col2 = Gtk.TreeViewColumn(_('ID' ), Gtk.CellRendererText(), text=1)
        col1.set_resizable(True)
        col2.set_resizable(True)
        col1.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
        col2.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
        col1.set_sort_column_id(0)
        col2.set_sort_column_id(1)
        self.__tree_view.append_column(col1)
        self.__tree_view.append_column(col2)
        self.__scrolled_window = Gtk.ScrolledWindow()
        self.__scrolled_window.add(self.__tree_view)
        self.__scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC,
                                          Gtk.PolicyType.AUTOMATIC)
        self.__scrolled_window.set_shadow_type(Gtk.ShadowType.OUT)
        self.pack_start(self.__scrolled_window, True, True, 0)
        # Populate the model from the option's current value.
        self.__value_changed()
        # now setup the '+' and '-' pushbutton for adding/removing places from
        # the container
        self.__add_place = widgets.SimpleButton(Gtk.STOCK_ADD,
                                                self.__add_place_clicked)
        self.__del_place = widgets.SimpleButton(Gtk.STOCK_REMOVE,
                                                self.__del_place_clicked)
        self.__vbbox = Gtk.VButtonBox()
        self.__vbbox.add(self.__add_place)
        self.__vbbox.add(self.__del_place)
        self.__vbbox.set_layout(Gtk.ButtonBoxStyle.SPREAD)
        self.pack_end(self.__vbbox, False, False, 0)
        self.valuekey = self.__option.connect('value-changed', self.__value_changed)
        self.__tree_view.set_tooltip_text(self.__option.get_help())
    def __add_place_clicked(self, obj): # IGNORE:W0613 - obj is unused
        """
        Handle the add place button: run a place selector (excluding
        places already listed) and append the choice.
        """
        # places we already have must be excluded
        # so we don't list them multiple times
        skip_list = set()
        i = self.__model.get_iter_first()
        while (i):
            gid = self.__model.get_value(i, 1) # get the GID stored in column #1
            place = self.__db.get_place_from_gramps_id(gid)
            skip_list.add(place.get_handle())
            i = self.__model.iter_next(i)
        select_class = SelectorFactory('Place')
        sel = select_class(self.__dbstate, self.__uistate,
                           self.__track, skip=skip_list)
        place = sel.run()
        if place:
            place_name = place.get_title()
            gid = place.get_gramps_id()
            self.__model.append([place_name, gid])
            self.__update_value()
    def __del_place_clicked(self, obj): # IGNORE:W0613 - obj is unused
        """
        Handle the delete place button: remove the row under the
        cursor, if any, and refresh the option value.
        """
        (path, column) = self.__tree_view.get_cursor()
        if (path):
            i = self.__model.get_iter(path)
            self.__model.remove(i)
            self.__update_value()
    def __update_value(self):
        """
        Rebuild the option's space-separated GID string from the rows
        currently in the model.  Signals are suppressed while storing
        the value so that __value_changed() is not re-triggered by our
        own update (it would redundantly clear and repopulate the
        model); this matches GuiPersonListOption.__update_value().
        """
        gidlist = ''
        i = self.__model.get_iter_first()
        while (i):
            gid = self.__model.get_value(i, 1)
            gidlist = gidlist + gid + ' '
            i = self.__model.iter_next(i)
        self.__option.disable_signals()
        self.__option.set_value(gidlist)
        self.__option.enable_signals()
    def __value_changed(self):
        """
        Handle the change made programmatically
        """
        value = self.__option.get_value()
        if not isinstance(value, STRTYPE):
            # Convert array into a string
            # (convienence so that programmers can
            # set value using a list)
            value = " ".join(value)
            # Need to change __option value to be the string
            self.__option.disable_signals()
            self.__option.set_value(value)
            self.__option.enable_signals()
        # Remove all entries (the new values will REPLACE
        # rather than APPEND)
        self.__model.clear()
        for gid in value.split():
            place = self.__db.get_place_from_gramps_id(gid)
            if place:
                place_name = place.get_title()
                self.__model.append([place_name, gid])
    def clean_up(self):
        """
        remove stuff that blocks garbage collection
        """
        self.__option.disconnect(self.valuekey)
        self.__option = None
#-------------------------------------------------------------------------
#
# GuiSurnameColorOption class
#
#-------------------------------------------------------------------------
class GuiSurnameColorOption(Gtk.HBox):
    """
    This class displays a widget that allows multiple surnames to be
    selected from the database, and to assign a colour (not necessarily
    unique) to each one.  The option value is a flat string of
    surname/colour pairs delimited by '\\xb0' (degree sign).
    """
    def __init__(self, option, dbstate, uistate, track, override):
        """
        @param option: The option to display.
        @type option: gen.plug.menu.SurnameColorOption
        @return: nothing
        """
        GObject.GObject.__init__(self)
        self.__option = option
        self.__dbstate = dbstate
        self.__db = dbstate.get_database()
        self.__uistate = uistate
        self.__track = track
        self.set_size_request(150, 150)
        # This will get populated the first time the dialog is run,
        # and used each time after.
        self.__surnames = {}  # list of surnames and count
        # Model columns: 0 = surname, 1 = colour string ('#rrggbb').
        self.__model = Gtk.ListStore(GObject.TYPE_STRING, GObject.TYPE_STRING)
        self.__tree_view = Gtk.TreeView(self.__model)
        self.__tree_view.connect('row-activated', self.__row_clicked)
        col1 = Gtk.TreeViewColumn(_('Surname'), Gtk.CellRendererText(), text=0)
        col2 = Gtk.TreeViewColumn(_('Color'), Gtk.CellRendererText(), text=1)
        col1.set_resizable(True)
        col2.set_resizable(True)
        col1.set_sort_column_id(0)
        col1.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
        col2.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
        self.__tree_view.append_column(col1)
        self.__tree_view.append_column(col2)
        self.scrolled_window = Gtk.ScrolledWindow()
        self.scrolled_window.add(self.__tree_view)
        self.scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC,
                                        Gtk.PolicyType.AUTOMATIC)
        self.scrolled_window.set_shadow_type(Gtk.ShadowType.OUT)
        self.pack_start(self.scrolled_window, True, True, 0)
        self.add_surname = widgets.SimpleButton(Gtk.STOCK_ADD,
                                                self.__add_clicked)
        self.del_surname = widgets.SimpleButton(Gtk.STOCK_REMOVE,
                                                self.__del_clicked)
        self.vbbox = Gtk.VButtonBox()
        self.vbbox.add(self.add_surname)
        self.vbbox.add(self.del_surname)
        self.vbbox.set_layout(Gtk.ButtonBoxStyle.SPREAD)
        self.pack_end(self.vbbox, False, False, 0)
        self.__value_changed()
        self.valuekey = self.__option.connect('value-changed', self.__value_changed)
        self.__tree_view.set_tooltip_text(self.__option.get_help())
    def __add_clicked(self, obj): # IGNORE:W0613 - obj is unused
        """
        Handle the add surname button: run the surname picker (skipping
        surnames already listed) and append each pick with a default
        white colour.
        """
        skip_list = set()
        i = self.__model.get_iter_first()
        while (i):
            surname = self.__model.get_value(i, 0)
            # NOTE(review): encode() returns bytes under Python 3 while the
            # model stores str -- confirm LastNameDialog compares against
            # this encoding when honouring skip_list.
            skip_list.add(surname.encode('iso-8859-1','xmlcharrefreplace'))
            i = self.__model.iter_next(i)
        ln_dialog = LastNameDialog(self.__db, self.__uistate,
                                   self.__track, self.__surnames, skip_list)
        surname_set = ln_dialog.run()
        for surname in surname_set:
            self.__model.append([surname, '#ffffff'])
        self.__update_value()
    def __del_clicked(self, obj): # IGNORE:W0613 - obj is unused
        """
        Handle the delete surname button: remove the row under the
        cursor, if any, and refresh the option value.
        """
        (path, column) = self.__tree_view.get_cursor()
        if (path):
            i = self.__model.get_iter(path)
            self.__model.remove(i)
            self.__update_value()
    def __row_clicked(self, treeview, path, column):
        """
        Handle the case of a row being clicked on: pop up a colour
        chooser for that surname and store the picked colour back into
        the model.
        """
        # get the surname and colour value for this family
        i = self.__model.get_iter(path)
        surname = self.__model.get_value(i, 0)
        colour = Gdk.color_parse(self.__model.get_value(i, 1))
        title = _('Select color for %s') % surname
        colour_dialog = Gtk.ColorSelectionDialog(title)
        colorsel = colour_dialog.get_color_selection()
        colorsel.set_current_color(colour)
        response = colour_dialog.run()
        if response == Gtk.ResponseType.OK:
            colour = colorsel.get_current_color()
            # Scale GDK's 16-bit-per-channel colour down to 8 bits
            # for the '#rrggbb' string.
            colour_name = '#%02x%02x%02x' % (
                int(colour.red  *256/65536),
                int(colour.green*256/65536),
                int(colour.blue *256/65536))
            self.__model.set_value(i, 1, colour_name)
        colour_dialog.destroy()
        self.__update_value()
    def __update_value(self):
        """
        Rebuild the option's value string from the rows currently in
        the model.
        """
        surname_colours = ''
        i = self.__model.get_iter_first()
        while (i):
            surname = self.__model.get_value(i, 0)
            #surname = surname.encode('iso-8859-1','xmlcharrefreplace')
            colour = self.__model.get_value(i, 1)
            # Tried to use a dictionary, and tried to save it as a tuple,
            # but coulnd't get this to work right -- this is lame, but now
            # the surnames and colours are saved as a plain text string
            #
            # Hmmm...putting whitespace between the fields causes
            # problems when the surname has whitespace -- for example,
            # with surnames like "Del Monte".  So now we insert a non-
            # whitespace character which is unlikely to appear in
            # a surname.  (See bug report #2162.)
            surname_colours += surname + '\xb0' + colour + '\xb0'
            i = self.__model.iter_next(i)
        self.__option.set_value( surname_colours )
    def __value_changed(self):
        """
        Handle the change made programmatically
        """
        value = self.__option.get_value()
        if not isinstance(value, STRTYPE):
            # Convert dictionary into a string
            # (convienence so that programmers can
            # set value using a dictionary)
            value_str = ""
            for name in value:
                value_str += "%s\xb0%s\xb0" % (name, value[name])
            value = value_str
            # Need to change __option value to be the string
            self.__option.disable_signals()
            self.__option.set_value(value)
            self.__option.enable_signals()
        # Remove all entries (the new values will REPLACE
        # rather than APPEND)
        self.__model.clear()
        # populate the surname/colour treeview
        #
        # For versions prior to 3.0.2, the fields were delimited with
        # whitespace.  However, this causes problems when the surname
        # also has a space within it.  When populating the control,
        # support both the new and old format -- look for the \xb0
        # delimiter, and if it isn't there, assume this is the old-
        # style space-delimited format.  (Bug #2162.)
        if (value.find('\xb0') >= 0):
            tmp = value.split('\xb0')
        else:
            tmp = value.split(' ')
        while len(tmp) > 1:
            surname = tmp.pop(0)
            colour = tmp.pop(0)
            self.__model.append([surname, colour])
    def clean_up(self):
        """
        remove stuff that blocks garbage collection
        """
        self.__option.disconnect(self.valuekey)
        self.__option = None
#-------------------------------------------------------------------------
#
# GuiDestinationOption class
#
#-------------------------------------------------------------------------
class GuiDestinationOption(Gtk.HBox):
    """
    This class displays an option that allows the user to select a
    DestinationOption (an output file or directory): a text entry plus
    a button that opens a file-chooser dialog.
    """
    def __init__(self, option, dbstate, uistate, track, override):
        """
        @param option: The option to display.
        @type option: gen.plug.menu.DestinationOption
        @return: nothing
        """
        GObject.GObject.__init__(self)
        self.__option = option
        self.__entry = Gtk.Entry()
        self.__entry.set_text(self.__option.get_value())

        # Button with a stock "open" icon that pops up the file chooser.
        self.__button = Gtk.Button()
        img = Gtk.Image()
        img.set_from_stock(Gtk.STOCK_OPEN, Gtk.IconSize.BUTTON)
        self.__button.add(img)
        self.__button.connect('clicked', self.__select_file)

        self.pack_start(self.__entry, True, True, 0)
        self.pack_end(self.__button, False, False, 0)

        # Set up signal handlers when the widget value is changed
        # from user interaction or programmatically.  When handling
        # a specific signal, we need to temporarily block the signal
        # that would call the other signal handler.
        self.changekey = self.__entry.connect('changed', self.__text_changed)
        self.valuekey = self.__option.connect('value-changed',
                                              self.__value_changed)

        self.conkey1 = self.__option.connect('options-changed',
                                             self.__option_changed)
        self.conkey2 = self.__option.connect('avail-changed',
                                             self.__update_avail)
        self.__update_avail()

        self.set_tooltip_text(self.__option.get_help())

    def __option_changed(self):
        """
        Handle a change of the option: keep the displayed name's file
        extension consistent with whether a file or directory is wanted.
        """
        extension = self.__option.get_extension()
        directory = self.__option.get_directory_entry()
        value = self.__option.get_value()

        if not directory and not value.endswith(extension):
            # File mode: make sure the name carries the extension.
            value = value + extension
            self.__option.set_value(value)
        elif directory and value.endswith(extension):
            # Directory mode: strip the now-inappropriate extension.
            value = value[:-len(extension)]
            self.__option.set_value(value)

        self.__entry.set_text(self.__option.get_value())

    def __select_file(self, obj):
        """
        Handle the user's request to select a file (or directory).
        """
        if self.__option.get_directory_entry():
            my_action = Gtk.FileChooserAction.SELECT_FOLDER
        else:
            my_action = Gtk.FileChooserAction.SAVE

        fcd = Gtk.FileChooserDialog(_("Save As"), action=my_action,
                                    buttons=(Gtk.STOCK_CANCEL,
                                             Gtk.ResponseType.CANCEL,
                                             Gtk.STOCK_OPEN,
                                             Gtk.ResponseType.OK))

        name = os.path.abspath(self.__option.get_value())
        if self.__option.get_directory_entry():
            while not os.path.isdir(name):
                # Keep looking up levels to find a valid drive.
                name, tail = os.path.split(name)
                if not name:
                    # Avoid infinite loops: fall back to the current
                    # working directory.  BUGFIX: get_curr_dir must be
                    # *called*; assigning the bare function object made
                    # os.path.isdir() above return False and the next
                    # os.path.split() raise TypeError.
                    name = get_curr_dir()
            fcd.set_current_folder(name)
        else:
            fcd.set_current_name(name)

        status = fcd.run()
        if status == Gtk.ResponseType.OK:
            path = conv_to_unicode(fcd.get_filename())
            if path:
                if not self.__option.get_directory_entry() and \
                   not path.endswith(self.__option.get_extension()):
                    path = path + self.__option.get_extension()
                self.__entry.set_text(uni_to_gui(path))
                self.__option.set_value(path)
        fcd.destroy()

    def __text_changed(self, obj): # IGNORE:W0613 - obj is unused
        """
        Handle the change of the value made by the user.
        """
        self.__option.disable_signals()
        self.__option.set_value(self.__entry.get_text())
        self.__option.enable_signals()

    def __update_avail(self):
        """
        Update the availability (sensitivity) of this widget.
        """
        avail = self.__option.get_available()
        self.set_sensitive(avail)

    def __value_changed(self):
        """
        Handle the change made programmatically.
        """
        self.__entry.handler_block(self.changekey)
        self.__entry.set_text(self.__option.get_value())
        self.__entry.handler_unblock(self.changekey)

    def clean_up(self):
        """
        remove stuff that blocks garbage collection
        """
        self.__option.disconnect(self.valuekey)
        self.__option.disconnect(self.conkey1)
        self.__option.disconnect(self.conkey2)
        self.__option = None
#-------------------------------------------------------------------------
#
# GuiStyleOption class
#
#-------------------------------------------------------------------------
class GuiStyleOption(GuiEnumeratedListOption):
    """
    This class displays a StyleOption: an enumerated list of document
    styles plus a button that opens the style-sheet editor.
    """
    def __init__(self, option, dbstate, uistate, track, override):
        """
        @param option: The option to display.
        @type option: gen.plug.menu.StyleOption
        @return: nothing
        """
        GuiEnumeratedListOption.__init__(self, option, dbstate,
                                         uistate, track)
        self.__option = option

        self.__button = Gtk.Button("%s..." % _("Style Editor"))
        self.__button.connect('clicked', self.__on_style_edit_clicked)
        # BUGFIX: under GTK3/PyGObject, Gtk.Box.pack_end() requires the
        # padding argument; it was missing here (every other Gui*Option
        # in this file passes it) and raised a TypeError.
        self.pack_end(self.__button, False, False, 0)

    def __on_style_edit_clicked(self, *obj):
        """The user has clicked on the 'Edit Styles' button.  Create a
        style sheet editor object and let them play.  When they are
        done, update the displayed styles."""
        # Imported here (not at module level) -- deferring these heavy
        # imports until the button is actually clicked.
        from gramps.gen.plug.docgen import StyleSheetList
        from .report._styleeditor import StyleListDisplay
        style_list = StyleSheetList(self.__option.get_style_file(),
                                    self.__option.get_default_style())
        StyleListDisplay(style_list, None, None)

        new_items = []
        for style_name in style_list.get_style_names():
            new_items.append((style_name, style_name))
        self.__option.set_items(new_items)
#-------------------------------------------------------------------------
#
# GuiBooleanListOption class
#
#-------------------------------------------------------------------------
class GuiBooleanListOption(Gtk.HBox):
    """
    This class displays an option that provides a list of check boxes.
    Each possible value is assigned a value and a description.
    """
    def __init__(self, option, dbstate, uistate, track, override):
        """
        @param option: The option to display.  Its value is a
            comma-separated string of 'True'/'False' flags, one flag
            per description (see __list_changed below).
        @type option: gen.plug.menu.BooleanListOption
        @return: nothing
        """
        GObject.GObject.__init__(self)
        self.__option = option
        self.__cbutton = []  # one Gtk.CheckButton per description, in order
        # The option value is a comma-separated list of 'True'/'False'
        # strings, one per checkbox.
        default = option.get_value().split(',')
        # Use more columns when there are many checkboxes.
        if len(default) < 15:
            COLUMNS = 2 # number of checkbox columns
        else:
            COLUMNS = 3
        column = []
        for i in range(COLUMNS):
            vbox = Gtk.VBox()
            self.pack_start(vbox, True, True, 0)
            column.append(vbox)
            vbox.show()
        counter = 0
        this_column_counter = 0
        ncolumn = 0
        for description in option.get_descriptions():
            button = Gtk.CheckButton(label=description)
            self.__cbutton.append(button)
            if counter < len(default):
                if default[counter] == 'True':
                    button.set_active(True)
            button.connect("toggled", self.__list_changed)
            # show the items vertically, not alternating left and right
            # (if the number is uneven, the left column(s) will have one more)
            column[ncolumn].pack_start(button, True, True, 0)
            button.show()
            counter += 1
            this_column_counter += 1
            # Number of items this column should receive so the leftmost
            # column(s) end up with at most one item more than the rest.
            this_column_gets = (len(default)+(COLUMNS-(ncolumn+1))) // COLUMNS
            if this_column_counter + 1 > this_column_gets:
                ncolumn += 1
                this_column_counter = 0
        self.valuekey = self.__option.connect('value-changed', self.__value_changed)
        self.__option.connect('avail-changed', self.__update_avail)
        self.__update_avail()
        self.set_tooltip_text(self.__option.get_help())
    def __list_changed(self, button):
        """
        Handle the change of the value made by the user.

        Rebuilds the comma-separated 'True'/'False' string from the
        current state of *all* checkboxes and stores it in the option.
        """
        # NOTE(review): the loop variable shadows the 'button' parameter;
        # the parameter is not used after this point, so behaviour is OK.
        value = ''
        for button in self.__cbutton:
            value = value + str(button.get_active()) + ','
        value = value[:len(value)-1]  # drop the trailing comma
        self.__option.disable_signals()
        self.__option.set_value(value)
        self.__option.enable_signals()
    def __update_avail(self):
        """
        Update the availability (sensitivity) of this widget.
        """
        avail = self.__option.get_available()
        self.set_sensitive(avail)
    def __value_changed(self):
        """
        Handle the change made programmatically.

        The value is treated as a mapping from checkbox label to
        'True'/'False' (or bool) -- presumably a dict supplied by a
        programmatic caller; TODO confirm against callers (iterating a
        plain string here would compare single characters to labels).
        """
        value = self.__option.get_value()
        self.__option.disable_signals()
        for button in self.__cbutton:
            for key in value:
                if key == button.get_label():
                    bool_value = (value[key] == "True" or value[key] == True)
                    button.set_active(bool_value)
        # Update __option value so that it's correct
        self.__list_changed(None)
        self.__option.enable_signals()
    def clean_up(self):
        """
        remove stuff that blocks garbage collection
        """
        self.__option.disconnect(self.valuekey)
        self.__option = None
#-----------------------------------------------------------------------------#
# #
# Table mapping menu types to gui widgets used in make_gui_option function #
# #
#-----------------------------------------------------------------------------#
from gramps.gen.plug import menu as menu
# Dispatch table used by make_gui_option().  Each entry is
# (option_class, label, gui_widget_class):
#   * option_class     -- the gen.plug.menu option type, matched with
#                         isinstance(), so entries are tried in order
#                         and the catch-all menu.Option must come last;
#   * label            -- whether the caller should display the option's
#                         text label beside the widget;
#   * gui_widget_class -- the Gui* widget class used to display it.
_OPTIONS = (
    (menu.BooleanListOption, True, GuiBooleanListOption),
    (menu.BooleanOption, False, GuiBooleanOption),
    (menu.ColorOption, True, GuiColorOption),
    (menu.DestinationOption, True, GuiDestinationOption),
    (menu.EnumeratedListOption, True, GuiEnumeratedListOption),
    (menu.FamilyOption, True, GuiFamilyOption),
    (menu.MediaOption, True, GuiMediaOption),
    (menu.NoteOption, True, GuiNoteOption),
    (menu.NumberOption, True, GuiNumberOption),
    (menu.PersonListOption, True, GuiPersonListOption),
    (menu.PersonOption, True, GuiPersonOption),
    (menu.PlaceListOption, True, GuiPlaceListOption),
    (menu.StringOption, True, GuiStringOption),
    (menu.StyleOption, True, GuiStyleOption),
    (menu.SurnameColorOption, True, GuiSurnameColorOption),
    (menu.TextOption, True, GuiTextOption),
    # This entry must be last!
    (menu.Option, None, None),
    )
del menu
def make_gui_option(option, dbstate, uistate, track, override=False):
    """
    Stand-alone function so that Options can be used in other
    ways, too.  Takes an Option and returns a (widget, label) pair.

    override: if True will override the GuiOption's normal behavior
    (in a GuiOption-dependant fashion, for instance in a GuiPersonOption
    it will force the use of the options's value to set the GuiOption)
    """
    label, widget = True, None
    pmgr = GuiPluginManager.get_instance()
    external_options = pmgr.get_external_opt_dict()
    if option.__class__ in external_options:
        # A plugin registered its own widget class for this option type.
        widget = external_options[option.__class__]
    else:
        # Fall back to the built-in dispatch table; the catch-all
        # menu.Option entry at its end guarantees a match.
        for option_type, label, widget in _OPTIONS:
            if isinstance(option, option_type):
                break
        else:
            raise AttributeError(
                "can't make GuiOption: unknown option type: '%s'" % option)
    if widget:
        widget = widget(option, dbstate, uistate, track, override)
    return widget, label
def add_gui_options(dialog):
    """
    Stand-alone function to add user options to the GUI.
    """
    if not hasattr(dialog.options, "menu"):
        return
    menu = dialog.options.menu
    options_dict = dialog.options.options_dict
    for category in menu.get_categories():
        for name in menu.get_option_names(category):
            option = menu.get_option(category, name)

            # An xml-saved value overrides the option's default:
            if name in options_dict:
                option.set_value(options_dict[name])

            widget, label = make_gui_option(
                option, dialog.dbstate, dialog.uistate, dialog.track)
            if widget is None:
                continue
            frame_label = option.get_label() if label else ""
            dialog.add_frame_option(category, frame_label, widget)
|
pmghalvorsen/gramps_branch
|
gramps/gui/plug/_guioptions.py
|
Python
|
gpl-2.0
| 74,244
|
[
"Brian"
] |
047f5876027862fbc0998268503901ca739ddca9eba53123d2b77e11a1d8faad
|
#!/usr/bin/env python
# Author: Andrew Jewett (jewett.aij at g mail)
# http://www.chem.ucsb.edu/~sheagroup
# License: 3-clause BSD License (See LICENSE.TXT)
# Copyright (c) 2011, Regents of the University of California
# All rights reserved.
"""
lttree_check.py
The original template file format supports any variable types or file names.
However if you plan to process template files using lttree.py to create
LAMMPS-readable input/data files, then variables and file names must obey certain
naming conventions. This code attempts to ensure these conventions are obeyed
and to make sure that necessary variables are defined.
-- This code checks static variables (@) and basic LAMMPS syntax --
This program makes an attempt to check that the variables and file names
which appear in an "lttree" file are not misspelled (or miscapitalised).
It also attempts to check that LAMMPS syntax conventions are obeyed.
(It checks that the appropriate type of variable is located in each column).
It also attempts to check that all of the needed coeffs are defined.
-- This code does NOT check instance variables ($) --
This code does not check to make sure that all references to instance variables
(such as $atom, $bond, $angle, $dihedral, $improper or $mol variables) are valid
This means a user's input script command (like the "group" command) could refer
to an $atom or $mol which was never defined, and this code would not detect it.
(Why: Checking for instance variables requires building the entire instance tree
and checking references uses up additional memory after that. I do not do this
because memory is often very scarce after building the instance tree.)
Instead, we could check for these kinds of errors when post-processing of
the files generated by lttree.py or moltemplate.sh.
-- This is not the prettiest code I've ever written. --
"""
import sys

#from ttree import *
from lttree_styles import *
from lttree import *
from ttree_lex import InputError

# Require at least Python 2.6.  BUGFIX: compare version_info tuples
# rather than the sys.version *string* -- lexicographic string
# comparison mis-orders version numbers (e.g. '2.10' < '2.6' is True).
if sys.version_info < (2, 6):
    raise InputError('Error: Alas, you must upgrade to a newer version of python.')

#g_no_check_msg = \
#    "(If this error message is wrong, and/or you would like to continue anyway,\n"+\
#    "try running moltemplate again using the \"-nocheck\" command-line-argument.)\n"

# Hint appended to error messages: how the user can disable these checks.
g_no_check_msg = \
    '(To continue anyway, run moltemplate using the \"-nocheck\" argument.)\n'
def CheckCommonVarNames(prefix, descr_str, suffix, srcloc):
    """
    Check the name of variables in an lttree-file to confirm that they
    follow the conventions used by lttree.

    Almost any variable/category name is permitted.  However a name
    which matches a reserved lttree category when lowercased, yet is
    not an exact match (e.g. a capitalisation typo), raises InputError.
    A completely unrecognised category only prints a warning to stderr.

    @param prefix:    variable prefix (unused in this check)
    @param descr_str: the variable descriptor string to examine
    @param suffix:    variable suffix (unused in this check)
    @param srcloc:    source-file location, used to build error messages
    """
    # Extract the category name from the descriptor.  (Only cat_name is
    # examined here; the two token lists are unused.)
    cat_name, cat_ptkns, leaf_ptkns = \
        DescrToCatLeafPtkns(descr_str,
                            srcloc)
    # For each reserved category: if the name matches ignoring case but
    # is not an exact match, it is almost certainly a typo -- complain.
    if (cat_name.lower()=='mol'):
        if (cat_name != 'mol'):
            raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                             'Variable category: \"'+cat_name+'\" does not match, yet overlaps\n'+
                             'closely with a reserved lttree variable category.\n'
                             'Perhaps you meant \"mol\"?')
    elif (cat_name.lower()=='group'):
        if (cat_name != 'group'):
            raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                             'Variable category: \"'+cat_name+'\" does not match, yet overlaps\n'+
                             'closely with a reserved lttree variable category.\n'
                             'Perhaps you meant \"group\"?')
    elif (cat_name.lower()=='fix'):
        if (cat_name != 'fix'):
            raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                             'Variable category: \"'+cat_name+'\" does not match, yet overlaps\n'+
                             'closely with a reserved lttree variable category.\n'
                             'Use \"fix\" instead.')
    elif (cat_name.lower()=='atom'):
        if (cat_name != 'atom'):
            raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                             'Illegal lttree variable category: \"'+cat_name+'\"\n'+
                             'Use \"atom\" instead.')
    elif (cat_name.lower()=='bond'):
        if (cat_name != 'bond'):
            raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                             'Variable category: \"'+cat_name+'\" does not match, yet overlaps\n'+
                             'closely with a reserved lttree variable category.\n'
                             'Use \"bond\" instead.')
    elif (cat_name.lower()=='angle'):
        if (cat_name != 'angle'):
            raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                             'Variable category: \"'+cat_name+'\" does not match, yet overlaps\n'+
                             'closely with a reserved lttree variable category.\n'
                             'Use \"angle\" instead.')
    elif (cat_name.lower()=='dihedral'):
        if (cat_name != 'dihedral'):
            raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                             'Variable category: \"'+cat_name+'\" does not match, yet overlaps\n'+
                             'closely with a reserved lttree variable category.\n'
                             'Use \"dihedral\" instead.')
    elif (cat_name.lower()=='improper'):
        if (cat_name != 'improper'):
            raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                             'Variable category: \"'+cat_name+'\" does not match, yet overlaps\n'+
                             'closely with a reserved lttree variable category.\n'
                             'Use \"improper\" instead.')
    else:
        # Unknown category: warn but do not fail (any name is allowed).
        sys.stderr.write('-----------------------------------------------------\n'+
                         'WARNING: in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                         '    Unrecognised template variable category: \"'+cat_name+'\"\n'+
                         '-----------------------------------------------------\n')
def CheckDataFileNames(filename,
srcloc,
write_command,
fnames_found):
N_data_prefix = len(data_prefix)
#data_prefix_no_space = data_prefix.rstrip()
N_data_prefix_no_space = len(data_prefix)
section_name = filename[N_data_prefix:]
if ((section_name.lower() == 'atom') or
(section_name.lower() == 'atoms')):
if (filename != data_atoms):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_atoms+'\"?')
elif (write_command == 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write(\"'+filename+'\") instead.\n')
elif ((section_name.lower() == 'velocities') or
(section_name.lower() == 'velocity')):
if (filename != data_velocities):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_velocities+'\"?')
elif (write_command == 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write(\"'+filename+'\") instead.\n')
elif ((section_name.lower() == 'mass') or
(section_name.lower() == 'masses')):
if (filename != data_masses):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_masses+'\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif ((section_name.lower() == 'ellipsoids') or
(section_name.lower() == 'ellipsoid') or
(section_name.lower() == 'elipsoids') or
(section_name.lower() == 'elipsoid')):
if (filename != data_ellipsoids):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_ellipsoids+'\"?')
elif (write_command == 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write(\"'+filename+'\") instead.\n')
elif ((section_name.lower() == 'triangle') or
(section_name.lower() == 'triangles')):
if (filename != data_triangles):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_triangles+'\"?')
elif (write_command == 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write(\"'+filename+'\") instead.\n')
elif ((section_name.lower() == 'line') or
(section_name.lower() == 'lines')):
if (filename != data_lines):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_lines+'\"?')
elif (write_command == 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write(\"'+filename+'\") instead.\n')
elif ((section_name.lower().find('pair coef') == 0) or
(section_name.lower().find('pair_coef') == 0) or
(section_name.lower().find('paircoef') == 0) or
(section_name.lower().find('pair by type') == 0) or
(section_name.lower().find('pair bytype') == 0) or
(section_name.lower().find('pair_by_type') == 0) or
(section_name.lower().find('pair_bytype') == 0) or
(section_name.lower().find('pairbytype') == 0)):
if (filename != data_pair_coeffs):
err_msg = 'Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+\
'Output file name (\"'+filename+'\") does not match,\n'+\
'yet overlaps closely with reserved lttree-file name.\n'+\
'Perhaps you meant \"'+data_pair_coeffs+'\"?'
if ((section_name.lower().find('by type') != -1) or
(section_name.lower().find('by_type') != -1) or
(section_name.lower().find('bytype') != -1)):
err_msg += '\n (Note: "pair" parameters are always assigned by type.\n'+\
' There\'s no need to specify \"by type\")'
raise InputError(err_msg)
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif ((section_name.lower().find('bond coef') == 0) or
(section_name.lower().find('bond_coef') == 0) or
(section_name.lower().find('bondcoef') == 0)):
if (filename != data_bond_coeffs):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_bond_coeffs+'\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif ((section_name.lower().find('angle coef') == 0) or
(section_name.lower().find('angle_coef') == 0) or
(section_name.lower().find('anglecoef') == 0)):
if (filename != data_angle_coeffs):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_angle_coeffs+'\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif ((section_name.lower().find('dihedral coef') == 0) or
(section_name.lower().find('dihedral_coef') == 0) or
(section_name.lower().find('dihedralcoef') == 0)):
if (filename != data_dihedral_coeffs):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_dihedral_coeffs+'\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif ((section_name.lower().find('improper coef') == 0) or
(section_name.lower().find('improper_coef') == 0) or
(section_name.lower().find('impropercoef') == 0)):
if (filename != data_improper_coeffs):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_improper_coeffs+'\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
# -- class2 data sections --
elif ((section_name.lower().find('bondbond coef') == 0) or
(section_name.lower().find('bondbond_coef') == 0) or
(section_name.lower().find('bondbondcoef') == 0)):
if (filename != data_bondbond_coeffs):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_bondbond_coeffs+'\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif ((section_name.lower().find('bondangle coef') == 0) or
(section_name.lower().find('bondangle_coef') == 0) or
(section_name.lower().find('bondanglecoef') == 0)):
if (filename != data_bondangle_coeffs):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_bondangle_coeffs+'\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif ((section_name.lower().find('middlebondtorsion coef') == 0) or
(section_name.lower().find('middlebondtorsion_coef') == 0) or
(section_name.lower().find('middlebondtorsioncoef') == 0) or
(section_name.lower().find('middlebondtorision coef') == 0) or
(section_name.lower().find('middlebondtorision_coef') == 0) or
(section_name.lower().find('middlebondtorisioncoef') == 0)):
if (filename != data_middlebondtorsion_coeffs):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_middlebondtorsion_coeffs+'\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif ((section_name.lower().find('endbondtorsion coef') == 0) or
(section_name.lower().find('endbondtorsion_coef') == 0) or
(section_name.lower().find('endbondtorsioncoef') == 0) or
(section_name.lower().find('endbondtorision coef') == 0) or
(section_name.lower().find('endbondtorision_coef') == 0) or
(section_name.lower().find('endbondtorisioncoef') == 0)):
if (filename != data_endbondtorsion_coeffs):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_endbondtorsion_coeffs+'\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif ((section_name.lower().find('angletorsion coef') == 0) or
(section_name.lower().find('angletorsion_coef') == 0) or
(section_name.lower().find('angletorsioncoef') == 0) or
(section_name.lower().find('angletorision coef') == 0) or
(section_name.lower().find('angletorision_coef') == 0) or
(section_name.lower().find('angletorisioncoef') == 0)):
if (filename != data_angletorsion_coeffs):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_angletorsion_coeffs+'\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif ((section_name.lower().find('angleangletorsion coef') == 0) or
(section_name.lower().find('angleangletorsion_coef') == 0) or
(section_name.lower().find('angleangletorsioncoef') == 0) or
(section_name.lower().find('angleangletorision coef') == 0) or
(section_name.lower().find('angleangletorision_coef') == 0) or
(section_name.lower().find('angleangletorisioncoef') == 0)):
if (filename != data_angleangletorsion_coeffs):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_angleangletorsion_coeffs+'\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif ((section_name.lower().find('bondbond13 coef') == 0) or
(section_name.lower().find('bondbond13_coef') == 0) or
(section_name.lower().find('bondbond13coef') == 0)):
if (filename != data_bondbond13_coeffs):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_bondbond13_coeffs+'\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif ((section_name.lower().find('angleangle coef') == 0) or
(section_name.lower().find('angleangle_coef') == 0) or
(section_name.lower().find('angleanglecoef') == 0)):
if (filename != data_angleangle_coeffs):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_angleangle_coeffs+'\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif ((section_name.lower() == 'bonds by type') or
(section_name.lower() == 'bonds bytype') or
(section_name.lower() == 'bonds_by_type') or
(section_name.lower() == 'bonds_bytype') or
(section_name.lower() == 'bondsbytype') or
(section_name.lower() == 'bond by type') or
(section_name.lower() == 'bond bytype') or
(section_name.lower() == 'bond_by_type') or
(section_name.lower() == 'bond_bytype') or
(section_name.lower() == 'bondbytype')):
if (filename != data_bonds_by_type):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_bonds_by_type+'\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif ((section_name.lower() == 'angles by type') or
(section_name.lower() == 'angles bytype') or
(section_name.lower() == 'angles_by_type') or
(section_name.lower() == 'angles_bytype') or
(section_name.lower() == 'anglesbytype') or
(section_name.lower() == 'angle by type') or
(section_name.lower() == 'angle bytype') or
(section_name.lower() == 'angle_by_type') or
(section_name.lower() == 'angle_bytype') or
(section_name.lower() == 'anglebytype')):
if (filename != data_angles_by_type):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_angles_by_type+'\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif ((section_name.lower() == 'dihedrals by type') or
(section_name.lower() == 'dihedrals bytype') or
(section_name.lower() == 'dihedrals_by_type') or
(section_name.lower() == 'dihedrals_bytype') or
(section_name.lower() == 'dihedralsbytype') or
(section_name.lower() == 'dihedral by type') or
(section_name.lower() == 'dihedral bytype') or
(section_name.lower() == 'dihedral_by_type') or
(section_name.lower() == 'dihedral_bytype') or
(section_name.lower() == 'dihedralbytype')):
if (filename != data_dihedrals_by_type):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_dihedrals_by_type+'\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif ((section_name.lower() == 'impropers by type') or
(section_name.lower() == 'impropers bytype') or
(section_name.lower() == 'impropers_by_type') or
(section_name.lower() == 'impropers_bytype') or
(section_name.lower() == 'impropersbytype') or
(section_name.lower() == 'improper by type') or
(section_name.lower() == 'improper bytype') or
(section_name.lower() == 'improper_by_type') or
(section_name.lower() == 'improper_bytype') or
(section_name.lower() == 'improperbytype')):
if (filename != data_impropers_by_type):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_impropers_by_type+'\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif ((section_name.lower() == 'bonds') or
(section_name.lower() == 'bond')):
if (filename != data_bonds):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_bonds+'\"?')
elif (write_command == 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write(\"'+filename+'\") instead.\n')
elif ((section_name.lower().find('bond list') == 0) or
(section_name.lower().find('bonds list') == 0) or
(section_name.lower().find('bond_list') == 0) or
(section_name.lower().find('bonds_list') == 0)):
if (filename != data_bond_list):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_bonds_by_type+'\"?')
elif (write_command != 'write'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write(\"'+filename+'\") instead.\n')
elif ((section_name.lower() == 'angles') or
(section_name.lower() == 'angle')):
if (filename != data_angles):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_angles+'\"?')
elif (write_command == 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write(\"'+filename+'\") instead.\n')
elif ((section_name.lower() == 'dihedrals') or
(section_name.lower() == 'dihedral')):
if (filename != data_dihedrals):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_dihedrals+'\"?')
elif (write_command == 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write(\"'+filename+'\") instead.\n')
elif ((section_name.lower() == 'impropers') or
(section_name.lower() == 'improper')):
if (filename != data_impropers):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_impropers+'\"?')
elif (write_command == 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write(\"'+filename+'\") instead.\n')
elif ((section_name.lower() == 'box boundaries') or
(section_name.lower() == 'box boundary') or
(section_name.lower() == 'boundaries') or
(section_name.lower() == 'boundary') or
(section_name.lower() == 'boundary conditions') or
(section_name.lower() == 'periodic boundaries') or
(section_name.lower() == 'periodic boundary conditions') or
(section_name.lower() == 'periodic_boundaries') or
(section_name.lower() == 'periodic_boundary_conditions') or
(section_name.lower() == 'pbc')):
if ((filename != data_boundary) and
(filename != data_pbc)):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_boundary+'\"?\n'
'(Specify periodic boundary conditions this way.)')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif (filename == data_pbc):
sys.stderr.write('WARNING: write_once(\"'+data_pbc+'\") is depreciated.\n'
' Use write_once(\"'+data_boundary+'\") instead.\n')
def CheckCommonFileNames(filename,
                         srcloc,
                         write_command,
                         filenames_found):
    """
    Verify that the file name used in a write() or write_once() command
    follows the conventions used by lttree.  Users may write to (almost)
    any file, but a name which nearly collides with one of lttree's
    reserved file names is assumed to be a typo and raises an InputError.
    """
    filenames_found.add(filename)
    n_pre = len(data_prefix)               # length of the reserved prefix (with trailing space)
    n_pre_ns = len(data_prefix_no_space)   # length of the same prefix without the space
    # Guard: reserved prefix typed with the wrong capitalization.
    if ((filename[:n_pre].lower() == data_prefix.lower()) and
        (filename[:n_pre] != data_prefix)):
        raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                         'The beginning of output file (\"'+filename+'\")\n'
                         'does not match yet overlaps closely with a reserved lttree-file name prefix.\n'
                         '(\"'+data_prefix+'\"). Perhaps you meant \"'+data_prefix+filename[n_pre:]+'\"?')
    lfname = filename.lower()
    if filename[:n_pre_ns] == data_prefix_no_space:
        # The name begins with the prefix (ignoring the space).  If the space
        # is present too, hand the name off for a detailed per-section check;
        # otherwise the user probably forgot the space.
        if filename[:n_pre] == data_prefix:
            CheckDataFileNames(filename,
                               srcloc,
                               write_command,
                               filenames_found)
        else:
            raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                             'The beginning of output file (\"'+filename+'\")\n'
                             'does not match yet overlaps closely with a reserved lttree-file name prefix.\n'
                             '(\"'+data_prefix+'\"). Perhaps you meant \"'+data_prefix+filename[n_pre_ns:]+'\"?')
    elif lfname in ('box boundaries',
                    'box boundary',
                    'boundaries',
                    'boundary',
                    'boundary conditions',
                    'periodic boundaries',
                    'periodic boundary conditions',
                    'periodic_boundaries',
                    'periodic_boundary_conditions',
                    'pbc'):
        # The user almost certainly wanted the reserved boundary file
        # (for one thing, they forgot the data_prefix).
        raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                         'Output file name (\"'+filename+'\") does not match,\n'
                         'yet overlaps closely with reserved lttree-file name.\n'
                         'Perhaps you meant \"'+data_boundary+'\"?\n'
                         '(Specify periodic boundary conditions this way.)')
    elif lfname in ('init',
                    'in init',
                    'ininit',
                    'initialize',
                    'in initialize',
                    'ininitialize'):
        if filename != in_init:
            raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                             'Output file name (\"'+filename+'\") does not match,\n'
                             'yet overlaps closely with reserved lttree-file name.\n'
                             'Perhaps you meant \"'+in_init+'\"?')
    elif lfname in ('settings',
                    'in settings',
                    'insettings'):
        if filename != in_settings:
            raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                             'Output file name (\"'+filename+'\") does not match,\n'
                             'yet overlaps closely with reserved lttree-file name.\n'
                             'Perhaps you meant \"'+in_settings+'\"?')
    elif lfname in ('set_coords',
                    'set coords',
                    'setcoords',
                    'in set_coords',
                    'in set coords',
                    'in setcoords'):
        if filename != in_set_coords:
            raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                             'Output file name (\"'+filename+'\") does not match,\n'
                             'yet overlaps closely with reserved lttree-file name.\n'
                             'Perhaps you meant \"'+in_set_coords+'\"?')
def CheckSyntaxCheap(lex):
    """
    Perform a quick ("cheap") syntax check on a moltemplate/lttree input
    stream.

    Scans the token stream for write() and write_once() commands, verifies
    that each one is well formed (an optional parenthesized file name
    followed by a '{'-delimited template), checks each file name against
    lttree's reserved-name conventions (CheckCommonFileNames), and checks
    the variable references inside each template (CheckCommonVarNames).
    Afterwards, it warns on stderr about important reserved files which
    were never written to.

    The "lex" argument is a file or input stream which has been converted
    to a "TemplateLexer" object (similar to python's built-in shlex lexer).
    """
    fnames_found = set()
    while True:
        command = lex.get_token()
        if command == lex.eof:
            break
        if ((command == 'write') or (command == 'write_once')):
            open_paren = lex.get_token()
            if open_paren == '{':
                # ...then the user neglected to specify the "filename"
                # argument.  In that case, supply the default, ''.
                # (which is shorthand for the standard out in this case)
                open_curly = open_paren[0]
                open_paren = ''
                close_paren = ''
                filename = ''
                srcloc = lex.GetSrcLoc()
            else:
                filename = lex.get_token()
                if filename == ')':
                    # e.g. "write() {...}":  an explicitly empty file name.
                    # (BUGFIX: this used to read "filename == ''", a no-op
                    #  comparison, which left filename set to ')'.)
                    filename = ''
                    close_paren = ')'
                else:
                    close_paren = lex.get_token()
                open_curly = lex.get_token()
                srcloc = lex.GetSrcLoc()
                if ((open_curly != '{') or
                    ((open_paren == '') and (close_paren != '')) or
                    ((open_paren == '(') and (close_paren != ')'))):
                    raise InputError('Error: in '+lex.error_leader()+'\n\n'
                                     'Syntax error at beginning of '+command+' command.')
            # Strip any quotes surrounding the file name.
            filename = RemoveOuterQuotes(filename, lex.quotes)
            CheckCommonFileNames(filename, lex.GetSrcLoc(), command, fnames_found)
            tmpl_contents = lex.ReadTemplate()
            StaticObj.CleanupReadTemplate(tmpl_contents, lex)
            # Check every variable reference appearing in the template.
            for entry in tmpl_contents:
                if (type(entry) is VarRef):
                    CheckCommonVarNames(entry.prefix,
                                        entry.descr_str,
                                        entry.suffix,
                                        entry.srcloc)
    # Warn about the most important reserved files if they were never written.
    if (data_atoms not in fnames_found):
        sys.stderr.write('WARNING: \"'+data_atoms+'\" file not found\n')
    if (data_masses not in fnames_found):
        sys.stderr.write('WARNING: \"'+data_masses+'\" file not found\n')
    if (in_init not in fnames_found):
        sys.stderr.write('WARNING: \"'+in_init+'\" file not found\n')
    if (in_settings not in fnames_found):
        sys.stderr.write('WARNING: \"'+in_settings+'\" file not found\n')
def CheckSyntaxStatic(context_node,
root_node,
atom_column_names,
data_pair_coeffs_defined,
data_bond_coeffs_defined,
data_angle_coeffs_defined,
data_dihedral_coeffs_defined,
data_improper_coeffs_defined,
in_pair_coeffs_defined,
in_bond_coeffs_defined,
in_angle_coeffs_defined,
in_dihedral_coeffs_defined,
in_improper_coeffs_defined,
search_instance_commands):
if search_instance_commands:
assert(isinstance(context_node, StaticObj))
commands = context_node.instance_commands
else:
# Note: Leaf nodes contain no commands, so skip them
if (not hasattr(context_node, 'commands')):
return
# Otherwise process their commands
commands = context_node.commands
for command in commands:
if isinstance(command, WriteFileCommand):
filename = command.filename
if filename == None: # (The "create_var" command causes this)
pass
elif (filename.find(in_prefix) == 0): #if filename begins with "In "
CheckInFileSyntax(command.tmpl_list,
root_node,
in_pair_coeffs_defined,
in_bond_coeffs_defined,
in_angle_coeffs_defined,
in_dihedral_coeffs_defined,
in_improper_coeffs_defined)
elif filename == 'Data Atoms':
table = TableFromTemplate(command.tmpl_list,
[[' ', '\t', '\r'], '\n'],
[True, False])
for i in range(0, len(table)):
assert(hasattr(table[i], '__len__'))
if len(table[i]) == 0:
pass # skip blank lines
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass # skip comment lines
else:
syntax_err = False
if len(table[i]) < len(atom_column_names):
syntax_err = True
else:
syntax_err = False
for j in range(0, len(atom_column_names)):
if ((atom_column_names[j].lower() == 'atom-id') and
(not ((j < len(table[i])) and
isinstance(table[i][j], VarRef) and
(table[i][j].prefix in ('$','${')) and
(ExtractCatName(table[i][j].descr_str) == 'atom')))):
syntax_err = True
elif ((atom_column_names[j].lower() == 'molecule-id') and
(not ((j < len(table[i])) and
isinstance(table[i][j], VarRef) and
(table[i][j].prefix in ('$','${')) and
(ExtractCatName(table[i][j].descr_str) == 'mol')))):
syntax_err = True
elif ((atom_column_names[j].lower() == 'atom-type') and
(not ((j < len(table[i])) and
(isinstance(table[i][j], VarRef)) and
(table[i][j].prefix in ('@', '@{')) and
(table[i][j].nptr.cat_name == 'atom') and
(table[i][j].nptr.cat_node == root_node)))):
syntax_err = True
if syntax_err:
correct_rows_list = [s for s in atom_column_names]
for j in range(0, len(correct_rows_list)):
if correct_rows_list[j].lower() == 'atom-id':
correct_rows_list[j] = '$atom:id'
elif correct_rows_list[j].lower() == 'atom-type':
correct_rows_list[j] = '@atom:type'
elif correct_rows_list[j].lower() == 'molecule-id':
correct_rows_list[j] = '$mol:id'
correct_rows_msg = ' '.join(correct_rows_list)
raise InputError('----------------------------------------------------\n'+
' Syntax error near '+
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno)+'\n'
' Invalid "Data Atoms" syntax.\n'+
'Each line of the \"Data Atoms\" section should have this format:\n\n'
' '+correct_rows_msg+'\n\n'
'You may have forgotten to specify the LAMMPS atom_style.\n'+
'(You can do this running moltemplate with the -atom-style _style_ argument.)\n'+
'----------------------------------------------------\n'+
g_no_check_msg)
elif filename == 'Data Bonds':
table = TableFromTemplate(command.tmpl_list,
[[' ', '\t', '\r'], '\n'],
[True, False])
for i in range(0, len(table)):
syntax_err = False
assert(hasattr(table[i], '__len__'))
if len(table[i]) > 0:
if ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass
else:
if len(table[i]) < 4:
syntax_err = True
table_entry = table[i][0]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'bond'))):
syntax_err = True
if len(table[i]) > 1:
table_entry = table[i][1]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('@', '@{')) and
(table_entry.nptr.cat_name == 'bond') and
(table_entry.nptr.cat_node == root_node))):
syntax_err = True
if len(table[i]) > 2:
table_entry = table[i][2]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if len(table[i]) > 3:
table_entry = table[i][3]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if syntax_err:
raise InputError('----------------------------------------------------\n'+
' Syntax error near '+
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno)+'\n'
' Incorrect "Data Bonds" syntax.\n'+
'Each line of the \"Data Bonds\" section should have this format:\n\n'
' $bond:id @bond:type $atom:id1 $atom:id2\n'+
'----------------------------------------------------\n'+
g_no_check_msg)
elif filename == 'Data Bond List':
table = TableFromTemplate(command.tmpl_list,
[[' ', '\t', '\r'], '\n'],
[True, False])
for i in range(0, len(table)):
syntax_err = False
assert(hasattr(table[i], '__len__'))
if len(table[i]) > 0:
if ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass
else:
if len(table[i]) < 3:
syntax_err = True
table_entry = table[i][0]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'bond'))):
syntax_err = True
if len(table[i]) > 1:
table_entry = table[i][1]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if len(table[i]) > 2:
table_entry = table[i][2]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if syntax_err:
raise InputError('----------------------------------------------------\n'+
' Syntax error near '+
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno)+'\n'
' Incorrect "Data Bond List" syntax.\n'+
'Each lines in this section should have this format:\n\n'
' $bond:id $atom:id1 $atom:id2\n'+
'----------------------------------------------------\n'+
g_no_check_msg)
elif filename == 'Data Angles':
table = TableFromTemplate(command.tmpl_list,
[[' ', '\t', '\r'], '\n'],
[True, False])
for i in range(0, len(table)):
syntax_err = False
assert(hasattr(table[i], '__len__'))
if len(table[i]) > 0:
if ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass
else:
if len(table[i]) < 5:
syntax_err = True
table_entry = table[i][0]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'angle'))):
syntax_err = True
if len(table[i]) > 1:
table_entry = table[i][1]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('@', '@{')) and
(table_entry.nptr.cat_name == 'angle') and
(table_entry.nptr.cat_node == root_node))):
syntax_err = True
if len(table[i]) > 2:
table_entry = table[i][2]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if len(table[i]) > 3:
table_entry = table[i][3]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if len(table[i]) > 4:
table_entry = table[i][4]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if syntax_err:
raise InputError('----------------------------------------------------\n'+
' Syntax error near '+
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno)+'\n'
' Incorrect "Data Angles" syntax.\n'+
'Each line of the \"Data Angles\" section should have this format:\n\n'
' $angle:id @angle:type $atom:id1 $atom:id2 $atom:id3\n'+
'----------------------------------------------------\n\n'+
g_no_check_msg)
elif filename == 'Data Dihedrals':
table = TableFromTemplate(command.tmpl_list,
[[' ', '\t', '\r'], '\n'],
[True, False])
for i in range(0, len(table)):
syntax_err = False
assert(hasattr(table[i], '__len__'))
if len(table[i]) > 0:
if ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass
else:
if len(table[i]) < 6:
syntax_err = True
table_entry = table[i][0]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'dihedral'))):
syntax_err = True
if len(table[i]) > 1:
table_entry = table[i][1]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('@', '@{')) and
(table_entry.nptr.cat_name == 'dihedral') and
(table_entry.nptr.cat_node == root_node))):
syntax_err = True
if len(table[i]) > 2:
table_entry = table[i][2]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if len(table[i]) > 3:
table_entry = table[i][3]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if len(table[i]) > 4:
table_entry = table[i][4]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if len(table[i]) > 5:
table_entry = table[i][5]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if syntax_err:
raise InputError('----------------------------------------------------\n'+
' Syntax error near '+
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno)+'\n'
' Incorrect "Data Dihedrals" syntax.\n'+
'Each line of the \"Data Dihedrals\" section should have this format:\n\n'
' $dihedral:id @dihedral:type $atom:id1 $atom:id2 $atom:id3 $atom:id4\n'+
'----------------------------------------------------\n'+
g_no_check_msg)
elif filename == 'Data Impropers':
table = TableFromTemplate(command.tmpl_list,
[[' ', '\t', '\r'], '\n'],
[True, False])
for i in range(0, len(table)):
syntax_err = False
assert(hasattr(table[i], '__len__'))
if len(table[i]) > 0:
if ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass
else:
if len(table[i]) < 6:
syntax_err = True
table_entry = table[i][0]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'improper'))):
syntax_err = True
if len(table[i]) > 1:
table_entry = table[i][1]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('@', '@{')) and
(table_entry.nptr.cat_name == 'improper') and
(table_entry.nptr.cat_node == root_node))):
syntax_err = True
if len(table[i]) > 2:
table_entry = table[i][2]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if len(table[i]) > 3:
table_entry = table[i][3]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if len(table[i]) > 4:
table_entry = table[i][4]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if len(table[i]) > 5:
table_entry = table[i][5]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if syntax_err:
raise InputError('----------------------------------------------------\n'+
' Syntax error near '+
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno)+'\n'
' Incorrect "Data Impropers" syntax.\n'+
'Each line of the \"Data Impropers\" section should have this format:\n\n'
' $improper:id @improper:type $atom:id1 $atom:id2 $atom:id3 $atom:id4\n'+
'----------------------------------------------------\n'+
g_no_check_msg)
# A simple wildcard is the character "*" on its own.
# These are okay.
# A "compound" wildcard expression is something like
# 5*7 or
# 5* or
# *7 or
# @{bond:A}*@bond:B or
# @{bond:A}* or
# *@bond:B
# LAMMPS allows this but in moltemplate this causes
# unintended side-effects. Check for these now.
if filename in set(['Data Bond Coeffs',
'Data Angle Coeffs',
'Data Dihedral Coeffs',
'Data Improper Coeffs',
'Data Pair Coeffs']):
table = TableFromTemplate(command.tmpl_list,
[[' ','\t','\r'], '\n'],
[True, False])
for i in range(0, len(table)):
assert(hasattr(table[i], '__len__'))
if len(table[i]) > 0:
if (isinstance(table[i][0], TextBlock) and
table[i][0].text == '*'):
if filename == 'Data Bond Coeffs':
data_bond_coeffs_defined.add('*')
elif filename == 'Data Angle Coeffs':
data_angle_coeffs_defined.add('*')
elif filename == 'Data Dihedral Coeffs':
data_dihedral_coeffs_defined.add('*')
elif filename == 'Data Improper Coeffs':
data_improper_coeffs_defined.add('*')
elif filename == 'Data Pair Coeffs':
data_pair_coeffs_defined.add(('*','*'))
else:
compound_wildcard = False
if (len(table[i]) > 1):
if hasattr(table[i][0],'__len__'):
ltmpl = table[i][0]
else:
ltmpl = [table[i][0]]
for entry in ltmpl:
if (isinstance(entry, TextBlock) and
('*' in entry.text)):
compound_wildcard = True
elif (isinstance(entry, VarRef) and
('*' in entry.descr_str)):
compound_wildcard = True
if compound_wildcard:
raise InputError('--- Paranoid checking: ---\n'
' Possible error near '+
ErrorLeader(entry.srcloc.infile,
entry.srcloc.lineno)+'\n'
'The wildcard symbol, \"*\", is not recommended within \"'+filename+'\".\n'
'It is safer to specify the parameters for each type explicitly.\n'
'You CAN use \"*\" wildcards, but you must disable syntax checking. To get\n'
'past this error message, run moltemplate.sh using the \"-nocheck\" option.\n')
if filename == 'Data Bond Coeffs':
# Commenting the next line out. We did this already:
#table = TableFromTemplate(command.tmpl_list,
# [[' ','\t','\r'], '\n'],
# [True, False])
for i in range(0, len(table)):
if len(table[i]) == 0:
pass
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(table[i][0].text == '*')):
pass # we dealt with this case earlier
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass #Ignore comment lines (postprocessing removes them)
elif (not (isinstance(table[i][0], VarRef) and
(table[i][0].prefix in ('@', '@{')) and
(table[i][0].nptr.cat_name == 'bond') and
(table[i][0].nptr.cat_node == root_node))):
raise InputError('----------------------------------------------------\n'+
' Syntax error near '+
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno)+'\n'
' Incorrect "Data Bond Coeffs" syntax.\n'
' Each line of the \"Data Bond Coeffs\" section\n'
' should have the following syntax:\n\n'+
' @bond:type list-of-parameters...\n'+
'----------------------------------------------------\n'+
g_no_check_msg)
else:
data_bond_coeffs_defined.add(table[i][0].binding)
elif filename == 'Data Angle Coeffs':
# Commenting the next line out. We did this already:
#table = TableFromTemplate(command.tmpl_list,
# [[' ','\t','\r'], '\n'],
# [True, False])
for i in range(0, len(table)):
if len(table[i]) == 0:
pass
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(table[i][0].text == '*')):
pass # we dealt with this case earlier
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass #Ignore comment lines (postprocessing removes them)
elif (not (isinstance(table[i][0], VarRef) and
(table[i][0].prefix in ('@', '@{')) and
(table[i][0].nptr.cat_name == 'angle') and
(table[i][0].nptr.cat_node == root_node))):
raise InputError('----------------------------------------------------\n'+
' Syntax error near '+
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno)+'\n'
' Incorrect "Data Angle Coeffs" syntax.\n'
' Each line of the \"Data Angle Coeffs\" section\n'
' should have the following syntax:\n\n'+
' @angle:type list-of-parameters...\n'+
'----------------------------------------------------\n'+
g_no_check_msg)
else:
data_angle_coeffs_defined.add(table[i][0].binding)
elif filename == 'Data Dihedral Coeffs':
# Commenting the next line out. We did this already:
#table = TableFromTemplate(command.tmpl_list,
# [[' ','\t','\r'], '\n'],
# [True, False])
for i in range(0, len(table)):
if len(table[i]) == 0:
pass
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(table[i][0].text == '*')):
pass # we dealt with this case earlier
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass #Ignore comment lines (postprocessing removes them)
elif (not (isinstance(table[i][0], VarRef) and
(table[i][0].prefix in ('@', '@{')) and
(table[i][0].nptr.cat_name == 'dihedral') and
(table[i][0].nptr.cat_node == root_node))):
raise InputError('----------------------------------------------------\n'+
' Syntax error near '+
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno)+'\n'
' Incorrect "Data Dihedral Coeffs" syntax.\n'
' Each line of the \"Data Dihedral Coeffs\" section\n'
' should have the following syntax:\n\n'+
' @dihedral:type list-of-parameters...\n'+
'----------------------------------------------------\n'+
g_no_check_msg)
else:
data_dihedral_coeffs_defined.add(table[i][0].binding)
elif filename == 'Data Improper Coeffs':
# Commenting the next line out. We did this already:
#table = TableFromTemplate(command.tmpl_list,
# [[' ','\t','\r'], '\n'],
# [True, False])
for i in range(0, len(table)):
if len(table[i]) == 0:
pass
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(table[i][0].text == '*')):
pass # we dealt with this case earlier
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass #Ignore comment lines (postprocessing removes them)
elif (not (isinstance(table[i][0], VarRef) and
(table[i][0].prefix in ('@', '@{')) and
(table[i][0].nptr.cat_name == 'improper') and
(table[i][0].nptr.cat_node == root_node))):
raise InputError('----------------------------------------------------\n'+
' Syntax error near '+
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno)+'\n'
' Incorrect "Data Improper Coeffs" syntax.\n'
' Each line of the \"Data Improper Coeffs\" section\n'
' should have the following syntax:\n\n'+
' @improper:type list-of-parameters...\n'+
'----------------------------------------------------\n'+
g_no_check_msg)
else:
data_improper_coeffs_defined.add(table[i][0].binding)
elif filename == 'Data Pair Coeffs':
# Commenting the next line out. We did this already:
#table = TableFromTemplate(command.tmpl_list,
# [[' ','\t','\r'], '\n'],
# [True, False])
for i in range(0, len(table)):
if len(table[i]) == 0:
pass
elif ((len(table[i]) > 0) and
isinstance(table[i][0], TextBlock) and
(table[i][0].text == '*')):
pass # we dealt with this case earlier
elif ((len(table[i]) > 0) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass #Ignore comment lines (postprocessing removes them)
elif (not ((len(table[i]) > 0) and
isinstance(table[i][0], VarRef) and
(table[i][0].prefix in ('@', '@{')) and
(table[i][0].nptr.cat_name == 'atom') and
(table[i][0].nptr.cat_node == root_node))):
raise InputError('----------------------------------------------------\n'+
' Syntax error near '+
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno)+'\n'
' Incorrect "Data Pair Coeffs" syntax.\n'
' Each line of the \"Data Pair Coeffs\" section\n'
' should have the following syntax:\n\n'+
' @atom:type list-of-parameters...\n'+
'----------------------------------------------------\n'+
g_no_check_msg)
else:
data_pair_coeffs_defined.add((table[i][0].binding,
table[i][0].binding))
elif filename == 'Data Bonds By Type':
table = TableFromTemplate(command.tmpl_list,
[[' ','\t','\r'], '\n'],
[True, False])
for i in range(0, len(table)):
if len(table[i]) == 0:
pass
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass #Ignore comment lines (postprocessing removes them)
elif (not ((len(table[i]) >= 3) and
isinstance(table[i][0], VarRef) and
(table[i][0].prefix in ('@', '@{')) and
(table[i][0].nptr.cat_name == 'bond') and
(table[i][0].nptr.cat_node == root_node))):
raise InputError('----------------------------------------------------\n'+
' Syntax error near '+
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno)+'\n'
' Incorrect \"Data Bonds By Type\" syntax.\n'
' Each line of the \"Data Bonds By Type\" section should begin with an\n'
' @bond:type variable followed by 2 atom types.\n'+
'----------------------------------------------------\n'+
g_no_check_msg)
elif filename == 'Data Angles By Type':
table = TableFromTemplate(command.tmpl_list,
[[' ','\t','\r'], '\n'],
[True, False])
for i in range(0, len(table)):
if len(table[i]) == 0:
pass
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass #Ignore comment lines (postprocessing removes them)
elif (not ((len(table[i]) >= 4) and
isinstance(table[i][0], VarRef) and
(table[i][0].prefix in ('@', '@{')) and
(table[i][0].nptr.cat_name == 'angle') and
(table[i][0].nptr.cat_node == root_node))):
raise InputError('----------------------------------------------------\n'+
' Syntax error near '+
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno)+'\n'
' Incorrect \"Data Angles By Type\" syntax.\n'
' Each line of the \"Data Angles By Type\" section should begin with an\n'
' @angle:type variable followed by 3 atom types (and 2 optional bond types).\n'+
'----------------------------------------------------\n'+
g_no_check_msg)
elif filename == 'Data Dihedrals By Type':
table = TableFromTemplate(command.tmpl_list,
[[' ','\t','\r'], '\n'],
[True, False])
for i in range(0, len(table)):
if len(table[i]) == 0:
pass
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass #Ignore comment lines (postprocessing removes them)
elif (not ((len(table[i]) >= 5) and
isinstance(table[i][0], VarRef) and
(table[i][0].prefix in ('@', '@{')) and
(table[i][0].nptr.cat_name == 'dihedral') and
(table[i][0].nptr.cat_node == root_node))):
raise InputError('----------------------------------------------------\n'+
' Syntax error near '+
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno)+'\n'
' Incorrect \"Data Dihedrals By Type\" syntax.\n'
' Each line of the \"Data Dihedrals By Type\" section should begin with a\n\n'
' @dihedral:type variable followed by 4 atom types (and 3 optional bond types).\n'+
'----------------------------------------------------\n'+
g_no_check_msg)
elif filename == 'Data Impropers By Type':
table = TableFromTemplate(command.tmpl_list,
[[' ','\t','\r'], '\n'],
[True, False])
for i in range(0, len(table)):
if len(table[i]) == 0:
pass
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass #Ignore comment lines (postprocessing removes them)
elif (not ((len(table[i]) >= 5) and
isinstance(table[i][0], VarRef) and
(table[i][0].prefix in ('@', '@{')) and
(table[i][0].nptr.cat_name == 'improper') and
(table[i][0].nptr.cat_node == root_node))):
raise InputError('----------------------------------------------------\n'+
' Syntax error near '+
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno)+'\n'
' Incorrect \"Data Impropers By Type\" syntax.\n'
' Each line of the \"Data Impropers By Type\" section should begin with an\n\n'
' @improper:type variable followed by 4 atom types (and 3 optional bond types).\n'+
'----------------------------------------------------\n'+
g_no_check_msg)
# Recursively invoke AssignVarPtrs() on all (non-leaf) child nodes:
for child in context_node.children.values():
CheckSyntaxStatic(child,
root_node,
atom_column_names,
data_pair_coeffs_defined,
data_bond_coeffs_defined,
data_angle_coeffs_defined,
data_dihedral_coeffs_defined,
data_improper_coeffs_defined,
in_pair_coeffs_defined,
in_bond_coeffs_defined,
in_angle_coeffs_defined,
in_dihedral_coeffs_defined,
in_improper_coeffs_defined,
search_instance_commands)
def CheckInFileSyntax(tmpl_list,
                      root_node,
                      pair_coeffs_defined,
                      bond_coeffs_defined,
                      angle_coeffs_defined,
                      dihedral_coeffs_defined,
                      improper_coeffs_defined):
    """
    Scan text destined for a LAMMPS input script (eg the "In Settings" file)
    for "pair_coeff", "bond_coeff", "angle_coeff", "dihedral_coeff", and
    "improper_coeff" commands.

    Side effects: for every well-formed coeff command found, the @variable
    binding (or the '*' wildcard, or a ('*','*') pair for pair_coeff) is
    added to the corresponding *_coeffs_defined set (modified in place).
    The caller later compares these sets against the types actually
    referenced in the system to detect missing coeffs.

    Raises InputError when a command is misspelled, malformed, or uses a
    "compound" wildcard (eg "5*7", "5*", "*7", "@{bond:A}*"), which
    moltemplate cannot safely renumber.  A lone "*" is allowed.
    """
    # Split the template into a table: one row per line, one entry per
    # whitespace-delimited token.
    table = TableFromTemplate(tmpl_list,
                              [[' ','\t','\r'], '\n'],
                              [True, False])
    for i in range(0, len(table)):
        assert(hasattr(table[i], '__len__'))
        if len(table[i]) > 0:
            # ---- wildcard pre-pass for bond/angle/dihedral/improper ----
            if ((isinstance(table[i][0], TextBlock)) and
                (table[i][0].text in set(['bond_coeff',
                                          'angle_coeff',
                                          'dihedral_coeff',
                                          'improper_coeff']))):
                if len(table[i]) > 1: # if not deal with error later
                    if (isinstance(table[i][1], TextBlock) and
                        table[i][1].text == '*'):
                        # A lone "*" defines coeffs for every type: allowed.
                        if table[i][0].text == 'bond_coeff':
                            bond_coeffs_defined.add('*')
                        elif table[i][0].text == 'angle_coeff':
                            angle_coeffs_defined.add('*')
                        elif table[i][0].text == 'dihedral_coeff':
                            dihedral_coeffs_defined.add('*')
                        elif table[i][0].text == 'improper_coeff':
                            improper_coeffs_defined.add('*')
                    else:
                        # Reject "compound" wildcard expressions
                        # (eg "5*7", "5*", "*7", "@{bond:A}*").
                        compound_wildcard = False
                        if (len(table[i]) > 1):
                            if hasattr(table[i][1], '__len__'):
                                ltmpl = table[i][1]
                            else:
                                ltmpl = [table[i][1]]
                            for entry in ltmpl:
                                if (isinstance(entry, TextBlock) and
                                    ('*' in entry.text)):
                                    compound_wildcard = True
                                elif (isinstance(entry, VarRef) and
                                      ('*' in entry.descr_str)):
                                    compound_wildcard = True
                            if compound_wildcard:
                                raise InputError('---- Paranoid checking: ---\n'
                                                 ' Possible error near '+
                                                 ErrorLeader(entry.srcloc.infile,
                                                             entry.srcloc.lineno)+'\n'
                                                 'The wildcard symbol, \"*\", is not recommended within a \"'+table[i][0].text+'\".\n'
                                                 'command. It is safer to specify the parameters for each bond type explicitly.\n'
                                                 'You CAN use \"*\" wildcards, but you must disable syntax checking. To get\n'
                                                 'past this error message, run moltemplate.sh using the \"-nocheck\" option.\n')
            # ---- bond_coeff: check spelling, then record the @bond binding ----
            if ((isinstance(table[i][0], TextBlock)) and
                ((table[i][0].text.lower() == 'bondcoeff') or
                 (table[i][0].text.lower() == 'bond_coeff'))):
                if table[i][0].text != 'bond_coeff':
                    raise InputError('----------------------------------------------------\n'+
                                     ' Spelling error near '+
                                     ErrorLeader(table[i][0].srcloc.infile,
                                                 table[i][0].srcloc.lineno)+'\n'
                                     ' Use \"bond_coeff\", not \"'+table[i][0].text+'\"\n'+
                                     '----------------------------------------------------\n'+
                                     g_no_check_msg)
                if ((len(table[i]) > 1) and
                    isinstance(table[i][1], TextBlock) and
                    (table[i][1].text == '*')):
                    pass # we dealt with this case earlier
                elif (not ((len(table[i]) > 1) and
                           (isinstance(table[i][1], VarRef)) and
                           (table[i][1].prefix in ('@', '@{')) and
                           (table[i][1].nptr.cat_name == 'bond') and
                           (table[i][1].nptr.cat_node == root_node))):
                    raise InputError('----------------------------------------------------\n'+
                                     ' Syntax error near '+
                                     ErrorLeader(table[i][0].srcloc.infile,
                                                 table[i][0].srcloc.lineno)+'\n'
                                     ' Invalid \"bond_coeff\" command.\n\n'+
                                     ' Each \"bond_coeff\" command should have the following syntax:\n\n'+
                                     ' bond_coeff @bond:type [optional style] list-of-parameters...\n'+
                                     '----------------------------------------------------\n\n'+
                                     g_no_check_msg)
                else:
                    bond_coeffs_defined.add(table[i][1].binding)
            # ---- angle_coeff: same pattern as bond_coeff ----
            if ((isinstance(table[i][0], TextBlock)) and
                ((table[i][0].text.lower() == 'anglecoeff') or
                 (table[i][0].text.lower() == 'angle_coeff'))):
                if table[i][0].text != 'angle_coeff':
                    raise InputError('----------------------------------------------------\n'+
                                     ' Spelling error near '+
                                     ErrorLeader(table[i][0].srcloc.infile,
                                                 table[i][0].srcloc.lineno)+'\n'
                                     ' Use \"angle_coeff\", not \"'+table[i][0].text+'\"\n'+
                                     '----------------------------------------------------\n'+
                                     g_no_check_msg)
                if ((len(table[i]) > 1) and
                    isinstance(table[i][1], TextBlock) and
                    (table[i][1].text == '*')):
                    pass # we dealt with this case earlier
                elif (not ((len(table[i]) > 1) and
                           (isinstance(table[i][1], VarRef)) and
                           (table[i][1].prefix in ('@', '@{')) and
                           (table[i][1].nptr.cat_name == 'angle') and
                           (table[i][1].nptr.cat_node == root_node))):
                    raise InputError('----------------------------------------------------\n'+
                                     ' Syntax error near '+
                                     ErrorLeader(table[i][0].srcloc.infile,
                                                 table[i][0].srcloc.lineno)+'\n'
                                     ' Invalid \"angle_coeff\" command.\n\n'+
                                     ' Each \"angle_coeff\" command should have the following syntax:\n\n'+
                                     ' angle_coeff @angle:type [optional style] list-of-parameters...\n'+
                                     '----------------------------------------------------\n\n'+
                                     g_no_check_msg)
                else:
                    angle_coeffs_defined.add(table[i][1].binding)
            # ---- dihedral_coeff: same pattern ----
            if ((isinstance(table[i][0], TextBlock)) and
                ((table[i][0].text.lower() == 'dihedralcoeff') or
                 (table[i][0].text.lower() == 'dihedral_coeff'))):
                if table[i][0].text != 'dihedral_coeff':
                    raise InputError('----------------------------------------------------\n'+
                                     ' Spelling error near '+
                                     ErrorLeader(table[i][0].srcloc.infile,
                                                 table[i][0].srcloc.lineno)+'\n'
                                     ' Use \"dihedral_coeff\", not \"'+table[i][0].text+'\"\n'+
                                     '----------------------------------------------------\n'+
                                     g_no_check_msg)
                if ((len(table[i]) > 1) and
                    isinstance(table[i][1], TextBlock) and
                    (table[i][1].text == '*')):
                    pass # we dealt with this case earlier
                elif (not ((len(table[i]) > 1) and
                           (isinstance(table[i][1], VarRef)) and
                           (table[i][1].prefix in ('@', '@{')) and
                           (table[i][1].nptr.cat_name == 'dihedral') and
                           (table[i][1].nptr.cat_node == root_node))):
                    raise InputError('----------------------------------------------------\n'+
                                     ' Syntax error near '+
                                     ErrorLeader(table[i][0].srcloc.infile,
                                                 table[i][0].srcloc.lineno)+'\n'
                                     ' Invalid \"dihedral_coeff\" command.\n\n'+
                                     ' Each \"dihedral_coeff\" command should have the following syntax:\n\n'+
                                     ' dihedral_coeff @dihedral:type [optional style] list-of-parameters...\n'+
                                     '----------------------------------------------------\n\n'+
                                     g_no_check_msg)
                else:
                    dihedral_coeffs_defined.add(table[i][1].binding)
            # ---- improper_coeff: same pattern ----
            if ((isinstance(table[i][0], TextBlock)) and
                ((table[i][0].text.lower() == 'impropercoeff') or
                 (table[i][0].text.lower() == 'improper_coeff'))):
                if table[i][0].text != 'improper_coeff':
                    raise InputError('----------------------------------------------------\n'+
                                     ' Spelling error near '+
                                     ErrorLeader(table[i][0].srcloc.infile,
                                                 table[i][0].srcloc.lineno)+'\n'
                                     ' Use \"improper_coeff\", not \"'+table[i][0].text+'\"\n'+
                                     '----------------------------------------------------\n'+
                                     g_no_check_msg)
                if ((len(table[i]) > 1) and
                    isinstance(table[i][1], TextBlock) and
                    (table[i][1].text == '*')):
                    pass # we dealt with this case earlier
                elif (not ((len(table[i]) > 1) and
                           (isinstance(table[i][1], VarRef)) and
                           (table[i][1].prefix in ('@', '@{')) and
                           (table[i][1].nptr.cat_name == 'improper') and
                           (table[i][1].nptr.cat_node == root_node))):
                    raise InputError('----------------------------------------------------\n'+
                                     ' Syntax error near '+
                                     ErrorLeader(table[i][0].srcloc.infile,
                                                 table[i][0].srcloc.lineno)+'\n'
                                     ' Invalid \"improper_coeff\" command.\n\n'+
                                     ' Each \"improper_coeff\" command should have the following syntax:\n\n'+
                                     ' improper_coeff @improper:type [optional style] list-of-parameters...\n'+
                                     '----------------------------------------------------\n\n'+
                                     g_no_check_msg)
                else:
                    improper_coeffs_defined.add(table[i][1].binding)
            # ---- pair_coeff: takes TWO atom types (typeI, typeJ) ----
            elif ((isinstance(table[i][0], TextBlock)) and
                  ((table[i][0].text.lower() == 'paircoeff') or
                   (table[i][0].text.lower() == 'pair_coeff'))):
                if table[i][0].text != 'pair_coeff':
                    raise InputError('----------------------------------------------------\n'+
                                     ' Spelling error near '+
                                     ErrorLeader(table[i][0].srcloc.infile,
                                                 table[i][0].srcloc.lineno)+'\n'
                                     ' Use \"pair_coeff\", not \"'+table[i][0].text+'\"\n'+
                                     '----------------------------------------------------\n'+
                                     g_no_check_msg)
                if len(table[i]) > 2: # if not, deal with error later
                    # BUGFIX: the original tested table[i][1] twice here, so
                    # "pair_coeff * X ..." was mistakenly recorded as a full
                    # ('*','*') definition and skipped the compound-wildcard
                    # diagnostic.  The second operand must be table[i][2]
                    # (consistent with the equivalent check further below).
                    if ((isinstance(table[i][1], TextBlock) and
                         (table[i][1].text == '*')) and
                        (isinstance(table[i][2], TextBlock) and
                         (table[i][2].text == '*'))):
                        pair_coeffs_defined.add(('*','*'))
                    else:
                        # Reject compound wildcards in either atom-type slot.
                        compound_wildcard = False
                        assert(len(table[i]) > 1)
                        if hasattr(table[i][1], '__len__'):
                            ltmpl = table[i][1]
                        else:
                            ltmpl = [table[i][1]]
                        for entry in ltmpl:
                            if (isinstance(entry, TextBlock) and
                                ('*' in entry.text)):
                                compound_wildcard = True
                            elif (isinstance(entry, VarRef) and
                                  ('*' in entry.descr_str)):
                                compound_wildcard = True
                        if hasattr(table[i][2], '__len__'):
                            ltmpl = table[i][2]
                        else:
                            ltmpl = [table[i][2]]
                        for entry in ltmpl:
                            if (isinstance(entry, TextBlock) and
                                ('*' in entry.text)):
                                compound_wildcard = True
                            elif (isinstance(entry, VarRef) and
                                  ('*' in entry.descr_str)):
                                compound_wildcard = True
                        if compound_wildcard:
                            raise InputError('---- Paranoid checking: ---\n'
                                             ' Possible error near '+
                                             ErrorLeader(entry.srcloc.infile,
                                                         entry.srcloc.lineno)+'\n'
                                             'The wildcard symbol, \"*\", is not recommended within a \"pair_coeff\" command.\n'
                                             'It is safer to specify the parameters for each bond type explicitly.\n'
                                             'You CAN use \"*\" wildcards, but you must disable syntax checking. To get\n'
                                             'past this error message, run moltemplate.sh using the \"-nocheck\" option.\n')
                if ((len(table[i]) > 2) and
                    (isinstance(table[i][1], TextBlock) and
                     (table[i][1].text == '*')) and
                    (isinstance(table[i][2], TextBlock) and
                     (table[i][2].text == '*'))):
                    pass # we dealt with this case earlier
                elif (not ((len(table[i]) > 2) and
                           (isinstance(table[i][1], VarRef)) and
                           (table[i][1].prefix in ('@', '@{')) and
                           (table[i][1].nptr.cat_name == 'atom') and
                           (table[i][1].nptr.cat_node == root_node) and
                           (isinstance(table[i][2], VarRef)) and
                           (table[i][2].prefix in ('@', '@{')) and
                           (table[i][2].nptr.cat_name == 'atom') and
                           (table[i][2].nptr.cat_node == root_node))):
                    raise InputError('----------------------------------------------------\n'+
                                     ' Syntax error near '+
                                     ErrorLeader(table[i][0].srcloc.infile,
                                                 table[i][0].srcloc.lineno)+'\n'
                                     ' Invalid \"pair_coeff\" command.\n\n'+
                                     ' Each \"pair_coeff\" command should have the following syntax:\n\n'+
                                     ' pair_coeff @atom:typeI @atom:typeJ [optional style] list-of-parameters...\n'+
                                     '----------------------------------------------------\n\n'+
                                     g_no_check_msg)
                else:
                    pair_coeffs_defined.add((table[i][1].binding, table[i][2].binding))
def LttreeCheckParseArgs(argv, settings):
    """Process the command line and prepare the lexer in *settings*.

    Generic lttree options are handled by LttreeParseArgs().  When this
    module is run as a stand-alone program, exactly one unprocessed
    argument must remain afterwards: the name of the file containing the
    ttree template commands.  That file is opened, a TemplateLexer is
    attached to settings.lex, and the argument is consumed from argv.
    """
    LttreeParseArgs(argv, settings)
    if __name__ != "__main__":
        return

    num_remaining = len(argv)
    if num_remaining == 1:
        # No file name was supplied at all.
        raise InputError('Error: This program requires at least one argument\n'
                         '       the name of a file containing ttree template commands\n')
    if num_remaining == 2:
        settings.infile = argv[1]
        try:
            # The lexer's __init__() requires an open file object.
            settings.lex = TemplateLexer(open(settings.infile, 'r'),
                                         settings.infile) # Parse text from file
        except IOError:
            sys.stderr.write('Error: unable to open file\n'
                             '       \"'+settings.infile+'\"\n'
                             '       for reading.\n')
            sys.exit(1)
        # Consume the file-name argument so it is not reprocessed.
        del argv[1:2]
        return

    # More than two remaining arguments: this program parses only one
    # source file, so we cannot tell what the extra arguments mean.
    unrecognized = ['\"'+arg+'\"' for arg in argv[1:]]
    raise InputError('Syntax Error('+g_program_name+'):\n\n'
                     '       Unrecognized argument.\n'
                     '         (That or there is some other problem with the argument list.)\n'
                     '       The problem begins with these arguments:\n'
                     '         '+(' '.join(unrecognized))+'\n\n'
                     '       (The actual problem may be earlier in the argument list.\n'
                     '        If these arguments are source files, then keep in mind\n'
                     '        that this program can not parse multiple source files.)\n'
                     '       Check the syntax of the entire argument list.\n')
####### control flow begins here: #######

if __name__ == "__main__":

    # Banner: program name (derived from this file's name), version, date.
    g_program_name = __file__.split('/')[-1] # = 'lttree_check.py'
    g_version_str = '0.76'
    g_date_str = '2014-12-19'
    sys.stderr.write(g_program_name+' v'+g_version_str+' '+g_date_str+'\n')

    try:

        # Parse the argument list and instantiate the lexer we will be using:
        #settings = BasicUISettings()
        #BasicUIParseArgs(sys.argv, settings)
        settings = LttreeSettings()
        LttreeCheckParseArgs(sys.argv, settings)

        # Invoke syntax checker pass:
        # This first check only checks for very simple mistakes
        # (mispelled versions of standard files or variable names).
        CheckSyntaxCheap(settings.lex)
        settings.lex.instream.close()

        # Now read the file again.
        # This time parse it using StaticObj.ReadTemplate().
        # (This will allow us to check for deeper problems.)
        del settings.lex
        settings.lex = TemplateLexer(open(settings.infile, 'r'),
                                     settings.infile)

        static_tree_root = StaticObj('', None) # The root of the static tree
                                               # has name '' (equivalent to '/')

        sys.stderr.write(g_program_name+': parsing the class definitions...')
        static_tree_root.Parse(settings.lex)
        sys.stderr.write(' done\n'+g_program_name+': looking up classes...')
        static_tree_root.LookupStaticRefs()
        sys.stderr.write(' done\n'+g_program_name+': looking up @variables...')
        # Resolve @variables in write_once() blocks first, apply any
        # variable-replacement pairs, then repeat for write() commands.
        AssignStaticVarPtrs(static_tree_root,
                            search_instance_commands=False)
        replace_var_pairs = {}
        FindReplacementVarPairs(static_tree_root, replace_var_pairs)
        ReplaceVars(static_tree_root, replace_var_pairs,
                    search_instance_commands=False)
        AssignStaticVarPtrs(static_tree_root,
                            search_instance_commands=True)
        ReplaceVars(static_tree_root, replace_var_pairs,
                    search_instance_commands=True)
        sys.stderr.write(' done\n')
        #sys.stderr.write(' done\n\nclass_def_tree = ' + str(static_tree_root) + '\n\n')

        # Sets of force-field types whose coeffs were found in the data file
        # ("Data ... Coeffs" sections):
        data_pair_coeffs_defined = set([])
        data_bond_coeffs_defined = set([])
        data_angle_coeffs_defined = set([])
        data_dihedral_coeffs_defined = set([])
        data_improper_coeffs_defined = set([])
        # ...and in the input script ("..._coeff" commands):
        in_pair_coeffs_defined = set([])
        in_bond_coeffs_defined = set([])
        in_angle_coeffs_defined = set([])
        in_dihedral_coeffs_defined = set([])
        in_improper_coeffs_defined = set([])

        # Now check the static syntax
        # Here we check the contents of the the "write_once()" commands:
        CheckSyntaxStatic(static_tree_root,
                          static_tree_root,
                          settings.column_names,
                          data_pair_coeffs_defined,
                          data_bond_coeffs_defined,
                          data_angle_coeffs_defined,
                          data_dihedral_coeffs_defined,
                          data_improper_coeffs_defined,
                          in_pair_coeffs_defined,
                          in_bond_coeffs_defined,
                          in_angle_coeffs_defined,
                          in_dihedral_coeffs_defined,
                          in_improper_coeffs_defined,
                          search_instance_commands=False)
        # Here we check the contents of the the "write()" commands:
        CheckSyntaxStatic(static_tree_root,
                          static_tree_root,
                          settings.column_names,
                          data_pair_coeffs_defined,
                          data_bond_coeffs_defined,
                          data_angle_coeffs_defined,
                          data_dihedral_coeffs_defined,
                          data_improper_coeffs_defined,
                          in_pair_coeffs_defined,
                          in_bond_coeffs_defined,
                          in_angle_coeffs_defined,
                          in_dihedral_coeffs_defined,
                          in_improper_coeffs_defined,
                          search_instance_commands=True)
        # --- bond types: every referenced @bond type must have coeffs ---
        if 'bond' in static_tree_root.categories:
            # LAMMPS accepts bond coeffs from EITHER the input script OR the
            # data file, not both.
            if ((len(data_bond_coeffs_defined) > 0) and
                (len(in_bond_coeffs_defined) > 0)):
                raise InputError('---------------------------------------------------------------------\n'+
                                 ' Syntax error: You can EITHER use \"bond_coeff\" commands\n'+
                                 ' OR you can have a \"Data Bond Coeffs\" section.\n'+
                                 ' LAMMPS will not allow both (...as of late 2012)\n'+
                                 '---------------------------------------------------------------------\n'+
                                 g_no_check_msg)
                #' If this is no longer true, to override this error message you must\n'+
                #' disable error checking by running moltemplate with the -nocheck option.\n')
            if len(data_bond_coeffs_defined) > 0:
                bond_coeffs_defined = data_bond_coeffs_defined
            else:
                bond_coeffs_defined = in_bond_coeffs_defined
            bond_bindings = static_tree_root.categories['bond'].bindings
            for nd,bond_binding in bond_bindings.items():
                # Skip bindings that belong to deleted objects.
                if not nd.IsDeleted():
                    # A bond type is covered if its binding appears in the
                    # defined set, its name is itself a wildcard, or a lone
                    # '*' definition was given.
                    if ((not (bond_binding in bond_coeffs_defined)) and
                        (not HasWildCard(bond_binding.full_name)) and
                        (not ('*' in bond_coeffs_defined))):
                        raise InputError('---------------------------------------------------------------------\n'+
                                         ' Syntax error: Missing bond coeff.\n\n'+
                                         ' No coeffs for the \"'+bond_binding.full_name+'\" bond type have been\n'+
                                         'defined, but a reference to that bond type was discovered\n'+
                                         'near '+ErrorLeader(bond_binding.refs[0].srcloc.infile,
                                                             bond_binding.refs[0].srcloc.lineno)+'. Check this file and also check\n'
                                         'your \"bond_coeff\" commands or your \"Data Bond Coeffs" section.\n'
                                         '---------------------------------------------------------------------\n'+
                                         g_no_check_msg)
        # --- angle types: every referenced @angle type must have coeffs ---
        if 'angle' in static_tree_root.categories:
            if ((len(data_angle_coeffs_defined) > 0) and
                (len(in_angle_coeffs_defined) > 0)):
                raise InputError('---------------------------------------------------------------------\n'+
                                 ' Syntax error: You can EITHER use \"angle_coeff\" commands\n'+
                                 ' OR you can have a \"Data Angle Coeffs\" section.\n'+
                                 ' LAMMPS will not allow both (...as of late 2012)\n'+
                                 '---------------------------------------------------------------------\n'+
                                 g_no_check_msg)
                #' If this is no longer true, to override this error message you must\n'+
                #' disable error checking by running moltemplate with the -nocheck option.\n')
            if len(data_angle_coeffs_defined) > 0:
                angle_coeffs_defined = data_angle_coeffs_defined
            else:
                angle_coeffs_defined = in_angle_coeffs_defined
            angle_bindings = static_tree_root.categories['angle'].bindings
            for nd,angle_binding in angle_bindings.items():
                if not nd.IsDeleted():
                    # NOTE(review): unlike the bond check above, the
                    # HasWildCard() test is commented out here — confirm
                    # this asymmetry is intentional.
                    if ((not (angle_binding in angle_coeffs_defined)) and
                        #(not HasWildCard(angle_binding.full_name)) and
                        (not ('*' in angle_coeffs_defined))):
                        raise InputError('---------------------------------------------------------------------\n'+
                                         ' Syntax error: Missing angle coeff.\n\n'+
                                         ' No coeffs for the \"'+angle_binding.full_name+'\" angle type have been\n'+
                                         'defined, but a reference to that angle type was discovered\n'+
                                         'near '+ErrorLeader(angle_binding.refs[0].srcloc.infile,
                                                             angle_binding.refs[0].srcloc.lineno)+'. Check this file and\n'
                                         'also check your \"angle_coeff\" commands or your \"Data Angle Coeffs" section.\n'+
                                         '---------------------------------------------------------------------\n'+
                                         g_no_check_msg)
        # --- dihedral types: every referenced @dihedral type must have coeffs ---
        if 'dihedral' in static_tree_root.categories:
            if ((len(data_dihedral_coeffs_defined) > 0) and
                (len(in_dihedral_coeffs_defined) > 0)):
                raise InputError('---------------------------------------------------------------------\n'+
                                 ' Syntax error: You can EITHER use \"dihedral_coeff\" commands\n'+
                                 ' OR you can have a \"Data Dihedral Coeffs\" section.\n'+
                                 ' LAMMPS will not allow both (...as of late 2012)\n'+
                                 '---------------------------------------------------------------------\n'+
                                 g_no_check_msg)
                #' If this is no longer true, to override this error message you must\n'+
                #' disable error checking by running moltemplate with the -nocheck option.\n')
            if len(data_dihedral_coeffs_defined) > 0:
                dihedral_coeffs_defined = data_dihedral_coeffs_defined
            else:
                dihedral_coeffs_defined = in_dihedral_coeffs_defined
            dihedral_bindings = static_tree_root.categories['dihedral'].bindings
            for nd,dihedral_binding in dihedral_bindings.items():
                if not nd.IsDeleted():
                    # (HasWildCard() test deliberately commented out; see the
                    # corresponding note in the angle check.)
                    if ((not (dihedral_binding in dihedral_coeffs_defined)) and
                        #(not HasWildCard(dihedral_binding.full_name)) and
                        (not ('*' in dihedral_coeffs_defined))):
                        raise InputError('---------------------------------------------------------------------\n'+
                                         ' Syntax error: Missing dihedral coeff.\n\n'+
                                         ' No coeffs for the \"'+dihedral_binding.full_name+'\" dihedral type have been\n'+
                                         'defined, but a reference to that dihedral type was discovered\n'+
                                         'near '+ErrorLeader(dihedral_binding.refs[0].srcloc.infile,
                                                             dihedral_binding.refs[0].srcloc.lineno)+'. Check this file and\n'
                                         'also check your \"dihedral_coeff\" commands or your \"Data Dihedral Coeffs" section.\n'+
                                         '---------------------------------------------------------------------\n'+
                                         g_no_check_msg)
        # --- improper types: every referenced @improper type must have coeffs ---
        if 'improper' in static_tree_root.categories:
            if ((len(data_improper_coeffs_defined) > 0) and
                (len(in_improper_coeffs_defined) > 0)):
                raise InputError('---------------------------------------------------------------------\n'+
                                 ' Syntax error: You can EITHER use \"improper_coeff\" commands\n'+
                                 ' OR you can have a \"Data Improper Coeffs\" section.\n'+
                                 ' LAMMPS will not allow both (...as of late 2012)\n'+
                                 '---------------------------------------------------------------------\n'+
                                 g_no_check_msg)
                #' If this is no longer true, to override this error message you must\n'+
                #' disable error checking by running moltemplate with the -nocheck option.\n')
            if len(data_improper_coeffs_defined) > 0:
                improper_coeffs_defined = data_improper_coeffs_defined
            else:
                improper_coeffs_defined = in_improper_coeffs_defined
            improper_bindings = static_tree_root.categories['improper'].bindings
            for nd,improper_binding in improper_bindings.items():
                if not nd.IsDeleted():
                    # (HasWildCard() test deliberately commented out; see the
                    # corresponding note in the angle check.)
                    if ((not (improper_binding in improper_coeffs_defined)) and
                        #(not HasWildCard(improper_binding.full_name)) and
                        (not ('*' in improper_coeffs_defined))):
                        raise InputError('---------------------------------------------------------------------\n'+
                                         ' Syntax error: Missing improper coeff.\n\n'+
                                         ' No coeffs for the \"'+improper_binding.full_name+'\" improper type have been\n'+
                                         'defined, but a reference to that improper type was discovered\n'+
                                         'near '+ErrorLeader(improper_binding.refs[0].srcloc.infile,
                                                             improper_binding.refs[0].srcloc.lineno)+'. Check this file and\n'
                                         'also check your \"improper_coeff\" commands or your \"Data Improper Coeffs" section.\n'+
                                         '---------------------------------------------------------------------\n'+
                                         g_no_check_msg)
        # --- atom types: every referenced @atom type must have pair coeffs ---
        if 'atom' in static_tree_root.categories:
            if ((len(data_pair_coeffs_defined) > 0) and
                (len(in_pair_coeffs_defined) > 0)):
                raise InputError('---------------------------------------------------------------------\n'+
                                 ' Syntax error: You can EITHER use \"pair_coeff\" commands\n'+
                                 ' OR you can have a \"Data Pair Coeffs\" section.\n'+
                                 ' LAMMPS will not allow both (...as of late 2012)\n'+
                                 '---------------------------------------------------------------------\n'+
                                 g_no_check_msg)
                #' If this is no longer true, to override this error message you must\n'+
                #' disable error checking by running moltemplate with the -nocheck option.\n')
            if len(data_pair_coeffs_defined) > 0:
                pair_coeffs_defined = data_pair_coeffs_defined
            else:
                pair_coeffs_defined = in_pair_coeffs_defined
            atom_bindings = static_tree_root.categories['atom'].bindings
            for nd,atom_binding in atom_bindings.items():
                if not nd.IsDeleted():
                    # An atom type is covered if (type,type) coeffs exist,
                    # the type name is itself a wildcard, a global ('*','*')
                    # definition was given, or the variable was replaced by
                    # another (see replace_var_pairs above).
                    if ((not ((atom_binding,atom_binding)
                              in
                              pair_coeffs_defined)) and
                        (not HasWildCard(atom_binding.full_name)) and
                        (not (('*','*') in pair_coeffs_defined)) and
                        (not (atom_binding.nptr.cat_name,
                              atom_binding.nptr.cat_node,
                              atom_binding.nptr.leaf_node)
                             in replace_var_pairs)):
                        raise InputError('---------------------------------------------------------------------\n'+
                                         ' Syntax error: Missing pair coeff.\n\n'+
                                         ' No pair coeffs for the \"'+atom_binding.full_name+'\" atom type have been\n'+
                                         'defined, but a reference to that atom type was discovered\n'+
                                         'near '+ErrorLeader(atom_binding.refs[0].srcloc.infile,
                                                             atom_binding.refs[0].srcloc.lineno)+'. Check this file and\n'
                                         'also check your \"pair_coeff\" commands or your \"Data Pair Coeffs" section.\n\n'+
                                         g_no_check_msg)
        #else:
        #    raise InputError('Error: No atom types (@atom) have been defined.\n')

        # All checks passed.
        sys.stderr.write(g_program_name+': -- No errors detected. --\n')
        exit(0)

    # InputError (and ValueError) are reported as diagnostics, not tracebacks.
    except (ValueError, InputError) as err:
        sys.stderr.write('\n'+str(err)+'\n')
        sys.exit(1)
|
jag1g13/lammps
|
tools/moltemplate/src/lttree_check.py
|
Python
|
gpl-2.0
| 134,306
|
[
"LAMMPS"
] |
1db311443dd2e47ae2bae0b2a1167d665e4451f48f2c4cc506ee6b5754a37b10
|
# coding=utf-8
#
# script to create new visits in the patient database
#
# Command line format:
#
# create-patients.py <num-days> <num-patients_per_day> <ip of API>
#
# <num-days> = clinic days to simulate, default = 45
# <num-patients_per_day> how many visits to create per day, default = 100
# <ip of API> default = "localhost"
#
import sys
import os.path
from random import randint
from datetime import timedelta
from datetime import date
from datetime import time
from datetime import datetime
from time import sleep
import requests
import json
import imghdr
from operator import attrgetter
# local file that contains names to use
import hondurasCityList as cities
import aches_and_pains as aches
# Pool of staff surnames used as both usernames and fallback display names.
staffNames = [
    'Alaniz',
    'Najera',
#    'Vigil',
    'Verduzco',
    'Valadez',
    'García',
    'Griego',
    'Corral',
    'Agosto',
    'Saavedra'
    ]
staffNamesLen = len (staffNames)
# Weighted pool for random visit-type selection: repeated entries bias the
# draw (~45% Outpatient, ~41% Emergency, ~14% Specialist).
visitTypes = [
    'Outpatient',
    'Outpatient',
    'Outpatient',
    'Outpatient',
    'Outpatient',
    'Outpatient',
    'Outpatient',
    'Outpatient',
    'Outpatient',
    'Outpatient',
    'Emergency',
    'Emergency',
    'Emergency',
    'Emergency',
    'Emergency',
    'Emergency',
    'Emergency',
    'Emergency',
    'Emergency',
    'Specialist',
    'Specialist',
    'Specialist'
    ]
visitTypesLen = len(visitTypes)
# API endpoint URLs; placeholders until main() builds them from argv.
pt_url = 'NOT YET'
visit_url = 'NOT YET'
def random_city():
    """Return one randomly chosen entry (state/city/neighborhood dict)
    from the locally bundled Honduras city list."""
    last_index = cities.CitiesCount - 1
    return cities.Cities[randint(0, last_index)]
def openVisit (ptInfo):
    """Create a new visit record via the visit REST API.

    ptInfo: dict holding the patient/visit fields used below.
    Returns the new PatientVisitID on success, or None on failure
    (after printing whatever diagnostics are available).
    """
    # create post data
    postData = {
        'ClinicPatientID': ptInfo['ClinicPatientID'],
        'VisitType': ptInfo['VisitType'],
        'DateTimeIn': ptInfo['DateTimeIn'],
        'StaffName': ptInfo['StaffName'],
        'StaffUsername': ptInfo['StaffUsername'],
        'StaffPosition': ptInfo['StaffPosition'],
        'ComplaintPrimary': ptInfo['ComplaintPrimary']
    }
    # send to website
    post_status = 0
    newVisit = None
    try:
        newVisit = requests.post(visit_url, data=json.dumps(postData))
        post_status = newVisit.status_code
    except Exception:
        post_status = 500
    if (post_status != 201):
        # the patient record wasn't added so show the message and stop here.
        print ('Unable to add ' + ptInfo['NameLast'] + ', ERROR: ' + str(post_status))
        # BUG FIX: when requests.post() itself raised, newVisit is still None,
        # so every attribute access below must be guarded to avoid a secondary
        # AttributeError masking the real failure.  Also use headers.get() so
        # a missing 'Response-String' header cannot raise KeyError.
        if newVisit is not None and newVisit.text:
            print ('data: ' + newVisit.text )
        if newVisit is not None and newVisit.headers and newVisit.headers.get('Response-String'):
            print ('Reason:' + newVisit.headers['Response-String'])
        return None
    else:
        visitData = newVisit.json()
        return visitData['data']['PatientVisitID']
def closeVisit (ptInfo):
    """Mark an existing visit as closed via a PATCH to the visit API.

    ptInfo: dict holding the visit identifiers and discharge fields.
    Returns the updated visit data dict on success, or None on failure.
    """
    # create post data
    patchData = {
        'ClinicPatientID': ptInfo['ClinicPatientID'],
        'PatientVisitID': ptInfo['PatientVisitID'],
        'DateTimeOut': ptInfo['DateTimeOut'],
        'VisitStatus': 'Closed',
        'Diagnosis1': ptInfo['Diagnosis1'],
        'Condition1': ptInfo['Condition1'],
        'Diagnosis2': ptInfo['Diagnosis2'],
        'Condition2': ptInfo['Condition2']
    }
    # send to website
    patch_status = 0
    patchResp = None
    try:
        patchResp = requests.patch(visit_url, data=json.dumps(patchData))
        patch_status = patchResp.status_code
    except Exception:
        patch_status = 500
    if (patch_status != 200):
        # the patient record wasn't updated so show the message and stop here.
        print ('Unable to update visit for ' + ptInfo['NameLast'] + ', ERROR: ' + str(patch_status))
        # BUG FIX: patchResp is None when the request itself raised; guard the
        # attribute accesses, and use headers.get() to avoid KeyError on a
        # missing 'Response-String' header.
        if patchResp is not None and patchResp.text:
            print ('data: ' + patchResp.text )
        if patchResp is not None and patchResp.headers and patchResp.headers.get('Response-String'):
            print ('Reason:' + patchResp.headers['Response-String'])
        return None
    else:
        visitData = patchResp.json()
        return visitData['data']
def actionDecode (actVal):
    """Translate a numeric visit action code (1=open, 2=close) into its
    display label; anything else maps to an error marker."""
    labels = {1: "Open", 2: "Close"}
    return labels.get(actVal, "**ERROR**")
def processTodaysVisits(ptVisitList):
    """Open a visit record for each patient, in chronological check-in order.

    Mutates each entry of ptVisitList in place by storing the returned
    PatientVisitID.  The "close" half of the action machinery is present
    but commented out below — only check-ins are posted.
    """
    postDelay = 0.5 # wait time between posts (currently unused; see
                    # commented-out sleep below)
    # create a sorted list of actions from visit list
    ptActions = []
    for i,ptVisit in enumerate(ptVisitList):
        # action time, action (1=in, 2=out), index
        ptActions.append({'actionTime':ptVisit['DateTimeIn'], 'action':1, 'index':i})
    # sort list by action time so visits are posted in check-in order
    ptActions.sort(key=lambda action: action['actionTime'])
    #print (ptActions)
    # perform each action
    for action in ptActions:
        # print (action['actionTime'], actionDecode(action['action']), action['index'])
        if action['action'] == 1:
            visitID = openVisit (ptVisitList[action['index']])
            if visitID:
                print (visitID)
                # save the visit ID for later
                ptVisitList[action['index']]['PatientVisitID'] = visitID
            # sleep (postDelay)
            # post a new visit
        #elif action['action'] == 2:
            # update visit as closed
            #closeVisit (ptVisitList[action['index']])
    return
def getRandomVisitType():
    # Weighted random pick; the distribution is encoded by repetition in
    # the module-level visitTypes list.
    return visitTypes[randint(0,visitTypesLen-1)]
def getRandomComplaint():
    # Random complaint/diagnosis record from the aches_and_pains module.
    return aches.Aches_and_pains[randint(0,aches.Aches_and_pains_len-1)]
def getRandomDoctor():
    # Random staff username from the module-level surname pool.
    return staffNames[randint(0,staffNamesLen-1)]
def getDoctorInfo (username):
    """Look up a staff member by username via the staff API.

    Returns a dict with 'name' ("Last, First") and 'position'; falls back
    to the raw username / 'unassigned' when the lookup fails or does not
    return exactly one record.  Relies on the module-level staff_url,
    which is assigned in main().
    """
    stRtn = {'name': username, 'position':'unassigned'}
    try:
        stParams = {}
        stParams['Username'] = username
        newSt = requests.get(staff_url, params=stParams)
        staffRec = newSt.json()
        if (staffRec['count'] == 1):
            localName = staffRec['data']['NameLast'] + ', ' + staffRec['data']['NameFirst']
            stRtn = {'name': localName, 'position': staffRec['data']['Position']}
    except Exception:
        # BUG FIX: was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt — Ctrl-C during a long run would be eaten here.
        stRtn = {'name': username, 'position':'unassigned', 'Exception': 'error'}
    return stRtn
def getRandomPatient():
    """Fetch one random existing patient from the patient API.

    Picks a random home city and queries for patients there, retrying up
    to 10 times on errors/empty results.  Returns a patient dict, or None
    if every attempt failed.
    """
    pt_errors = 0
    pt = {}
    ptRtn = None
    newPtObj = []
    while pt_errors < 10:
        ptCity = random_city()
        pt['HomeCity'] = ptCity['City']
        # send to website
        newPt = None
        try:
            newPt = requests.get(pt_url, params=pt)
            newPtData = newPt.json()
            if (newPtData['count'] > 1):
                # several matches: keep the list and pick one below
                newPtObj = newPtData['data']
                break
            elif (newPtData['count'] == 1):
                ptRtn = newPtData['data']
                break
            else:
                pt_errors = pt_errors + 1
        except Exception:
            pt_errors = pt_errors + 1
            # BUG FIX: newPt was unbound when requests.get() itself raised,
            # so the old error print crashed with NameError.  Report the
            # status only when a response object actually exists.
            if newPt is not None:
                print ('ERROR getting patient:', newPt.status_code)
            else:
                print ('ERROR getting patient: no response')
    if newPtObj and not ptRtn:
        ptRtn = newPtObj[randint(0,len(newPtObj)-1)]
    return ptRtn
def create_clinic_admits (arg_num_pts_per_day):
    """Simulate today's clinic: build a list of random patient visits with
    random in/out times inside clinic hours, then post them via
    processTodaysVisits().  Returns the number of visits built.
    """
    # build a list of patients to visit the clinic
    todays_patients = []
    visitDate = date.today()
    openTime = datetime.combine(visitDate, time(hour=7))  # clinic opens 07:00
    closeTime = datetime.now()
    workDayDelta = closeTime - openTime # time the clinic is open
    workDaySeconds = int(workDayDelta.total_seconds())
    minVisitTime = 60*30 # min time for visit is 30 minutes
    while (len(todays_patients) < arg_num_pts_per_day):
        pt = getRandomPatient()
        if pt:
            #
            # fields to add
            #
            # check-in somewhere in the day, leaving room for a minimum-length
            # visit; check-out between +30min and clinic close
            DateTimeInRaw = openTime + timedelta(seconds = randint(0,(workDaySeconds-minVisitTime-1)))
            patientTimeToClose = closeTime - DateTimeInRaw
            DateTimeOutRaw = DateTimeInRaw + timedelta(seconds = randint(minVisitTime,int(patientTimeToClose.total_seconds())-1))
            #
            # get a random complaint and diagnosis
            # if this complaint is not appropriate for the
            # patient's sex, try again.
            #
            # NOTE(review): female patients accept any complaint, including
            # male-only ones; only males are filtered away from female-only
            # complaints.  Looks asymmetric — confirm this is intended.
            complaint = None
            while True:
                complaint = getRandomComplaint()
                if ((pt['Sex'] == 'F')):
                    break
                if ((pt['Sex'] == 'M') and (complaint['Sex'] != 'F')):
                    break
            #
            staffMember = getRandomDoctor()
            pt.update({'StaffUsername': staffMember}) # pick one from doctor name list for now
            staffInfo = getDoctorInfo (staffMember);
            pt.update({'StaffName': staffInfo.get('name')})
            pt.update({'StaffPosition': staffInfo.get('position')})
            pt.update({'VisitType': getRandomVisitType()})
            pt.update({'ComplaintPrimary': complaint['ComplaintPrimary']})
            pt.update({'DateTimeIn': DateTimeInRaw.strftime('%Y-%m-%d %H:%M:%S')})
            pt.update({'DateTimeOut': DateTimeOutRaw.strftime('%Y-%m-%d %H:%M:%S')})
            pt.update({'Diagnosis1': complaint['Diagnosis1']})
            pt.update({'Condition1': ''})
            pt.update({'Diagnosis2': ''})
            pt.update({'Condition2': ''})
            pt.update({'PatientVisitID': ''}) # this will be filled in after a visit is created
            todays_patients.append(pt)
        else:
            # API is unreachable or has no patients — stop building
            print ('Error reading patient info.')
            break
    processTodaysVisits(todays_patients)
    return len(todays_patients)
def create_admits (arg_num_pts_per_day):
    """Announce the run and create today's clinic admissions.

    Returns the number of patient visits actually created.
    """
    print ('Admitting ' + str(arg_num_pts_per_day) + ' patients for today, calling the API at: ' + visit_url)
    # (removed unused local 'arg_days'; this script only simulates one day)
    return create_clinic_admits (arg_num_pts_per_day)
def main (argv):
    """Parse command-line arguments, build the API URLs, initialise the
    local data modules, and run the admission simulation.

    argv[1] (optional): number of patient visits to create (default 50).
    argv[2] (optional): IP/host of the API server (default 'localhost').
    """
    global pt_url
    global visit_url
    global staff_url
    # assign default values
    arg_num_pts_per_day = 50
    arg_api_ip = 'localhost'
    # read command line args and assign parameter value
    # argv[0] = the script file name
    # argv[1] = the number of patient visits to create per day
    # argv[2] = the ip of the API server
    if len(argv) > 1:
        try:
            arg_num_pts_per_day = int(argv[1])
        except Exception:
            pass  # non-numeric argument: keep the default count
    if len(argv) > 2:
        # read the API's server IP
        arg_api_ip = argv[2]
    pt_url = 'http://' + arg_api_ip + '/patient.php'
    visit_url = 'http://' + arg_api_ip + '/visit.php'
    staff_url = 'http://' + arg_api_ip + '/staff.php'
    # initialize the local lists
    cities.init()
    aches.init()
    # create the patient records and display the result
    print (str(create_admits (arg_num_pts_per_day)) + ' patient visits created.')
if __name__ == '__main__':
    main (sys.argv)
|
MercerU-TCO/CTS
|
tools/create-admits.py
|
Python
|
gpl-3.0
| 9,240
|
[
"VisIt"
] |
ae2264accb232d914d4c5cddcf7c03aff33ba649d0ffb44f5b5961354f8dc26f
|
"""This demo solves the Stokes equations, using linear elements
enriched with a bubble for the velocity and linear elements for the
pressure (Mini elements). The sub domains for the different boundary
conditions used in this simulation are computed by the demo program in
src/demo/mesh/subdomains."""
# Copyright (C) 2007 Kristian B. Oelgaard
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# Modified by Anders Logg, 2008-2009.
#
# First added: 2007-11-16
# Last changed: 2010-04-01
from dolfin import *
# Load mesh and subdomains (subdomain markers: 0=no-slip walls,
# 1=inflow, 2=outflow -- see the DirichletBC calls below)
mesh = Mesh("../dolfin_fine.xml.gz")
sub_domains = MeshFunction("size_t", mesh, "../dolfin_fine_subdomains.xml.gz")
# Define function spaces: Mini element = (P1 + cubic bubble) velocity
# paired with P1 pressure, a stable Stokes pairing
P1 = VectorFunctionSpace(mesh, "Lagrange", 1)
B = VectorFunctionSpace(mesh, "Bubble", 3)
Q = FunctionSpace(mesh, "CG", 1)
Mini = (P1 + B)*Q
# No-slip boundary condition for velocity
noslip = Constant((0, 0))
bc0 = DirichletBC(Mini.sub(0), noslip, sub_domains, 0)
# Inflow boundary condition for velocity (parabolic-like sine profile)
inflow = Expression(("-sin(x[1]*pi)", "0.0"))
bc1 = DirichletBC(Mini.sub(0), inflow, sub_domains, 1)
# Boundary condition for pressure at outflow
zero = Constant(0)
bc2 = DirichletBC(Mini.sub(1), zero, sub_domains, 2)
# Collect boundary conditions
bcs = [bc0, bc1, bc2]
# Define variational problem (mixed Stokes weak form, body force f = 0)
(u, p) = TrialFunctions(Mini)
(v, q) = TestFunctions(Mini)
f = Constant((0, 0))
a = (inner(grad(u), grad(v)) - div(v)*p + q*div(u))*dx
L = inner(f, v)*dx
# Compute solution
w = Function(Mini)
solve(a == L, w, bcs)
# Split the mixed solution using deepcopy
# (needed for further computation on coefficient vector)
(u, p) = w.split(True)
print "Norm of velocity coefficient vector: %.15g" % u.vector().norm("l2")
print "Norm of pressure coefficient vector: %.15g" % p.vector().norm("l2")
# Split the mixed solution using a shallow copy
(u, p) = w.split()
# Save solution in VTK format
ufile_pvd = File("velocity.pvd")
ufile_pvd << u
pfile_pvd = File("pressure.pvd")
pfile_pvd << p
# Plot solution
plot(u)
plot(p)
interactive()
|
MiroK/DolfinSurface
|
demo/undocumented/stokes-mini/python/demo_stokes-mini.py
|
Python
|
gpl-3.0
| 2,644
|
[
"VTK"
] |
7a7eafd18a4d853aae8c8bdcb63a7ca2297d44aaa7f117d28f6212bfdeefbd71
|
#!/usr/bin/env python
'''
Fly Copter in SITL
AP_FLAKE8_CLEAN
'''
from __future__ import print_function
import copy
import math
import os
import shutil
import time
import numpy
from pymavlink import mavutil
from pymavlink import mavextra
from pymavlink import rotmat
from pysim import util
from pysim import vehicleinfo
from common import AutoTest
from common import NotAchievedException, AutoTestTimeoutException, PreconditionFailedException
from common import Test
from common import MAV_POS_TARGET_TYPE_MASK
from pymavlink.rotmat import Vector3
# get location of scripts
testdir = os.path.dirname(os.path.realpath(__file__))
SITL_START_LOCATION = mavutil.location(-35.362938, 149.165085, 584, 270)
# Flight mode switch positions are set-up in arducopter.param to be
# switch 1 = Circle
# switch 2 = Land
# switch 3 = RTL
# switch 4 = Auto
# switch 5 = Loiter
# switch 6 = Stabilize
class AutoTestCopter(AutoTest):
    """SITL autotest suite for ArduCopter.

    The mode lists below drive the generic arming/mode-change tests in
    the AutoTest base class.
    """
    @staticmethod
    def get_not_armable_mode_list():
        # Modes in which arming must be refused.
        return ["AUTO", "AUTOTUNE", "BRAKE", "CIRCLE", "FLIP", "LAND", "RTL", "SMART_RTL", "AVOID_ADSB", "FOLLOW"]
    @staticmethod
    def get_not_disarmed_settable_modes_list():
        # Modes that cannot be entered while disarmed.
        return ["FLIP", "AUTOTUNE"]
    @staticmethod
    def get_no_position_not_settable_modes_list():
        # Modes unreachable without a position estimate (none for Copter).
        return []
    @staticmethod
    def get_position_armable_modes_list():
        # Modes armable only once a position estimate is available.
        return ["DRIFT", "GUIDED", "LOITER", "POSHOLD", "THROW"]
    @staticmethod
    def get_normal_armable_modes_list():
        # Modes armable without any position requirement.
        return ["ACRO", "ALT_HOLD", "SPORT", "STABILIZE", "GUIDED_NOGPS"]
    def log_name(self):
        # Name used for log files and test artefacts.
        return "ArduCopter"
    def test_filepath(self):
        return os.path.realpath(__file__)
    def set_current_test_name(self, name):
        # Per-test artefact directory under the buildlogs tree.
        self.current_test_name_directory = "ArduCopter_Tests/" + name + "/"
    def sitl_start_location(self):
        return SITL_START_LOCATION
    def mavproxy_options(self):
        # Extend the base options; non-heli frames get the quadcopter map.
        ret = super(AutoTestCopter, self).mavproxy_options()
        if self.frame != 'heli':
            ret.append('--quadcopter')
        return ret
    def sitl_streamrate(self):
        # Telemetry stream rate (Hz) requested from SITL.
        return 5
    def vehicleinfo_key(self):
        return 'ArduCopter'
    def default_frame(self):
        return "+"
    def apply_defaultfile_parameters(self):
        # Copter passes in a defaults_filepath in place of applying
        # parameters afterwards.
        pass
    def defaults_filepath(self):
        return self.model_defaults_filepath(self.frame)
    def wait_disarmed_default_wait_time(self):
        # Copter landings can take a while; allow two minutes by default.
        return 120
    def close(self):
        super(AutoTestCopter, self).close()
        # [2014/05/07] FC Because I'm doing a cross machine build
        # (source is on host, build is on guest VM) I cannot hard link
        # This flag tells me that I need to copy the data out
        if self.copy_tlog:
            shutil.copy(self.logfile, self.buildlog)
    def is_copter(self):
        return True
    def get_stick_arming_channel(self):
        # Rudder arming uses the yaw channel.
        return int(self.get_parameter("RCMAP_YAW"))
    def get_disarm_delay(self):
        return int(self.get_parameter("DISARM_DELAY"))
    def set_autodisarm_delay(self, delay):
        self.set_parameter("DISARM_DELAY", delay)
    def user_takeoff(self, alt_min=30):
        '''takeoff using mavlink takeoff command; blocks until alt_min
        (metres, relative) is reached.  param7 carries the target
        altitude; params 1-6 are unused for this command on Copter.'''
        self.run_cmd(mavutil.mavlink.MAV_CMD_NAV_TAKEOFF,
                     0, # param1
                     0, # param2
                     0, # param3
                     0, # param4
                     0, # param5
                     0, # param6
                     alt_min # param7
                     )
        self.progress("Ran command")
        self.wait_for_alt(alt_min)
    def takeoff(self,
                alt_min=30,
                takeoff_throttle=1700,
                require_absolute=True,
                mode="STABILIZE",
                timeout=120):
        """Takeoff get to 30m altitude.

        Arms if necessary; GUIDED mode uses the MAVLink takeoff command,
        every other mode raises throttle and waits for altitude.
        """
        self.progress("TAKEOFF")
        self.change_mode(mode)
        if not self.armed():
            self.wait_ready_to_arm(require_absolute=require_absolute, timeout=timeout)
            # zero throttle first so arming checks pass
            self.zero_throttle()
            self.arm_vehicle()
        if mode == 'GUIDED':
            self.user_takeoff(alt_min=alt_min)
        else:
            self.set_rc(3, takeoff_throttle)
        self.wait_for_alt(alt_min=alt_min, timeout=timeout)
        self.hover()
        self.progress("TAKEOFF COMPLETE")
def wait_for_alt(self, alt_min=30, timeout=30, max_err=5):
"""Wait for minimum altitude to be reached."""
self.wait_altitude(alt_min - 1,
(alt_min + max_err),
relative=True,
timeout=timeout)
    def land_and_disarm(self, timeout=60):
        """Land the quad in LAND mode and wait for disarm."""
        self.progress("STARTING LANDING")
        self.change_mode("LAND")
        self.wait_landed_and_disarmed(timeout=timeout)
    def wait_landed_and_disarmed(self, min_alt=6, timeout=60):
        """Wait to be landed and disarmed"""
        m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
        alt = m.relative_alt / 1000.0 # mm -> m
        # only wait for descent if we are clearly above the ground already
        if alt > min_alt:
            self.wait_for_alt(min_alt, timeout=timeout)
#        self.wait_statustext("SIM Hit ground", timeout=timeout)
        self.wait_disarmed()
    def hover(self, hover_throttle=1500):
        # mid-stick throttle holds altitude in pilot-throttle modes
        self.set_rc(3, hover_throttle)
    # Climb/descend to a given altitude
    def setAlt(self, desiredAlt=50):
        # NOTE(review): 'pos' is sampled once before moving; when the vehicle
        # starts within 5m below desiredAlt neither branch fires and we just
        # hover.  Presumably that tolerance is intentional -- confirm.
        pos = self.mav.location(relative_alt=True)
        if pos.alt > desiredAlt:
            self.set_rc(3, 1300)
            self.wait_altitude((desiredAlt-5), desiredAlt, relative=True)
        if pos.alt < (desiredAlt-5):
            self.set_rc(3, 1800)
            self.wait_altitude((desiredAlt-5), desiredAlt, relative=True)
        self.hover()
    # Takeoff, climb to given altitude, and fly east for 10 seconds
    def takeoffAndMoveAway(self, dAlt=50, dDist=50):
        # NOTE(review): dDist is unused -- distance flown is fixed by the
        # 10-second pitch input below; confirm whether it should be honoured.
        self.progress("Centering sticks")
        self.set_rc_from_map({
            1: 1500,
            2: 1500,
            3: 1000,
            4: 1500,
        })
        self.takeoff(alt_min=dAlt)
        self.change_mode("ALT_HOLD")
        self.progress("Yaw to east")
        self.set_rc(4, 1580)
        self.wait_heading(90)
        self.set_rc(4, 1500)
        self.progress("Fly eastbound away from home")
        self.set_rc(2, 1800)
        self.delay_sim_time(10)
        self.set_rc(2, 1500)
        self.hover()
        self.progress("Copter staging 50 meters east of home at 50 meters altitude In mode Alt Hold")
    # loiter - fly south west, then loiter within 5m position and altitude
    def loiter(self, holdtime=10, maxaltchange=5, maxdistchange=5):
        """Hold loiter position.

        Takes off, flies ~50m away, then verifies position and altitude
        stay within the given limits for holdtime seconds; finishes with
        two altitude changes and an RTL.
        """
        self.takeoff(10, mode="LOITER")
        # first aim south east
        self.progress("turn south east")
        self.set_rc(4, 1580)
        self.wait_heading(170)
        self.set_rc(4, 1500)
        # fly south east 50m
        self.set_rc(2, 1100)
        self.wait_distance(50)
        self.set_rc(2, 1500)
        # wait for copter to slow moving
        self.wait_groundspeed(0, 2)
        m = self.mav.recv_match(type='VFR_HUD', blocking=True)
        start_altitude = m.alt
        start = self.mav.location()
        tstart = self.get_sim_time()
        self.progress("Holding loiter at %u meters for %u seconds" %
                      (start_altitude, holdtime))
        while self.get_sim_time_cached() < tstart + holdtime:
            m = self.mav.recv_match(type='VFR_HUD', blocking=True)
            pos = self.mav.location()
            delta = self.get_distance(start, pos)
            alt_delta = math.fabs(m.alt - start_altitude)
            self.progress("Loiter Dist: %.2fm, alt:%u" % (delta, m.alt))
            if alt_delta > maxaltchange:
                raise NotAchievedException(
                    "Loiter alt shifted %u meters (> limit %u)" %
                    (alt_delta, maxaltchange))
            if delta > maxdistchange:
                raise NotAchievedException(
                    "Loiter shifted %u meters (> limit of %u)" %
                    (delta, maxdistchange))
        self.progress("Loiter OK for %u seconds" % holdtime)
        self.progress("Climb to 30m")
        self.change_alt(30)
        self.progress("Descend to 20m")
        self.change_alt(20)
        self.do_RTL()
def watch_altitude_maintained(self, min_alt, max_alt, timeout=10):
'''watch alt, relative alt must remain between min_alt and max_alt'''
tstart = self.get_sim_time_cached()
while True:
if self.get_sim_time_cached() - tstart > timeout:
return
m = self.mav.recv_match(type='VFR_HUD', blocking=True)
if m.alt <= min_alt:
raise NotAchievedException("Altitude not maintained: want >%f got=%f" % (min_alt, m.alt))
    def test_mode_ALT_HOLD(self):
        """Verify ALT_HOLD maintains ~10m altitude, even under full
        pitch/roll stick deflection."""
        self.takeoff(10, mode="ALT_HOLD")
        self.watch_altitude_maintained(9, 11, timeout=5)
        # feed in full elevator and aileron input and make sure we
        # retain altitude:
        self.set_rc_from_map({
            1: 1000,
            2: 1000,
        })
        self.watch_altitude_maintained(9, 11, timeout=5)
        # recentre the sticks before returning
        self.set_rc_from_map({
            1: 1500,
            2: 1500,
        })
        self.do_RTL()
    def fly_to_origin(self, final_alt=10):
        # Fly (in GUIDED) to the EKF global origin at the given relative
        # altitude.
        origin = self.poll_message("GPS_GLOBAL_ORIGIN")
        self.change_mode("GUIDED")
        self.guided_move_global_relative_alt(origin.latitude,
                                             origin.longitude,
                                             final_alt)
    def change_alt(self, alt_min, climb_throttle=1920, descend_throttle=1080):
        """Change altitude.

        wait_altitude() polls adjust_altitude(), which nudges throttle
        toward the target and re-centres it (hover) within 1m accuracy.
        """
        def adjust_altitude(current_alt, target_alt, accuracy):
            if math.fabs(current_alt - target_alt) <= accuracy:
                self.hover()
            elif current_alt < target_alt:
                self.set_rc(3, climb_throttle)
            else:
                self.set_rc(3, descend_throttle)
        self.wait_altitude(
            (alt_min - 5),
            alt_min,
            relative=True,
            called_function=lambda current_alt, target_alt: adjust_altitude(current_alt, target_alt, 1)
        )
        self.hover()
    def setGCSfailsafe(self, paramValue=0):
        # Slow down the sim rate if GCS Failsafe is in use.
        # Ordering matters: when disabling, clear FS_GCS_ENABLE before
        # speeding the sim back up; when enabling, slow the sim first so
        # the failsafe timing behaves reliably.
        if paramValue == 0:
            self.set_parameter("FS_GCS_ENABLE", paramValue)
            self.set_parameter("SIM_SPEEDUP", 10)
        else:
            self.set_parameter("SIM_SPEEDUP", 4)
            self.set_parameter("FS_GCS_ENABLE", paramValue)
# fly a square in alt_hold mode
def fly_square(self, side=50, timeout=300):
self.takeoff(20, mode="ALT_HOLD")
"""Fly a square, flying N then E ."""
tstart = self.get_sim_time()
# ensure all sticks in the middle
self.set_rc_from_map({
1: 1500,
2: 1500,
3: 1500,
4: 1500,
})
# switch to loiter mode temporarily to stop us from rising
self.change_mode('LOITER')
# first aim north
self.progress("turn right towards north")
self.set_rc(4, 1580)
self.wait_heading(10)
self.set_rc(4, 1500)
# save bottom left corner of box as waypoint
self.progress("Save WP 1 & 2")
self.save_wp()
# switch back to ALT_HOLD mode
self.change_mode('ALT_HOLD')
# pitch forward to fly north
self.progress("Going north %u meters" % side)
self.set_rc(2, 1300)
self.wait_distance(side)
self.set_rc(2, 1500)
# save top left corner of square as waypoint
self.progress("Save WP 3")
self.save_wp()
# roll right to fly east
self.progress("Going east %u meters" % side)
self.set_rc(1, 1700)
self.wait_distance(side)
self.set_rc(1, 1500)
# save top right corner of square as waypoint
self.progress("Save WP 4")
self.save_wp()
# pitch back to fly south
self.progress("Going south %u meters" % side)
self.set_rc(2, 1700)
self.wait_distance(side)
self.set_rc(2, 1500)
# save bottom right corner of square as waypoint
self.progress("Save WP 5")
self.save_wp()
# roll left to fly west
self.progress("Going west %u meters" % side)
self.set_rc(1, 1300)
self.wait_distance(side)
self.set_rc(1, 1500)
# save bottom left corner of square (should be near home) as waypoint
self.progress("Save WP 6")
self.save_wp()
# reduce throttle again
self.set_rc(3, 1500)
# descend to 10m
self.progress("Descend to 10m in Loiter")
self.change_mode('LOITER')
self.set_rc(3, 1200)
time_left = timeout - (self.get_sim_time() - tstart)
self.progress("timeleft = %u" % time_left)
if time_left < 20:
time_left = 20
self.wait_altitude(-10, 10, timeout=time_left, relative=True)
self.set_rc(3, 1500)
self.save_wp()
# save the stored mission to file
mavproxy = self.start_mavproxy()
num_wp = self.save_mission_to_file_using_mavproxy(
mavproxy,
os.path.join(testdir, "ch7_mission.txt"))
self.stop_mavproxy(mavproxy)
if not num_wp:
self.fail_list.append("save_mission_to_file")
self.progress("save_mission_to_file failed")
self.progress("test: Fly a mission from 1 to %u" % num_wp)
self.change_mode('AUTO')
self.set_current_waypoint(1)
self.wait_waypoint(0, num_wp-1, timeout=500)
self.progress("test: MISSION COMPLETE: passed!")
self.land_and_disarm()
    # enter RTL mode and wait for the vehicle to disarm
    def do_RTL(self, distance_min=None, check_alt=True, distance_max=10, timeout=250):
        """Enter RTL mode and wait for the vehicle to disarm at Home."""
        # NOTE(review): distance_min is currently unused -- confirm whether a
        # minimum-distance check was intended here.
        self.change_mode("RTL")
        self.hover()
        self.wait_rtl_complete(check_alt=check_alt, distance_max=distance_max, timeout=timeout)
    def wait_rtl_complete(self, check_alt=True, distance_max=10, timeout=250):
        """Wait for RTL to reach home and disarm.

        "Home" means within distance_max metres of home and (when
        check_alt) relative altitude <= 1m.  Raises NotAchievedException
        if we disarm away from home, AutoTestTimeoutException on timeout.
        """
        self.progress("Waiting RTL to reach Home and disarm")
        tstart = self.get_sim_time()
        while self.get_sim_time_cached() < tstart + timeout:
            m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
            alt = m.relative_alt / 1000.0 # mm -> m
            home_distance = self.distance_to_home(use_cached_home=True)
            home = ""
            alt_valid = alt <= 1
            distance_valid = home_distance < distance_max
            if check_alt:
                if alt_valid and distance_valid:
                    home = "HOME"
            else:
                if distance_valid:
                    home = "HOME"
            self.progress("Alt: %.02f  HomeDist: %.02f %s" %
                          (alt, home_distance, home))
            # our post-condition is that we are disarmed:
            if not self.armed():
                if home == "":
                    raise NotAchievedException("Did not get home")
                # success!
                return
        raise AutoTestTimeoutException("Did not get home and disarm")
    def fly_loiter_to_alt(self):
        """loiter to alt: fly the copter_loiter_to_alt mission with
        precision-landing and rangefinder parameters enabled.

        Parameters are pushed inside a context push/pop so they are
        restored (and SITL rebooted) even when the mission fails.
        """
        self.context_push()
        ex = None
        try:
            self.set_parameter("PLND_ENABLED", 1)
            self.set_parameter("PLND_TYPE", 4)
            self.set_analog_rangefinder_parameters()
            self.reboot_sitl()
            num_wp = self.load_mission("copter_loiter_to_alt.txt")
            self.change_mode('LOITER')
            self.wait_ready_to_arm()
            self.arm_vehicle()
            self.change_mode('AUTO')
            self.set_rc(3, 1550)
            self.wait_current_waypoint(2)
            self.set_rc(3, 1500)
            self.wait_waypoint(0, num_wp-1, timeout=500)
            self.wait_disarmed()
        except Exception as e:
            # remember the failure; cleanup below must still run
            self.print_exception_caught(e)
            ex = e
        self.context_pop()
        self.reboot_sitl()
        if ex is not None:
            raise ex
    # Tests all actions and logic behind the radio failsafe
    def fly_throttle_failsafe(self, side=60, timeout=360):
        """Exercise the radio (throttle) failsafe across FS_THR_ENABLE /
        FS_OPTIONS combinations.

        Each subtest triggers RC loss via SIM_RC_FAIL=1, verifies the
        configured failsafe action, then restores RC and parameters.
        """
        self.start_subtest("If you haven't taken off yet RC failure should be instant disarm")
        self.change_mode("STABILIZE")
        self.set_parameter("DISARM_DELAY", 0)
        self.arm_vehicle()
        self.set_parameter("SIM_RC_FAIL", 1)
        self.disarm_wait(timeout=1)
        self.set_parameter("SIM_RC_FAIL", 0)
        self.set_parameter("DISARM_DELAY", 10)
        # Trigger an RC failure with the failsafe disabled. Verify no action taken.
        self.start_subtest("Radio failsafe disabled test: FS_THR_ENABLE=0 should take no failsafe action")
        self.set_parameter('FS_THR_ENABLE', 0)
        self.set_parameter('FS_OPTIONS', 0)
        self.takeoffAndMoveAway()
        self.set_parameter("SIM_RC_FAIL", 1)
        self.delay_sim_time(5)
        self.wait_mode("ALT_HOLD")
        self.set_parameter("SIM_RC_FAIL", 0)
        self.delay_sim_time(5)
        self.wait_mode("ALT_HOLD")
        self.end_subtest("Completed Radio failsafe disabled test")
        # Trigger an RC failure, verify radio failsafe triggers,
        # restore radio, verify RC function by changing modes to circle
        # and stabilize.
        self.start_subtest("Radio failsafe recovery test")
        self.set_parameter('FS_THR_ENABLE', 1)
        self.set_parameter("SIM_RC_FAIL", 1)
        self.wait_mode("RTL")
        self.delay_sim_time(5)
        self.set_parameter("SIM_RC_FAIL", 0)
        self.delay_sim_time(5)
        self.set_rc(5, 1050)
        self.wait_mode("CIRCLE")
        self.set_rc(5, 1950)
        self.wait_mode("STABILIZE")
        self.end_subtest("Completed Radio failsafe recovery test")
        # Trigger an RC failure, verify failsafe triggers and RTL completes
        self.start_subtest("Radio failsafe RTL with no options test: FS_THR_ENABLE=1 & FS_OPTIONS=0")
        self.set_parameter("SIM_RC_FAIL", 1)
        self.wait_mode("RTL")
        self.wait_rtl_complete()
        self.set_parameter("SIM_RC_FAIL", 0)
        self.end_subtest("Completed Radio failsafe RTL with no options test")
        # Trigger an RC failure, verify failsafe triggers and land completes
        self.start_subtest("Radio failsafe LAND with no options test: FS_THR_ENABLE=3 & FS_OPTIONS=0")
        self.set_parameter('FS_THR_ENABLE', 3)
        self.takeoffAndMoveAway()
        self.set_parameter("SIM_RC_FAIL", 1)
        self.wait_mode("LAND")
        self.wait_landed_and_disarmed()
        self.set_parameter("SIM_RC_FAIL", 0)
        self.end_subtest("Completed Radio failsafe LAND with no options test")
        # Trigger an RC failure, verify failsafe triggers and SmartRTL completes
        self.start_subtest("Radio failsafe SmartRTL->RTL with no options test: FS_THR_ENABLE=4 & FS_OPTIONS=0")
        self.set_parameter('FS_THR_ENABLE', 4)
        self.takeoffAndMoveAway()
        self.set_parameter("SIM_RC_FAIL", 1)
        self.wait_mode("SMART_RTL")
        self.wait_disarmed()
        self.set_parameter("SIM_RC_FAIL", 0)
        self.end_subtest("Completed Radio failsafe SmartRTL->RTL with no options test")
        # Trigger an RC failure, verify failsafe triggers and SmartRTL completes
        self.start_subtest("Radio failsafe SmartRTL->Land with no options test: FS_THR_ENABLE=5 & FS_OPTIONS=0")
        self.set_parameter('FS_THR_ENABLE', 5)
        self.takeoffAndMoveAway()
        self.set_parameter("SIM_RC_FAIL", 1)
        self.wait_mode("SMART_RTL")
        self.wait_disarmed()
        self.set_parameter("SIM_RC_FAIL", 0)
        self.end_subtest("Completed Radio failsafe SmartRTL_Land with no options test")
        # Trigger a GPS failure and RC failure, verify RTL fails into
        # land mode and completes
        self.start_subtest("Radio failsafe RTL fails into land mode due to bad position.")
        self.set_parameter('FS_THR_ENABLE', 1)
        self.takeoffAndMoveAway()
        self.set_parameter('SIM_GPS_DISABLE', 1)
        self.delay_sim_time(5)
        self.set_parameter("SIM_RC_FAIL", 1)
        self.wait_mode("LAND")
        self.wait_landed_and_disarmed()
        self.set_parameter("SIM_RC_FAIL", 0)
        self.set_parameter('SIM_GPS_DISABLE', 0)
        self.wait_ekf_happy()
        self.end_subtest("Completed Radio failsafe RTL fails into land mode due to bad position.")
        # Trigger a GPS failure and RC failure, verify SmartRTL fails
        # into land mode and completes
        self.start_subtest("Radio failsafe SmartRTL->RTL fails into land mode due to bad position.")
        self.set_parameter('FS_THR_ENABLE', 4)
        self.takeoffAndMoveAway()
        self.set_parameter('SIM_GPS_DISABLE', 1)
        self.delay_sim_time(5)
        self.set_parameter("SIM_RC_FAIL", 1)
        self.wait_mode("LAND")
        self.wait_landed_and_disarmed()
        self.set_parameter("SIM_RC_FAIL", 0)
        self.set_parameter('SIM_GPS_DISABLE', 0)
        self.wait_ekf_happy()
        self.end_subtest("Completed Radio failsafe SmartRTL->RTL fails into land mode due to bad position.")
        # Trigger a GPS failure and RC failure, verify SmartRTL fails
        # into land mode and completes
        self.start_subtest("Radio failsafe SmartRTL->LAND fails into land mode due to bad position.")
        self.set_parameter('FS_THR_ENABLE', 5)
        self.takeoffAndMoveAway()
        self.set_parameter('SIM_GPS_DISABLE', 1)
        self.delay_sim_time(5)
        self.set_parameter("SIM_RC_FAIL", 1)
        self.wait_mode("LAND")
        self.wait_landed_and_disarmed()
        self.set_parameter("SIM_RC_FAIL", 0)
        self.set_parameter('SIM_GPS_DISABLE', 0)
        self.wait_ekf_happy()
        self.end_subtest("Completed Radio failsafe SmartRTL->LAND fails into land mode due to bad position.")
        # Trigger a GPS failure, then restore the GPS. Trigger an RC
        # failure, verify SmartRTL fails into RTL and completes
        self.start_subtest("Radio failsafe SmartRTL->RTL fails into RTL mode due to no path.")
        self.set_parameter('FS_THR_ENABLE', 4)
        self.takeoffAndMoveAway()
        self.set_parameter('SIM_GPS_DISABLE', 1)
        self.wait_statustext("SmartRTL deactivated: bad position", timeout=60)
        self.set_parameter('SIM_GPS_DISABLE', 0)
        self.wait_ekf_happy()
        self.delay_sim_time(5)
        self.set_parameter("SIM_RC_FAIL", 1)
        self.wait_mode("RTL")
        self.wait_rtl_complete()
        self.set_parameter("SIM_RC_FAIL", 0)
        self.end_subtest("Completed Radio failsafe SmartRTL->RTL fails into RTL mode due to no path.")
        # Trigger a GPS failure, then restore the GPS. Trigger an RC
        # failure, verify SmartRTL fails into Land and completes
        self.start_subtest("Radio failsafe SmartRTL->LAND fails into land mode due to no path.")
        self.set_parameter('FS_THR_ENABLE', 5)
        self.takeoffAndMoveAway()
        self.set_parameter('SIM_GPS_DISABLE', 1)
        self.wait_statustext("SmartRTL deactivated: bad position", timeout=60)
        self.set_parameter('SIM_GPS_DISABLE', 0)
        self.wait_ekf_happy()
        self.delay_sim_time(5)
        self.set_parameter("SIM_RC_FAIL", 1)
        self.wait_mode("LAND")
        self.wait_landed_and_disarmed()
        self.set_parameter("SIM_RC_FAIL", 0)
        self.end_subtest("Completed Radio failsafe SmartRTL->LAND fails into land mode due to no path.")
        # Trigger an RC failure in guided mode with the option enabled
        # to continue in guided. Verify no failsafe action takes place
        self.start_subtest("Radio failsafe with option to continue in guided mode: FS_THR_ENABLE=1 & FS_OPTIONS=4")
        self.set_parameter("SYSID_MYGCS", self.mav.source_system)
        self.setGCSfailsafe(1)
        self.set_parameter('FS_THR_ENABLE', 1)
        self.set_parameter('FS_OPTIONS', 4)
        self.takeoffAndMoveAway()
        self.change_mode("GUIDED")
        self.set_parameter("SIM_RC_FAIL", 1)
        self.delay_sim_time(5)
        self.wait_mode("GUIDED")
        self.set_parameter("SIM_RC_FAIL", 0)
        self.delay_sim_time(5)
        self.change_mode("ALT_HOLD")
        self.setGCSfailsafe(0)
        # self.change_mode("RTL")
        # self.wait_disarmed()
        self.end_subtest("Completed Radio failsafe with option to continue in guided mode")
        # Trigger an RC failure in AUTO mode with the option enabled
        # to continue the mission. Verify no failsafe action takes
        # place
        self.start_subtest("Radio failsafe RTL with option to continue mission: FS_THR_ENABLE=1 & FS_OPTIONS=1")
        self.set_parameter('FS_OPTIONS', 1)
        self.progress("# Load copter_mission")
        num_wp = self.load_mission("copter_mission.txt", strict=False)
        if not num_wp:
            raise NotAchievedException("load copter_mission failed")
        # self.takeoffAndMoveAway()
        self.change_mode("AUTO")
        self.set_parameter("SIM_RC_FAIL", 1)
        self.delay_sim_time(5)
        self.wait_mode("AUTO")
        self.set_parameter("SIM_RC_FAIL", 0)
        self.delay_sim_time(5)
        self.wait_mode("AUTO")
        # self.change_mode("RTL")
        # self.wait_disarmed()
        self.end_subtest("Completed Radio failsafe RTL with option to continue mission")
        # Trigger an RC failure in AUTO mode without the option
        # enabled to continue. Verify failsafe triggers and RTL
        # completes
        self.start_subtest("Radio failsafe RTL in mission without "
                           "option to continue should RTL: FS_THR_ENABLE=1 & FS_OPTIONS=0")
        self.set_parameter('FS_OPTIONS', 0)
        self.set_parameter("SIM_RC_FAIL", 1)
        self.wait_mode("RTL")
        self.wait_rtl_complete()
        self.clear_mission(mavutil.mavlink.MAV_MISSION_TYPE_MISSION)
        self.set_parameter("SIM_RC_FAIL", 0)
        self.end_subtest("Completed Radio failsafe RTL in mission without option to continue")
        self.progress("All radio failsafe tests complete")
        self.set_parameter('FS_THR_ENABLE', 0)
        self.reboot_sitl()
# Tests all actions and logic behind the GCS failsafe
def fly_gcs_failsafe(self, side=60, timeout=360):
try:
self.test_gcs_failsafe(side=side, timeout=timeout)
except Exception as ex:
self.setGCSfailsafe(0)
self.set_parameter('FS_OPTIONS', 0)
self.disarm_vehicle(force=True)
self.reboot_sitl()
raise ex
    def test_gcs_failsafe(self, side=60, timeout=360):
        """Exercise every GCS (heartbeat-loss) failsafe action and option bit.

        Telemetry loss is simulated with set_heartbeat_rate(0) and recovery
        with set_heartbeat_rate(self.speedup).  Covers FS_GCS_ENABLE values
        0/1/3/4/5 plus an invalid value, and the FS_OPTIONS continuation
        bits (pilot control / auto mission / landing).  Reboots SITL at the
        end.  Order of the subtests matters; do not reorder.
        """
        # Test double-SmartRTL; ensure we do SmarRTL twice rather than
        # landing (tests fix for actual bug)
        self.set_parameter("SYSID_MYGCS", self.mav.source_system)
        self.context_push()
        self.start_subtest("GCS failsafe SmartRTL twice")
        self.setGCSfailsafe(3)
        self.set_parameter('FS_OPTIONS', 8)
        self.takeoffAndMoveAway()
        self.set_heartbeat_rate(0)
        self.wait_mode("SMART_RTL")
        self.wait_disarmed()
        self.set_heartbeat_rate(self.speedup)
        self.wait_statustext("GCS Failsafe Cleared", timeout=60)
        self.takeoffAndMoveAway()
        self.set_heartbeat_rate(0)
        self.wait_statustext("GCS Failsafe")
        def ensure_smartrtl(mav, m):
            # message hook: assert the vehicle stays in SMART_RTL for the
            # remainder of this subtest
            if m.get_type() != "HEARTBEAT":
                return
            # can't use mode_is here because we're in the message hook
            print("Mode: %s" % self.mav.flightmode)
            if self.mav.flightmode != "SMART_RTL":
                raise NotAchievedException("Not in SMART_RTL")
        self.install_message_hook_context(ensure_smartrtl)
        self.set_heartbeat_rate(self.speedup)
        self.wait_statustext("GCS Failsafe Cleared", timeout=60)
        self.set_heartbeat_rate(0)
        self.wait_statustext("GCS Failsafe")
        self.wait_disarmed()
        self.end_subtest("GCS failsafe SmartRTL twice")
        self.set_heartbeat_rate(self.speedup)
        self.wait_statustext("GCS Failsafe Cleared", timeout=60)
        self.context_pop()
        # Trigger telemetry loss with failsafe disabled. Verify no action taken.
        self.start_subtest("GCS failsafe disabled test: FS_GCS_ENABLE=0 should take no failsafe action")
        self.setGCSfailsafe(0)
        self.takeoffAndMoveAway()
        self.set_heartbeat_rate(0)
        self.delay_sim_time(5)
        self.wait_mode("ALT_HOLD")
        self.set_heartbeat_rate(self.speedup)
        self.delay_sim_time(5)
        self.wait_mode("ALT_HOLD")
        self.end_subtest("Completed GCS failsafe disabled test")
        # Trigger telemetry loss with failsafe enabled. Verify
        # failsafe triggers to RTL. Restore telemetry, verify failsafe
        # clears, and change modes.
        self.start_subtest("GCS failsafe recovery test: FS_GCS_ENABLE=1 & FS_OPTIONS=0")
        self.setGCSfailsafe(1)
        self.set_parameter('FS_OPTIONS', 0)
        self.set_heartbeat_rate(0)
        self.wait_mode("RTL")
        self.set_heartbeat_rate(self.speedup)
        self.wait_statustext("GCS Failsafe Cleared", timeout=60)
        self.change_mode("LOITER")
        self.end_subtest("Completed GCS failsafe recovery test")
        # Trigger telemetry loss with failsafe enabled. Verify failsafe triggers and RTL completes
        self.start_subtest("GCS failsafe RTL with no options test: FS_GCS_ENABLE=1 & FS_OPTIONS=0")
        self.setGCSfailsafe(1)
        self.set_parameter('FS_OPTIONS', 0)
        self.set_heartbeat_rate(0)
        self.wait_mode("RTL")
        self.wait_rtl_complete()
        self.set_heartbeat_rate(self.speedup)
        self.wait_statustext("GCS Failsafe Cleared", timeout=60)
        self.end_subtest("Completed GCS failsafe RTL with no options test")
        # Trigger telemetry loss with failsafe enabled. Verify failsafe triggers and land completes
        self.start_subtest("GCS failsafe LAND with no options test: FS_GCS_ENABLE=5 & FS_OPTIONS=0")
        self.setGCSfailsafe(5)
        self.takeoffAndMoveAway()
        self.set_heartbeat_rate(0)
        self.wait_mode("LAND")
        self.wait_landed_and_disarmed()
        self.set_heartbeat_rate(self.speedup)
        self.wait_statustext("GCS Failsafe Cleared", timeout=60)
        self.end_subtest("Completed GCS failsafe land with no options test")
        # Trigger telemetry loss with failsafe enabled. Verify failsafe triggers and SmartRTL completes
        self.start_subtest("GCS failsafe SmartRTL->RTL with no options test: FS_GCS_ENABLE=3 & FS_OPTIONS=0")
        self.setGCSfailsafe(3)
        self.takeoffAndMoveAway()
        self.set_heartbeat_rate(0)
        self.wait_mode("SMART_RTL")
        self.wait_disarmed()
        self.set_heartbeat_rate(self.speedup)
        self.wait_statustext("GCS Failsafe Cleared", timeout=60)
        self.end_subtest("Completed GCS failsafe SmartRTL->RTL with no options test")
        # Trigger telemetry loss with failsafe enabled. Verify failsafe triggers and SmartRTL completes
        self.start_subtest("GCS failsafe SmartRTL->Land with no options test: FS_GCS_ENABLE=4 & FS_OPTIONS=0")
        self.setGCSfailsafe(4)
        self.takeoffAndMoveAway()
        self.set_heartbeat_rate(0)
        self.wait_mode("SMART_RTL")
        self.wait_disarmed()
        self.set_heartbeat_rate(self.speedup)
        self.wait_statustext("GCS Failsafe Cleared", timeout=60)
        self.end_subtest("Completed GCS failsafe SmartRTL->Land with no options test")
        # Trigger telemetry loss with an invalid failsafe value. Verify failsafe triggers and RTL completes
        self.start_subtest("GCS failsafe invalid value with no options test: FS_GCS_ENABLE=99 & FS_OPTIONS=0")
        self.setGCSfailsafe(99)
        self.takeoffAndMoveAway()
        self.set_heartbeat_rate(0)
        self.wait_mode("RTL")
        self.wait_rtl_complete()
        self.set_heartbeat_rate(self.speedup)
        self.wait_statustext("GCS Failsafe Cleared", timeout=60)
        self.end_subtest("Completed GCS failsafe invalid value with no options test")
        # Trigger telemetry loss with failsafe enabled to test FS_OPTIONS settings
        self.start_subtest("GCS failsafe with option bit tests: FS_GCS_ENABLE=1 & FS_OPTIONS=64/2/16")
        num_wp = self.load_mission("copter_mission.txt", strict=False)
        if not num_wp:
            raise NotAchievedException("load copter_mission failed")
        self.setGCSfailsafe(1)
        self.set_parameter('FS_OPTIONS', 16)
        self.takeoffAndMoveAway()
        self.progress("Testing continue in pilot controlled modes")
        self.set_heartbeat_rate(0)
        self.wait_statustext("GCS Failsafe - Continuing Pilot Control", timeout=60)
        self.delay_sim_time(5)
        self.wait_mode("ALT_HOLD")
        self.set_heartbeat_rate(self.speedup)
        self.wait_statustext("GCS Failsafe Cleared", timeout=60)
        self.progress("Testing continue in auto mission")
        self.set_parameter('FS_OPTIONS', 2)
        self.change_mode("AUTO")
        self.delay_sim_time(5)
        self.set_heartbeat_rate(0)
        self.wait_statustext("GCS Failsafe - Continuing Auto Mode", timeout=60)
        self.delay_sim_time(5)
        self.wait_mode("AUTO")
        self.set_heartbeat_rate(self.speedup)
        self.wait_statustext("GCS Failsafe Cleared", timeout=60)
        self.progress("Testing continue landing in land mode")
        self.set_parameter('FS_OPTIONS', 8)
        self.change_mode("LAND")
        self.delay_sim_time(5)
        self.set_heartbeat_rate(0)
        self.wait_statustext("GCS Failsafe - Continuing Landing", timeout=60)
        self.delay_sim_time(5)
        self.wait_mode("LAND")
        self.wait_landed_and_disarmed()
        self.set_heartbeat_rate(self.speedup)
        self.wait_statustext("GCS Failsafe Cleared", timeout=60)
        self.end_subtest("Completed GCS failsafe with option bits")
        self.setGCSfailsafe(0)
        self.set_parameter('FS_OPTIONS', 0)
        self.progress("All GCS failsafe tests complete")
        self.reboot_sitl()
# Tests all actions and logic behind the battery failsafe
def fly_battery_failsafe(self, timeout=300):
ex = None
try:
self.test_battery_failsafe(timeout=timeout)
except Exception as e:
self.print_exception_caught(e)
ex = e
self.set_parameter('BATT_LOW_VOLT', 0)
self.set_parameter('BATT_CRT_VOLT', 0)
self.set_parameter('BATT_FS_LOW_ACT', 0)
self.set_parameter('BATT_FS_CRT_ACT', 0)
self.set_parameter('FS_OPTIONS', 0)
self.reboot_sitl()
if ex is not None:
raise ex
    def test_battery_failsafe(self, timeout=300):
        """Exercise the battery failsafe actions and options.

        Drives SIM_BATT_VOLTAGE through the low (11.4V < BATT_LOW_VOLT) and
        critical (10.0V < BATT_CRT_VOLT) thresholds and verifies the
        configured BATT_FS_LOW_ACT / BATT_FS_CRT_ACT actions (disabled,
        land, RTL, SmartRTL, terminate), the FS_OPTIONS continue-landing
        bit, and that an RC failsafe cannot interrupt a critical-battery
        landing.  SITL is rebooted between subtests to clear failsafe
        state.  Order of the subtests matters; do not reorder.
        """
        self.progress("Configure battery failsafe parameters")
        self.set_parameters({
            'SIM_SPEEDUP': 4,
            'BATT_LOW_VOLT': 11.5,
            'BATT_CRT_VOLT': 10.1,
            'BATT_FS_LOW_ACT': 0,
            'BATT_FS_CRT_ACT': 0,
            'FS_OPTIONS': 0,
            'SIM_BATT_VOLTAGE': 12.5,
        })
        # Trigger low battery condition with failsafe disabled. Verify
        # no action taken.
        self.start_subtest("Batt failsafe disabled test")
        self.takeoffAndMoveAway()
        m = self.mav.recv_match(type='BATTERY_STATUS', blocking=True, timeout=1)
        if m.charge_state != mavutil.mavlink.MAV_BATTERY_CHARGE_STATE_OK:
            raise NotAchievedException("Expected state ok")
        self.set_parameter('SIM_BATT_VOLTAGE', 11.4)
        self.wait_statustext("Battery 1 is low", timeout=60)
        m = self.mav.recv_match(type='BATTERY_STATUS', blocking=True, timeout=1)
        if m.charge_state != mavutil.mavlink.MAV_BATTERY_CHARGE_STATE_LOW:
            raise NotAchievedException("Expected state low")
        self.delay_sim_time(5)
        self.wait_mode("ALT_HOLD")
        self.set_parameter('SIM_BATT_VOLTAGE', 10.0)
        self.wait_statustext("Battery 1 is critical", timeout=60)
        m = self.mav.recv_match(type='BATTERY_STATUS', blocking=True, timeout=1)
        if m.charge_state != mavutil.mavlink.MAV_BATTERY_CHARGE_STATE_CRITICAL:
            raise NotAchievedException("Expected state critical")
        self.delay_sim_time(5)
        self.wait_mode("ALT_HOLD")
        self.change_mode("RTL")
        self.wait_rtl_complete()
        self.set_parameter('SIM_BATT_VOLTAGE', 12.5)
        self.reboot_sitl()
        self.end_subtest("Completed Batt failsafe disabled test")
        # TWO STAGE BATTERY FAILSAFE: Trigger low battery condition,
        # then critical battery condition. Verify RTL and Land actions
        # complete.
        self.start_subtest("Two stage battery failsafe test with RTL and Land")
        self.takeoffAndMoveAway()
        self.delay_sim_time(3)
        self.set_parameter('BATT_FS_LOW_ACT', 2)
        self.set_parameter('BATT_FS_CRT_ACT', 1)
        self.set_parameter('SIM_BATT_VOLTAGE', 11.4)
        self.wait_statustext("Battery 1 is low", timeout=60)
        self.delay_sim_time(5)
        self.wait_mode("RTL")
        self.delay_sim_time(10)
        self.set_parameter('SIM_BATT_VOLTAGE', 10.0)
        self.wait_statustext("Battery 1 is critical", timeout=60)
        self.delay_sim_time(5)
        self.wait_mode("LAND")
        self.wait_landed_and_disarmed()
        self.set_parameter('SIM_BATT_VOLTAGE', 12.5)
        self.reboot_sitl()
        self.end_subtest("Completed two stage battery failsafe test with RTL and Land")
        # TWO STAGE BATTERY FAILSAFE: Trigger low battery condition,
        # then critical battery condition. Verify both SmartRTL
        # actions complete
        self.start_subtest("Two stage battery failsafe test with SmartRTL")
        self.takeoffAndMoveAway()
        self.set_parameter('BATT_FS_LOW_ACT', 3)
        self.set_parameter('BATT_FS_CRT_ACT', 4)
        self.delay_sim_time(10)
        self.set_parameter('SIM_BATT_VOLTAGE', 11.4)
        self.wait_statustext("Battery 1 is low", timeout=60)
        self.delay_sim_time(5)
        self.wait_mode("SMART_RTL")
        self.change_mode("LOITER")
        self.delay_sim_time(10)
        self.set_parameter('SIM_BATT_VOLTAGE', 10.0)
        self.wait_statustext("Battery 1 is critical", timeout=60)
        self.delay_sim_time(5)
        self.wait_mode("SMART_RTL")
        self.wait_disarmed()
        self.set_parameter('SIM_BATT_VOLTAGE', 12.5)
        self.reboot_sitl()
        self.end_subtest("Completed two stage battery failsafe test with SmartRTL")
        # Trigger low battery condition in land mode with FS_OPTIONS
        # set to allow land mode to continue. Verify landing completes
        # uninterrupted.
        self.start_subtest("Battery failsafe with FS_OPTIONS set to continue landing")
        self.takeoffAndMoveAway()
        self.set_parameter('FS_OPTIONS', 8)
        self.change_mode("LAND")
        self.delay_sim_time(5)
        self.set_parameter('SIM_BATT_VOLTAGE', 11.4)
        self.wait_statustext("Battery 1 is low", timeout=60)
        self.delay_sim_time(5)
        self.wait_mode("LAND")
        self.wait_landed_and_disarmed()
        self.set_parameter('SIM_BATT_VOLTAGE', 12.5)
        self.reboot_sitl()
        self.end_subtest("Completed battery failsafe with FS_OPTIONS set to continue landing")
        # Trigger a critical battery condition, which triggers a land
        # mode failsafe. Trigger an RC failure. Verify the RC failsafe
        # is prevented from stopping the low battery landing.
        self.start_subtest("Battery failsafe critical landing")
        self.takeoffAndMoveAway(100, 50)
        self.set_parameter('FS_OPTIONS', 0)
        self.set_parameter('BATT_FS_LOW_ACT', 1)
        self.set_parameter('BATT_FS_CRT_ACT', 1)
        self.set_parameter('FS_THR_ENABLE', 1)
        self.delay_sim_time(5)
        self.set_parameter('SIM_BATT_VOLTAGE', 10.0)
        self.wait_statustext("Battery 1 is critical", timeout=60)
        self.wait_mode("LAND")
        self.delay_sim_time(10)
        self.set_parameter("SIM_RC_FAIL", 1)
        self.delay_sim_time(10)
        self.wait_mode("LAND")
        self.wait_landed_and_disarmed()
        self.set_parameter('SIM_BATT_VOLTAGE', 12.5)
        self.set_parameter("SIM_RC_FAIL", 0)
        self.reboot_sitl()
        self.end_subtest("Completed battery failsafe critical landing")
        # Trigger low battery condition with failsafe set to terminate. Copter will disarm and crash.
        self.start_subtest("Battery failsafe terminate")
        self.takeoffAndMoveAway()
        self.set_parameter('BATT_FS_LOW_ACT', 5)
        self.delay_sim_time(10)
        self.set_parameter('SIM_BATT_VOLTAGE', 11.4)
        self.wait_statustext("Battery 1 is low", timeout=60)
        self.wait_disarmed()
        self.end_subtest("Completed terminate failsafe test")
        self.progress("All Battery failsafe tests complete")
# Tests the vibration failsafe
    def test_vibration_failsafe(self):
        """Verify vibration compensation engages under simulated high vibration.

        Injects a Z accelerometer bias on all three IMUs, waits for the
        "Vibration compensation ON" warning, lands, then reboots SITL so
        the EKF recovers for subsequent tests.
        """
        self.context_push()
        # takeoff in Loiter to 20m
        self.takeoff(20, mode="LOITER")
        # simulate accel bias caused by high vibration
        self.set_parameters({
            'SIM_ACC1_BIAS_Z': 2,
            'SIM_ACC2_BIAS_Z': 2,
            'SIM_ACC3_BIAS_Z': 2,
        })
        # wait for Vibration compensation warning and change to LAND mode
        self.wait_statustext("Vibration compensation ON", timeout=30)
        self.change_mode("LAND")
        # check vehicle descends to 2m or less within 40 seconds
        self.wait_altitude(-5, 2, timeout=40, relative=True)
        # force disarm of vehicle (it will likely not automatically disarm)
        self.disarm_vehicle(force=True)
        # revert simulated accel bias and reboot to restore EKF health
        self.context_pop()
        self.reboot_sitl()
    # fly_stability_patch - fly south, then hold loiter within 5m
    # position and altitude and reduce 1 motor to 65% efficiency
def fly_stability_patch(self,
holdtime=30,
maxaltchange=5,
maxdistchange=10):
self.takeoff(10, mode="LOITER")
# first south
self.progress("turn south")
self.set_rc(4, 1580)
self.wait_heading(180)
self.set_rc(4, 1500)
# fly west 80m
self.set_rc(2, 1100)
self.wait_distance(80)
self.set_rc(2, 1500)
# wait for copter to slow moving
self.wait_groundspeed(0, 2)
m = self.mav.recv_match(type='VFR_HUD', blocking=True)
start_altitude = m.alt
start = self.mav.location()
tstart = self.get_sim_time()
self.progress("Holding loiter at %u meters for %u seconds" %
(start_altitude, holdtime))
# cut motor 1's to efficiency
self.progress("Cutting motor 1 to 65% efficiency")
self.set_parameter("SIM_ENGINE_MUL", 0.65)
while self.get_sim_time_cached() < tstart + holdtime:
m = self.mav.recv_match(type='VFR_HUD', blocking=True)
pos = self.mav.location()
delta = self.get_distance(start, pos)
alt_delta = math.fabs(m.alt - start_altitude)
self.progress("Loiter Dist: %.2fm, alt:%u" % (delta, m.alt))
if alt_delta > maxaltchange:
raise NotAchievedException(
"Loiter alt shifted %u meters (> limit %u)" %
(alt_delta, maxaltchange))
if delta > maxdistchange:
raise NotAchievedException(
("Loiter shifted %u meters (> limit of %u)" %
(delta, maxdistchange)))
# restore motor 1 to 100% efficiency
self.set_parameter("SIM_ENGINE_MUL", 1.0)
self.progress("Stability patch and Loiter OK for %us" % holdtime)
self.progress("RTL after stab patch")
self.do_RTL()
def debug_arming_issue(self):
while True:
self.send_mavlink_arm_command()
m = self.mav.recv_match(blocking=True, timeout=1)
if m is None:
continue
if m.get_type() in ["STATUSTEXT", "COMMAND_ACK"]:
print("Got: %s" % str(m))
if self.mav.motors_armed():
self.progress("Armed")
return
# fly_fence_test - fly east until you hit the horizontal circular fence
avoid_behave_slide = 0
    def fly_fence_avoid_test_radius_check(self, timeout=180, avoid_behave=avoid_behave_slide):
        """Push against a circular fence and check avoidance holds position.

        In LOITER, flies towards a 15m-radius circular fence with avoidance
        enabled and verifies the distance-to-home settles within 1m of the
        inner radius (fence radius minus fence margin) while pushing.

        timeout -- unused; retained for interface compatibility
        avoid_behave -- AVOID_BEHAVE value under test (0=slide, 1=stop)
        """
        using_mode = "LOITER" # must be something which adjusts velocity!
        self.change_mode(using_mode)
        self.set_parameter("FENCE_ENABLE", 1) # fence
        self.set_parameter("FENCE_TYPE", 2) # circle
        fence_radius = 15
        self.set_parameter("FENCE_RADIUS", fence_radius)
        fence_margin = 3
        self.set_parameter("FENCE_MARGIN", fence_margin)
        self.set_parameter("AVOID_ENABLE", 1)
        self.set_parameter("AVOID_BEHAVE", avoid_behave)
        self.set_parameter("RC10_OPTION", 40) # avoid-enable
        self.wait_ready_to_arm()
        self.set_rc(10, 2000)
        home_distance = self.distance_to_home(use_cached_home=True)
        if home_distance > 5:
            raise PreconditionFailedException("Expected to be within 5m of home")
        self.zero_throttle()
        self.arm_vehicle()
        self.set_rc(3, 1700)
        self.wait_altitude(10, 100, relative=True)
        self.set_rc(3, 1500)
        self.set_rc(2, 1400)
        self.wait_distance_to_home(12, 20)
        tstart = self.get_sim_time()
        push_time = 70 # push against barrier for 70 seconds
        failed_max = False
        failed_min = False
        while True:
            if self.get_sim_time() - tstart > push_time:
                self.progress("Push time up")
                break
            # make sure we don't RTL:
            if not self.mode_is(using_mode):
                raise NotAchievedException("Changed mode away from %s" % using_mode)
            distance = self.distance_to_home(use_cached_home=True)
            inner_radius = fence_radius - fence_margin
            want_min = inner_radius - 1 # allow 1m either way
            want_max = inner_radius + 1 # allow 1m either way
            self.progress("Push: distance=%f %f<want<%f" %
                          (distance, want_min, want_max))
            if distance < want_min:
                if failed_min is False:
                    self.progress("Failed min")
                    failed_min = True
            if distance > want_max:
                if failed_max is False:
                    self.progress("Failed max")
                    failed_max = True
        if failed_min and failed_max:
            raise NotAchievedException("Failed both min and max checks. Clever")
        if failed_min:
            raise NotAchievedException("Failed min")
        if failed_max:
            raise NotAchievedException("Failed max")
        self.set_rc(2, 1500)
        self.do_RTL()
def fly_fence_avoid_test(self, timeout=180):
self.fly_fence_avoid_test_radius_check(avoid_behave=1, timeout=timeout)
self.fly_fence_avoid_test_radius_check(avoid_behave=0, timeout=timeout)
def assert_prearm_failure(self, expected_statustext, timeout=5, ignore_prearm_failures=[]):
seen_statustext = False
seen_command_ack = False
self.drain_mav()
tstart = self.get_sim_time_cached()
arm_last_send = 0
while True:
if seen_command_ack and seen_statustext:
break
now = self.get_sim_time_cached()
if now - tstart > timeout:
raise NotAchievedException(
"Did not see failure-to-arm messages (statustext=%s command_ack=%s" %
(seen_statustext, seen_command_ack))
if now - arm_last_send > 1:
arm_last_send = now
self.send_mavlink_arm_command()
m = self.mav.recv_match(blocking=True, timeout=1)
if m is None:
continue
if m.get_type() == "STATUSTEXT":
if expected_statustext in m.text:
self.progress("Got: %s" % str(m))
seen_statustext = True
elif "PreArm" in m.text and m.text[8:] not in ignore_prearm_failures:
self.progress("Got: %s" % str(m))
raise NotAchievedException("Unexpected prearm failure (%s)" % m.text)
if m.get_type() == "COMMAND_ACK":
print("Got: %s" % str(m))
if m.command == mavutil.mavlink.MAV_CMD_COMPONENT_ARM_DISARM:
if m.result != 4:
raise NotAchievedException("command-ack says we didn't fail to arm")
self.progress("Got: %s" % str(m))
seen_command_ack = True
if self.mav.motors_armed():
raise NotAchievedException("Armed when we shouldn't have")
# fly_fence_test - fly east until you hit the horizontal circular fence
    def fly_fence_test(self, timeout=180):
        """Breach the circular fence and verify the RTL failsafe brings us home.

        Also checks that arming is refused while outside a fence and with
        invalid FENCE_RADIUS / FENCE_ALT_MAX values.

        timeout -- seconds allowed to reach home and disarm after the breach
        Raises AutoTestTimeoutException if home is not reached in time.
        """
        # enable fence, disable avoidance
        self.set_parameter("FENCE_ENABLE", 1)
        self.set_parameter("AVOID_ENABLE", 0)
        self.change_mode("LOITER")
        self.wait_ready_to_arm()
        # fence requires home to be set:
        m = self.poll_home_position()
        if m is None:
            raise NotAchievedException("Did not receive HOME_POSITION")
        self.progress("home: %s" % str(m))
        self.start_subtest("ensure we can't arm if outside fence")
        self.load_fence("fence-in-middle-of-nowhere.txt")
        self.delay_sim_time(5) # let fence check run so it loads-from-eeprom
        self.assert_prearm_failure("vehicle outside fence")
        self.progress("Failed to arm outside fence (good!)")
        self.clear_fence()
        self.delay_sim_time(5) # let fence breach clear
        self.drain_mav()
        self.end_subtest("ensure we can't arm if outside fence")
        self.start_subtest("ensure we can't arm with bad radius")
        self.context_push()
        self.set_parameter("FENCE_RADIUS", -1)
        self.assert_prearm_failure("Invalid FENCE_RADIUS value")
        self.context_pop()
        self.progress("Failed to arm with bad radius")
        self.drain_mav()
        self.end_subtest("ensure we can't arm with bad radius")
        self.start_subtest("ensure we can't arm with bad alt")
        self.context_push()
        self.set_parameter("FENCE_ALT_MAX", -1)
        self.assert_prearm_failure("Invalid FENCE_ALT_MAX value")
        self.context_pop()
        self.progress("Failed to arm with bad altitude")
        self.end_subtest("ensure we can't arm with bad radius")
        self.start_subtest("Check breach-fence behaviour")
        self.set_parameter("FENCE_TYPE", 2)
        self.takeoff(10, mode="LOITER")
        # first east
        self.progress("turn east")
        self.set_rc(4, 1580)
        self.wait_heading(160, timeout=60)
        self.set_rc(4, 1500)
        fence_radius = self.get_parameter("FENCE_RADIUS")
        self.progress("flying forward (east) until we hit fence")
        pitching_forward = True
        self.set_rc(2, 1100)
        self.progress("Waiting for fence breach")
        tstart = self.get_sim_time()
        while not self.mode_is("RTL"):
            if self.get_sim_time_cached() - tstart > 30:
                raise NotAchievedException("Did not breach fence")
            m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
            alt = m.relative_alt / 1000.0 # mm -> m
            home_distance = self.distance_to_home(use_cached_home=True)
            self.progress("Alt: %.02f HomeDistance: %.02f (fence radius=%f)" %
                          (alt, home_distance, fence_radius))
        self.progress("Waiting until we get home and disarm")
        tstart = self.get_sim_time()
        while self.get_sim_time_cached() < tstart + timeout:
            m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
            alt = m.relative_alt / 1000.0 # mm -> m
            home_distance = self.distance_to_home(use_cached_home=True)
            self.progress("Alt: %.02f HomeDistance: %.02f" %
                          (alt, home_distance))
            # recenter pitch sticks once we're home so we don't fly off again
            if pitching_forward and home_distance < 50:
                pitching_forward = False
                self.set_rc(2, 1475)
                # disable fence
                self.set_parameter("FENCE_ENABLE", 0)
            if (alt <= 1 and home_distance < 10) or (not self.armed() and home_distance < 10):
                # reduce throttle
                self.zero_throttle()
                self.change_mode("LAND")
                self.wait_landed_and_disarmed()
                self.progress("Reached home OK")
                self.zero_throttle()
                return
        # given we're testing RTL, doing one here probably doesn't make sense
        home_distance = self.distance_to_home(use_cached_home=True)
        raise AutoTestTimeoutException(
            "Fence test failed to reach home (%fm distance) - "
            "timed out after %u seconds" % (home_distance, timeout,))
# fly_alt_max_fence_test - fly up until you hit the fence ceiling
    def fly_alt_max_fence_test(self):
        """Climb until the FENCE_ALT_MAX ceiling triggers an RTL failsafe."""
        self.takeoff(10, mode="LOITER")
        """Hold loiter position."""
        # enable fence, disable avoidance
        self.set_parameter("FENCE_ENABLE", 1)
        self.set_parameter("AVOID_ENABLE", 0)
        self.set_parameter("FENCE_TYPE", 1)
        self.change_alt(10)
        # first east
        self.progress("turning east")
        self.set_rc(4, 1580)
        self.wait_heading(160, timeout=60)
        self.set_rc(4, 1500)
        self.progress("flying east 20m")
        self.set_rc(2, 1100)
        self.wait_distance(20)
        self.progress("flying up")
        self.set_rc_from_map({
            2: 1500,
            3: 1800,
        })
        # wait for fence to trigger
        self.wait_mode('RTL', timeout=120)
        self.wait_rtl_complete()
        self.zero_throttle()
# fly_alt_min_fence_test - fly down until you hit the fence floor
    def fly_alt_min_fence_test(self):
        """Descend until the FENCE_ALT_MIN floor triggers an RTL failsafe."""
        self.takeoff(30, mode="LOITER", timeout=60)
        # enable fence, disable avoidance
        self.set_parameter("AVOID_ENABLE", 0)
        self.set_parameter("FENCE_TYPE", 8)
        self.set_parameter("FENCE_ALT_MIN", 20)
        self.change_alt(30)
        # Activate the floor fence
        # TODO this test should run without requiring this
        self.do_fence_enable()
        # first east
        self.progress("turn east")
        self.set_rc(4, 1580)
        self.wait_heading(160, timeout=60)
        self.set_rc(4, 1500)
        # fly forward (east) at least 20m
        self.set_rc(2, 1100)
        self.wait_distance(20)
        # stop flying forward and start flying down:
        self.set_rc_from_map({
            2: 1500,
            3: 1200,
        })
        # wait for fence to trigger
        self.wait_mode('RTL', timeout=120)
        self.wait_rtl_complete()
        # Disable the fence using mavlink command to ensure cleaned up SITL state
        self.do_fence_disable()
        self.zero_throttle()
    def fly_fence_floor_enabled_landing(self):
        """Ensure we can initiate and complete an RTL (ending in a landing)
        while the fence, including a minimum-altitude floor, is enabled.

        After landing the fence must still report enabled but unhealthy
        (the vehicle is below FENCE_ALT_MIN on the ground)."""
        fence_bit = mavutil.mavlink.MAV_SYS_STATUS_GEOFENCE
        self.progress("Test Landing while fence floor enabled")
        self.set_parameter("AVOID_ENABLE", 0)
        self.set_parameter("FENCE_TYPE", 15)
        self.set_parameter("FENCE_ALT_MIN", 10)
        self.set_parameter("FENCE_ALT_MAX", 20)
        self.change_mode("GUIDED")
        self.wait_ready_to_arm()
        self.arm_vehicle()
        self.user_takeoff(alt_min=15)
        # Check fence is enabled
        self.do_fence_enable()
        self.assert_fence_enabled()
        # Change to RC controlled mode
        self.change_mode('LOITER')
        self.set_rc(3, 1800)
        self.wait_mode('RTL', timeout=120)
        self.wait_landed_and_disarmed()
        self.assert_fence_enabled()
        # Assert fence is not healthy
        self.assert_sensor_state(fence_bit, healthy=False)
        # Disable the fence using mavlink command to ensure cleaned up SITL state
        self.do_fence_disable()
        self.assert_fence_disabled()
    def fly_gps_glitch_loiter_test(self, timeout=30, max_distance=20):
        """fly_gps_glitch_loiter_test. Fly south east in loiter and test
        reaction to gps glitch.

        Applies a sequence of SIM_GPS_GLITCH_X/Y offsets (roughly one every
        0.45s of sim time), then checks the vehicle stays within
        max_distance metres of its pre-glitch position for the remainder of
        timeout seconds.  SITL is rebooted at the end because re-arming
        with a glitching GPS is unreliable."""
        self.takeoff(10, mode="LOITER")
        # turn on simulator display of gps and actual position
        if self.use_map:
            self.show_gps_and_sim_positions(True)
        # set-up gps glitch array
        glitch_lat = [0.0002996,
                      0.0006958,
                      0.0009431,
                      0.0009991,
                      0.0009444,
                      0.0007716,
                      0.0006221]
        glitch_lon = [0.0000717,
                      0.0000912,
                      0.0002761,
                      0.0002626,
                      0.0002807,
                      0.0002049,
                      0.0001304]
        glitch_num = len(glitch_lat)
        self.progress("GPS Glitches:")
        for i in range(1, glitch_num):
            self.progress("glitch %d %.7f %.7f" %
                          (i, glitch_lat[i], glitch_lon[i]))
        # turn south east
        self.progress("turn south east")
        self.set_rc(4, 1580)
        try:
            self.wait_heading(150)
            self.set_rc(4, 1500)
            # fly forward (south east) at least 60m
            self.set_rc(2, 1100)
            self.wait_distance(60)
            self.set_rc(2, 1500)
            # wait for copter to slow down
        except Exception as e:
            # make sure the map overlay is turned off again before failing
            if self.use_map:
                self.show_gps_and_sim_positions(False)
            raise e
        # record time and position
        tstart = self.get_sim_time()
        tnow = tstart
        start_pos = self.sim_location()
        # initialise current glitch
        glitch_current = 0
        self.progress("Apply first glitch")
        self.set_parameter("SIM_GPS_GLITCH_X", glitch_lat[glitch_current])
        self.set_parameter("SIM_GPS_GLITCH_Y", glitch_lon[glitch_current])
        # record position for 30 seconds
        while tnow < tstart + timeout:
            tnow = self.get_sim_time_cached()
            desired_glitch_num = int((tnow - tstart) * 2.2)
            if desired_glitch_num > glitch_current and glitch_current != -1:
                glitch_current = desired_glitch_num
                # turn off glitching if we've reached the end of glitch list
                if glitch_current >= glitch_num:
                    # -1 is the sentinel for "all glitches applied"
                    glitch_current = -1
                    self.progress("Completed Glitches")
                    self.set_parameter("SIM_GPS_GLITCH_X", 0)
                    self.set_parameter("SIM_GPS_GLITCH_Y", 0)
                else:
                    self.progress("Applying glitch %u" % glitch_current)
                    # move onto the next glitch
                    self.set_parameter("SIM_GPS_GLITCH_X", glitch_lat[glitch_current])
                    self.set_parameter("SIM_GPS_GLITCH_Y", glitch_lon[glitch_current])
            # start displaying distance moved after all glitches applied
            if glitch_current == -1:
                m = self.mav.recv_match(type='GLOBAL_POSITION_INT',
                                        blocking=True)
                alt = m.alt/1000.0 # mm -> m
                curr_pos = self.sim_location()
                moved_distance = self.get_distance(curr_pos, start_pos)
                self.progress("Alt: %.02f  Moved: %.0f" %
                              (alt, moved_distance))
                if moved_distance > max_distance:
                    raise NotAchievedException(
                        "Moved over %u meters, Failed!" % max_distance)
            else:
                self.drain_mav()
        # disable gps glitch
        if glitch_current != -1:
            self.set_parameter("SIM_GPS_GLITCH_X", 0)
            self.set_parameter("SIM_GPS_GLITCH_Y", 0)
        if self.use_map:
            self.show_gps_and_sim_positions(False)
        self.progress("GPS glitch test passed!"
                      "  stayed within %u meters for %u seconds" %
                      (max_distance, timeout))
        self.do_RTL()
        # re-arming is problematic because the GPS is glitching!
        self.reboot_sitl()
def fly_gps_glitch_loiter_test2(self):
"""test vehicle handles GPS glitch (aka EKF Reset) without twitching"""
self.context_push()
self.takeoff(10, mode="LOITER")
# wait for vehicle to level
self.wait_attitude(desroll=0, despitch=0, timeout=10, tolerance=1)
# apply glitch
self.set_parameter("SIM_GPS_GLITCH_X", 0.001)
# check lean angles remain stable for 20 seconds
tstart = self.get_sim_time()
while self.get_sim_time_cached() - tstart < 20:
m = self.mav.recv_match(type='ATTITUDE', blocking=True)
roll_deg = math.degrees(m.roll)
pitch_deg = math.degrees(m.pitch)
self.progress("checking att: roll=%f pitch=%f " % (roll_deg, pitch_deg))
if abs(roll_deg) > 2 or abs(pitch_deg) > 2:
raise NotAchievedException("fly_gps_glitch_loiter_test2 failed, roll or pitch moved during GPS glitch")
# RTL, remove glitch and reboot sitl
self.do_RTL()
self.context_pop()
self.reboot_sitl()
# fly_gps_glitch_auto_test - fly mission and test reaction to gps glitch
    def fly_gps_glitch_auto_test(self, timeout=180):
        """Fly the copter_glitch_mission while stepping through GPS glitches.

        Glitches are applied (roughly one every 0.45s of sim time) once the
        vehicle is 100m from home, then cleared; the mission must still
        complete and the vehicle return home within timeout seconds.  SITL
        is rebooted at the end because re-arming with a glitching GPS is
        unreliable."""
        # set-up gps glitch array
        glitch_lat = [0.0002996,
                      0.0006958,
                      0.0009431,
                      0.0009991,
                      0.0009444,
                      0.0007716,
                      0.0006221]
        glitch_lon = [0.0000717,
                      0.0000912,
                      0.0002761,
                      0.0002626,
                      0.0002807,
                      0.0002049,
                      0.0001304]
        glitch_num = len(glitch_lat)
        self.progress("GPS Glitches:")
        for i in range(1, glitch_num):
            self.progress("glitch %d %.7f %.7f" %
                          (i, glitch_lat[i], glitch_lon[i]))
        # Fly mission #1
        self.progress("# Load copter_glitch_mission")
        # load the waypoint count
        num_wp = self.load_mission("copter_glitch_mission.txt", strict=False)
        if not num_wp:
            raise NotAchievedException("load copter_glitch_mission failed")
        # turn on simulator display of gps and actual position
        if self.use_map:
            self.show_gps_and_sim_positions(True)
        self.progress("test: Fly a mission from 1 to %u" % num_wp)
        self.set_current_waypoint(1)
        self.change_mode("STABILIZE")
        self.wait_ready_to_arm()
        self.zero_throttle()
        self.arm_vehicle()
        # switch into AUTO mode and raise throttle
        self.change_mode('AUTO')
        self.set_rc(3, 1500)
        # wait until 100m from home
        try:
            self.wait_distance(100, 5, 90)
        except Exception as e:
            # make sure the map overlay is turned off again before failing
            if self.use_map:
                self.show_gps_and_sim_positions(False)
            raise e
        # record time and position
        tstart = self.get_sim_time()
        # initialise current glitch
        glitch_current = 0
        self.progress("Apply first glitch")
        self.set_parameter("SIM_GPS_GLITCH_X", glitch_lat[glitch_current])
        self.set_parameter("SIM_GPS_GLITCH_Y", glitch_lon[glitch_current])
        # record position for 30 seconds
        while glitch_current < glitch_num:
            tnow = self.get_sim_time()
            desired_glitch_num = int((tnow - tstart) * 2.2)
            if desired_glitch_num > glitch_current and glitch_current != -1:
                glitch_current = desired_glitch_num
                # apply next glitch
                if glitch_current < glitch_num:
                    self.progress("Applying glitch %u" % glitch_current)
                    self.set_parameter("SIM_GPS_GLITCH_X",
                                       glitch_lat[glitch_current])
                    self.set_parameter("SIM_GPS_GLITCH_Y",
                                       glitch_lon[glitch_current])
        # turn off glitching
        self.progress("Completed Glitches")
        self.set_parameter("SIM_GPS_GLITCH_X", 0)
        self.set_parameter("SIM_GPS_GLITCH_Y", 0)
        # continue with the mission
        self.wait_waypoint(0, num_wp-1, timeout=500)
        # wait for arrival back home
        self.wait_distance_to_home(0, 10, timeout=timeout)
        # turn off simulator display of gps and actual position
        if self.use_map:
            self.show_gps_and_sim_positions(False)
        self.progress("GPS Glitch test Auto completed: passed!")
        self.wait_disarmed()
        # re-arming is problematic because the GPS is glitching!
        self.reboot_sitl()
# fly_simple - assumes the simple bearing is initialised to be
# directly north flies a box with 100m west, 15 seconds north,
# 50 seconds east, 15 seconds south
    def fly_simple(self, side=50):
        """Fly a box in SIMPLE mode.

        With SIMPLE enabled (and the simple bearing initialised to north),
        flies: side metres south, 8 seconds west, side/2 metres north,
        8 seconds east, then RTLs."""
        self.takeoff(10, mode="LOITER")
        # set SIMPLE mode for all flight modes
        self.set_parameter("SIMPLE", 63)
        # switch to stabilize mode
        self.change_mode('STABILIZE')
        self.set_rc(3, 1545)
        # fly south 50m
        self.progress("# Flying south %u meters" % side)
        self.set_rc(1, 1300)
        self.wait_distance(side, 5, 60)
        self.set_rc(1, 1500)
        # fly west 8 seconds
        self.progress("# Flying west for 8 seconds")
        self.set_rc(2, 1300)
        tstart = self.get_sim_time()
        while self.get_sim_time_cached() < (tstart + 8):
            self.mav.recv_match(type='VFR_HUD', blocking=True)
        self.set_rc(2, 1500)
        # fly north 25 meters
        self.progress("# Flying north %u meters" % (side/2.0))
        self.set_rc(1, 1700)
        self.wait_distance(side/2, 5, 60)
        self.set_rc(1, 1500)
        # fly east 8 seconds
        self.progress("# Flying east for 8 seconds")
        self.set_rc(2, 1700)
        tstart = self.get_sim_time()
        while self.get_sim_time_cached() < (tstart + 8):
            self.mav.recv_match(type='VFR_HUD', blocking=True)
        self.set_rc(2, 1500)
        # hover in place
        self.hover()
        self.do_RTL(timeout=500)
# fly_super_simple - flies a circle around home for 45 seconds
def fly_super_simple(self, timeout=45):
self.takeoff(10, mode="LOITER")
# fly forward 20m
self.progress("# Flying forward 20 meters")
self.set_rc(2, 1300)
self.wait_distance(20, 5, 60)
self.set_rc(2, 1500)
# set SUPER SIMPLE mode for all flight modes
self.set_parameter("SUPER_SIMPLE", 63)
# switch to stabilize mode
self.change_mode("ALT_HOLD")
self.set_rc(3, 1500)
# start copter yawing slowly
self.set_rc(4, 1550)
# roll left for timeout seconds
self.progress("# rolling left from pilot's POV for %u seconds"
% timeout)
self.set_rc(1, 1300)
tstart = self.get_sim_time()
while self.get_sim_time_cached() < (tstart + timeout):
self.mav.recv_match(type='VFR_HUD', blocking=True)
# stop rolling and yawing
self.set_rc(1, 1500)
self.set_rc(4, 1500)
# restore simple mode parameters to default
self.set_parameter("SUPER_SIMPLE", 0)
# hover in place
self.hover()
self.do_RTL()
    # fly_circle - flies a circle with 30m radius (CIRCLE_RADIUS 3000cm)
    def fly_circle(self, holdtime=36):
        """Fly CIRCLE mode for 'holdtime' seconds, then RTL.

        Positions the vehicle 100m east of the circle centre first so the
        configured 30m-radius circle is flown around a point west of us.
        """
        # the following should not be required. But there appears to
        # be a physics failure in the simulation which is causing CI
        # to fall over a lot. -pb 202007021209
        self.reboot_sitl()
        self.takeoff(10, mode="LOITER")

        # face west
        self.progress("turn west")
        self.set_rc(4, 1580)
        self.wait_heading(270)
        self.set_rc(4, 1500)

        # set CIRCLE radius (centimetres, so 30m)
        self.set_parameter("CIRCLE_RADIUS", 3000)

        # fly forward (east) at least 100m
        self.set_rc(2, 1100)
        self.wait_distance(100)

        # return pitch stick back to middle
        self.set_rc(2, 1500)

        # set CIRCLE mode
        self.change_mode('CIRCLE')

        # wait; monitor heading while circling to show progress
        m = self.mav.recv_match(type='VFR_HUD', blocking=True)
        start_altitude = m.alt
        tstart = self.get_sim_time()
        self.progress("Circle at %u meters for %u seconds" %
                      (start_altitude, holdtime))
        while self.get_sim_time_cached() < tstart + holdtime:
            m = self.mav.recv_match(type='VFR_HUD', blocking=True)
            self.progress("heading %d" % m.heading)

        self.progress("CIRCLE OK for %u seconds" % holdtime)
        self.do_RTL()
# test_mag_fail - test failover of compass in EKF
    def test_mag_fail(self):
        """Progressively fail compasses and check EKF2/EKF3 fail over to the
        next healthy compass (and back to compass 0 when it recovers)."""
        # we want both EK2 and EK3
        self.set_parameter("EK2_ENABLE", 1)
        self.set_parameter("EK3_ENABLE", 1)

        self.takeoff(10, mode="LOITER")

        # circling keeps the vehicle moving so the EKFs exercise the compasses
        self.change_mode('CIRCLE')

        self.delay_sim_time(20)

        self.context_collect("STATUSTEXT")

        self.progress("Failing first compass")
        self.set_parameter("SIM_MAG1_FAIL", 1)

        # we wait for the message twice, once for EK2 and again for EK3
        self.wait_statustext("EKF2 IMU0 switching to compass 1", check_context=True)
        self.wait_statustext("EKF3 IMU0 switching to compass 1", check_context=True)
        self.progress("compass switch 1 OK")

        self.delay_sim_time(2)

        self.context_clear_collection("STATUSTEXT")

        self.progress("Failing 2nd compass")
        self.set_parameter("SIM_MAG2_FAIL", 1)

        self.wait_statustext("EKF2 IMU0 switching to compass 2", check_context=True)
        self.wait_statustext("EKF3 IMU0 switching to compass 2", check_context=True)

        self.progress("compass switch 2 OK")

        self.delay_sim_time(2)

        self.context_clear_collection("STATUSTEXT")

        self.progress("Failing 3rd compass")
        self.set_parameter("SIM_MAG3_FAIL", 1)
        self.delay_sim_time(2)
        # recover compass 0; both EKFs should switch back to it
        self.set_parameter("SIM_MAG1_FAIL", 0)

        self.wait_statustext("EKF2 IMU0 switching to compass 0", check_context=True)
        self.wait_statustext("EKF3 IMU0 switching to compass 0", check_context=True)
        self.progress("compass switch 0 OK")

        self.do_RTL()
    def fly_flip(self):
        """Test FLIP mode: flip once in roll and once in pitch, checking the
        attitude passes through the expected intermediate angles."""
        ex = None
        try:
            # high-rate ATTITUDE stream is needed to observe the fast flip
            self.set_message_rate_hz(mavutil.mavlink.MAVLINK_MSG_ID_ATTITUDE, 100)

            self.takeoff(20)
            self.hover()

            # the flip is too fast to track at the usual CI speedup
            old_speedup = self.get_parameter("SIM_SPEEDUP")
            self.set_parameter('SIM_SPEEDUP', 1)

            self.progress("Flipping in roll")
            self.set_rc(1, 1700)
            self.send_cmd_do_set_mode('FLIP') # don't wait for success
            self.wait_attitude(despitch=0, desroll=45, tolerance=30)
            self.wait_attitude(despitch=0, desroll=90, tolerance=30)
            self.wait_attitude(despitch=0, desroll=-45, tolerance=30)
            self.progress("Waiting for level")
            self.set_rc(1, 1500) # can't change quickly enough!
            self.wait_attitude(despitch=0, desroll=0, tolerance=5)

            self.progress("Regaining altitude")
            self.change_mode('ALT_HOLD')
            self.wait_for_alt(20, max_err=40)

            self.progress("Flipping in pitch")
            self.set_rc(2, 1700)
            self.send_cmd_do_set_mode('FLIP') # don't wait for success
            self.wait_attitude(despitch=45, desroll=0, tolerance=30)
            # can't check roll here as it flips from 0 to -180..
            self.wait_attitude(despitch=90, tolerance=30)
            self.wait_attitude(despitch=-45, tolerance=30)
            self.progress("Waiting for level")
            self.set_rc(2, 1500) # can't change quickly enough!
            self.wait_attitude(despitch=0, desroll=0, tolerance=5)

            self.set_parameter('SIM_SPEEDUP', old_speedup)

            self.do_RTL()
        except Exception as e:
            self.print_exception_caught(e)
            ex = e
        # always restore the default ATTITUDE stream rate, even on failure
        self.set_message_rate_hz(mavutil.mavlink.MAVLINK_MSG_ID_ATTITUDE, 0)
        if ex is not None:
            raise ex
def configure_EKFs_to_use_optical_flow_instead_of_GPS(self):
'''configure EKF to use optical flow instead of GPS'''
ahrs_ekf_type = self.get_parameter("AHRS_EKF_TYPE")
if ahrs_ekf_type == 2:
self.set_parameter("EK2_GPS_TYPE", 3)
if ahrs_ekf_type == 3:
self.set_parameters({
"EK3_SRC1_POSXY": 0,
"EK3_SRC1_VELXY": 5,
"EK3_SRC1_VELZ": 0,
})
def optical_flow(self):
'''test optical low works'''
self.start_subtest("Make sure no crash if no rangefinder")
self.set_parameter("SIM_FLOW_ENABLE", 1)
self.set_parameter("FLOW_TYPE", 10)
self.configure_EKFs_to_use_optical_flow_instead_of_GPS()
self.reboot_sitl()
self.change_mode('LOITER')
self.delay_sim_time(5)
self.wait_statustext("Need Position Estimate", timeout=300)
# fly_optical_flow_limits - test EKF navigation limiting
def fly_optical_flow_limits(self):
ex = None
self.context_push()
try:
self.set_parameter("SIM_FLOW_ENABLE", 1)
self.set_parameter("FLOW_TYPE", 10)
self.configure_EKFs_to_use_optical_flow_instead_of_GPS()
self.set_analog_rangefinder_parameters()
self.set_parameter("SIM_GPS_DISABLE", 1)
self.set_parameter("SIM_TERRAIN", 0)
self.reboot_sitl()
# we can't takeoff in loiter as we need flow healthy
self.takeoff(alt_min=5, mode='ALT_HOLD', require_absolute=False, takeoff_throttle=1800)
self.change_mode('LOITER')
# speed should be limited to <10m/s
self.set_rc(2, 1000)
tstart = self.get_sim_time()
timeout = 60
started_climb = False
while self.get_sim_time_cached() - tstart < timeout:
m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
spd = math.sqrt(m.vx**2 + m.vy**2) * 0.01
alt = m.relative_alt*0.001
# calculate max speed from altitude above the ground
margin = 2.0
max_speed = alt * 1.5 + margin
self.progress("%0.1f: Low Speed: %f (want <= %u) alt=%.1f" %
(self.get_sim_time_cached() - tstart,
spd,
max_speed, alt))
if spd > max_speed:
raise NotAchievedException(("Speed should be limited by"
"EKF optical flow limits"))
# after 30 seconds start climbing
if not started_climb and self.get_sim_time_cached() - tstart > 30:
started_climb = True
self.set_rc(3, 1900)
self.progress("Moving higher")
# check altitude is not climbing above 35m
if alt > 35:
raise NotAchievedException("Alt should be limited by EKF optical flow limits")
except Exception as e:
self.print_exception_caught(e)
ex = e
self.set_rc(2, 1500)
self.context_pop()
self.disarm_vehicle(force=True)
self.reboot_sitl()
if ex is not None:
raise ex
    def fly_autotune(self):
        """Test autotune mode.

        Runs AUTOTUNE until success, lands in LAND mode (i.e. without the
        pilot accepting the new gains) and checks the original roll-rate
        gains have been re-instated.
        """
        # snapshot the roll-rate gains before tuning
        rlld = self.get_parameter("ATC_RAT_RLL_D")
        rlli = self.get_parameter("ATC_RAT_RLL_I")
        rllp = self.get_parameter("ATC_RAT_RLL_P")
        self.takeoff(10)

        # start tuning
        self.change_mode('AUTOTUNE')

        tstart = self.get_sim_time()
        sim_time_expected = 5000
        deadline = tstart + sim_time_expected
        while self.get_sim_time_cached() < deadline:
            now = self.get_sim_time_cached()
            m = self.mav.recv_match(type='STATUSTEXT',
                                    blocking=True,
                                    timeout=1)
            if m is None:
                continue
            self.progress("STATUSTEXT (%u<%u): %s" % (now, deadline, m.text))
            if "AutoTune: Success" in m.text:
                self.progress("AUTOTUNE OK (%u seconds)" % (now - tstart))
                # near enough for now:
                self.change_mode('LAND')
                self.wait_landed_and_disarmed()
                # check the original gains have been re-instated
                if (rlld != self.get_parameter("ATC_RAT_RLL_D") or
                        rlli != self.get_parameter("ATC_RAT_RLL_I") or
                        rllp != self.get_parameter("ATC_RAT_RLL_P")):
                    raise NotAchievedException("AUTOTUNE gains still present")
                return

        raise NotAchievedException("AUTOTUNE failed (%u seconds)" %
                                   (self.get_sim_time() - tstart))
def fly_autotune_switch(self):
"""Test autotune on a switch with gains being saved"""
# autotune changes a set of parameters on the vehicle which
# are not in our context. That changes the flight
# characterstics, which we can't afford between runs. So
# completely reset the simulated vehicle after the run is
# complete by "customising" the commandline here:
self.customise_SITL_commandline([])
self.context_push()
ex = None
try:
self.fly_autotune_switch_body()
except Exception as e:
self.print_exception_caught(e)
ex = e
self.context_pop()
if ex is not None:
raise ex
    def fly_autotune_switch_body(self):
        """Run AUTOTUNE via an RC switch (RC8 option 17) and verify that:
        switch-low restores the original gains, switch-high applies the
        tuned gains, the tuned gains survive disarm and reboot, and the
        FLTT filter parameter is left untouched.
        """
        self.set_parameter("RC8_OPTION", 17)
        self.set_parameter("ATC_RAT_RLL_FLTT", 20)
        # snapshot pre-tune roll-rate gains and filter for later comparison
        rlld = self.get_parameter("ATC_RAT_RLL_D")
        rlli = self.get_parameter("ATC_RAT_RLL_I")
        rllp = self.get_parameter("ATC_RAT_RLL_P")
        rllt = self.get_parameter("ATC_RAT_RLL_FLTT")
        self.progress("AUTOTUNE pre-gains are P:%f I:%f D:%f" %
                      (self.get_parameter("ATC_RAT_RLL_P"),
                       self.get_parameter("ATC_RAT_RLL_I"),
                       self.get_parameter("ATC_RAT_RLL_D")))
        self.takeoff(10, mode='LOITER')

        # hold position in loiter and run autotune
        self.set_rc(8, 1850)
        self.wait_mode('AUTOTUNE')

        tstart = self.get_sim_time()
        sim_time_expected = 5000
        deadline = tstart + sim_time_expected
        while self.get_sim_time_cached() < deadline:
            now = self.get_sim_time_cached()
            m = self.mav.recv_match(type='STATUSTEXT',
                                    blocking=True,
                                    timeout=1)
            if m is None:
                continue
            self.progress("STATUSTEXT (%u<%u): %s" % (now, deadline, m.text))
            if "AutoTune: Success" in m.text:
                self.progress("AUTOTUNE OK (%u seconds)" % (now - tstart))
                # Check original gains are re-instated
                self.set_rc(8, 1100)
                self.delay_sim_time(1)
                self.progress("AUTOTUNE original gains are P:%f I:%f D:%f" %
                              (self.get_parameter("ATC_RAT_RLL_P"), self.get_parameter("ATC_RAT_RLL_I"),
                               self.get_parameter("ATC_RAT_RLL_D")))
                if (rlld != self.get_parameter("ATC_RAT_RLL_D") or
                        rlli != self.get_parameter("ATC_RAT_RLL_I") or
                        rllp != self.get_parameter("ATC_RAT_RLL_P")):
                    raise NotAchievedException("AUTOTUNE gains still present")
                # Use autotuned gains
                self.set_rc(8, 1850)
                self.delay_sim_time(1)
                self.progress("AUTOTUNE testing gains are P:%f I:%f D:%f" %
                              (self.get_parameter("ATC_RAT_RLL_P"), self.get_parameter("ATC_RAT_RLL_I"),
                               self.get_parameter("ATC_RAT_RLL_D")))
                if (rlld == self.get_parameter("ATC_RAT_RLL_D") or
                        rlli == self.get_parameter("ATC_RAT_RLL_I") or
                        rllp == self.get_parameter("ATC_RAT_RLL_P")):
                    raise NotAchievedException("AUTOTUNE gains not present in pilot testing")
                # land without changing mode
                self.set_rc(3, 1000)
                self.wait_for_alt(0)
                self.wait_disarmed()
                # Check gains are still there after disarm
                if (rlld == self.get_parameter("ATC_RAT_RLL_D") or
                        rlli == self.get_parameter("ATC_RAT_RLL_I") or
                        rllp == self.get_parameter("ATC_RAT_RLL_P")):
                    raise NotAchievedException("AUTOTUNE gains not present on disarm")

                self.reboot_sitl()
                # Check gains are still there after reboot
                if (rlld == self.get_parameter("ATC_RAT_RLL_D") or
                        rlli == self.get_parameter("ATC_RAT_RLL_I") or
                        rllp == self.get_parameter("ATC_RAT_RLL_P")):
                    raise NotAchievedException("AUTOTUNE gains not present on reboot")
                # Check FLTT is unchanged
                if rllt != self.get_parameter("ATC_RAT_RLL_FLTT"):
                    raise NotAchievedException("AUTOTUNE FLTT was modified")
                return

        raise NotAchievedException("AUTOTUNE failed (%u seconds)" %
                                   (self.get_sim_time() - tstart))
# fly_auto_test - fly mission which tests a significant number of commands
    def fly_auto_test(self):
        """Fly copter_mission.txt, which exercises a significant number of
        mission commands, and verify it completes."""
        # Fly mission #1
        self.progress("# Load copter_mission")
        # load the waypoint count
        num_wp = self.load_mission("copter_mission.txt", strict=False)
        if not num_wp:
            raise NotAchievedException("load copter_mission failed")

        self.fly_loaded_mission(num_wp)

        self.progress("Auto mission completed: passed!")
    def fly_loaded_mission(self, num_wp):
        '''fly mission loaded on vehicle.  FIXME: get num_wp from vehicle

        num_wp: number of mission items loaded; used to know when the
        final waypoint has been reached.
        '''
        self.progress("test: Fly a mission from 1 to %u" % num_wp)
        self.set_current_waypoint(1)

        # arm from LOITER before engaging the mission
        self.change_mode("LOITER")
        self.wait_ready_to_arm()
        self.arm_vehicle()

        # switch into AUTO mode and raise throttle
        self.change_mode("AUTO")
        self.set_rc(3, 1500)

        # fly the mission
        self.wait_waypoint(0, num_wp-1, timeout=500)

        # set throttle to minimum
        self.zero_throttle()

        # wait for disarm (the mission ends with a landing)
        self.wait_disarmed()
        self.progress("MOTORS DISARMED OK")
# fly_auto_test using CAN GPS - fly mission which tests normal operation alongside CAN GPS
def fly_auto_test_using_can_gps(self):
self.set_parameter("CAN_P1_DRIVER", 1)
self.set_parameter("GPS_TYPE", 9)
self.set_parameter("GPS_TYPE2", 9)
self.set_parameter("SIM_GPS2_DISABLE", 0)
self.context_push()
self.set_parameter("ARMING_CHECK", 1 << 3)
self.context_collect('STATUSTEXT')
self.reboot_sitl()
# Test UAVCAN GPS ordering working
gps1_det_text = self.wait_text("GPS 1: specified as UAVCAN.*", regex=True, check_context=True)
gps2_det_text = self.wait_text("GPS 2: specified as UAVCAN.*", regex=True, check_context=True)
gps1_nodeid = int(gps1_det_text.split('-')[1])
gps2_nodeid = int(gps2_det_text.split('-')[1])
if gps1_nodeid is None or gps2_nodeid is None:
raise NotAchievedException("GPS not ordered per the order of Node IDs")
self.context_stop_collecting('STATUSTEXT')
GPS_Order_Tests = [[gps2_nodeid, gps2_nodeid, gps2_nodeid, 0,
"PreArm: Same Node Id {} set for multiple GPS".format(gps2_nodeid)],
[gps1_nodeid, int(gps2_nodeid/2), gps1_nodeid, 0,
"Selected GPS Node {} not set as instance {}".format(int(gps2_nodeid/2), 2)],
[int(gps1_nodeid/2), gps2_nodeid, 0, gps2_nodeid,
"Selected GPS Node {} not set as instance {}".format(int(gps1_nodeid/2), 1)],
[gps1_nodeid, gps2_nodeid, gps1_nodeid, gps2_nodeid, ""],
[gps2_nodeid, gps1_nodeid, gps2_nodeid, gps1_nodeid, ""],
[gps1_nodeid, 0, gps1_nodeid, gps2_nodeid, ""],
[0, gps2_nodeid, gps1_nodeid, gps2_nodeid, ""]]
for case in GPS_Order_Tests:
self.progress("############################### Trying Case: " + str(case))
self.set_parameter("GPS1_CAN_OVRIDE", case[0])
self.set_parameter("GPS2_CAN_OVRIDE", case[1])
self.drain_mav()
self.context_collect('STATUSTEXT')
self.reboot_sitl()
gps1_det_text = None
gps2_det_text = None
try:
gps1_det_text = self.wait_text("GPS 1: specified as UAVCAN.*", regex=True, check_context=True)
except AutoTestTimeoutException:
pass
try:
gps2_det_text = self.wait_text("GPS 2: specified as UAVCAN.*", regex=True, check_context=True)
except AutoTestTimeoutException:
pass
self.context_stop_collecting('STATUSTEXT')
self.change_mode('LOITER')
if case[2] == 0 and case[3] == 0:
if gps1_det_text or gps2_det_text:
raise NotAchievedException("Failed ordering for requested CASE:", case)
if case[2] == 0 or case[3] == 0:
if bool(gps1_det_text is not None) == bool(gps2_det_text is not None):
print(gps1_det_text)
print(gps2_det_text)
raise NotAchievedException("Failed ordering for requested CASE:", case)
if gps1_det_text:
if case[2] != int(gps1_det_text.split('-')[1]):
raise NotAchievedException("Failed ordering for requested CASE:", case)
if gps2_det_text:
if case[3] != int(gps2_det_text.split('-')[1]):
raise NotAchievedException("Failed ordering for requested CASE:", case)
if len(case[4]):
self.context_collect('STATUSTEXT')
self.run_cmd(mavutil.mavlink.MAV_CMD_COMPONENT_ARM_DISARM,
1, # ARM
0,
0,
0,
0,
0,
0,
timeout=10,
want_result=mavutil.mavlink.MAV_RESULT_FAILED)
self.wait_statustext(case[4], check_context=True)
self.context_stop_collecting('STATUSTEXT')
self.progress("############################### All GPS Order Cases Tests Passed")
self.context_pop()
self.fly_auto_test()
    def fly_motor_fail(self, fail_servo=0, fail_mul=0.0, holdtime=30):
        """Test flight with reduced motor efficiency

        fail_servo: zero-based index of the motor output to degrade.
        fail_mul:   thrust multiplier applied to the failed motor (0.0 = dead).
        holdtime:   seconds the vehicle must hold position after the failure.

        Integrates altitude/yaw error while hovering with the failed motor
        and fails the test if the vehicle spins or descends.
        """
        # we only expect an octocopter to survive ATM:
        servo_counts = {
            # 2: 6, # hexa
            3: 8,  # octa
            # 5: 6, # Y6
        }
        frame_class = int(self.get_parameter("FRAME_CLASS"))
        if frame_class not in servo_counts:
            self.progress("Test not relevant for frame_class %u" % frame_class)
            return

        servo_count = servo_counts[frame_class]

        if fail_servo < 0 or fail_servo > servo_count:
            raise ValueError('fail_servo outside range for frame class')

        self.takeoff(10, mode="LOITER")

        self.change_alt(alt_min=50)

        # Get initial values
        start_hud = self.mav.recv_match(type='VFR_HUD', blocking=True)
        start_attitude = self.mav.recv_match(type='ATTITUDE', blocking=True)

        hover_time = 5
        try:
            tstart = self.get_sim_time()
            int_error_alt = 0
            int_error_yaw_rate = 0
            int_error_yaw = 0
            self.progress("Hovering for %u seconds" % hover_time)
            failed = False
            while True:
                now = self.get_sim_time_cached()
                if now - tstart > holdtime + hover_time:
                    break

                servo = self.mav.recv_match(type='SERVO_OUTPUT_RAW',
                                            blocking=True)
                hud = self.mav.recv_match(type='VFR_HUD', blocking=True)
                attitude = self.mav.recv_match(type='ATTITUDE', blocking=True)

                # inject the failure after the initial hover period
                if not failed and now - tstart > hover_time:
                    self.progress("Killing motor %u (%u%%)" %
                                  (fail_servo+1, fail_mul))
                    self.set_parameter("SIM_ENGINE_FAIL", fail_servo)
                    self.set_parameter("SIM_ENGINE_MUL", fail_mul)
                    failed = True

                if failed:
                    self.progress("Hold Time: %f/%f" % (now-tstart, holdtime))

                servo_pwm = [servo.servo1_raw,
                             servo.servo2_raw,
                             servo.servo3_raw,
                             servo.servo4_raw,
                             servo.servo5_raw,
                             servo.servo6_raw,
                             servo.servo7_raw,
                             servo.servo8_raw]

                self.progress("PWM output per motor")
                for i, pwm in enumerate(servo_pwm[0:servo_count]):
                    if pwm > 1900:
                        state = "oversaturated"
                    elif pwm < 1200:
                        state = "undersaturated"
                    else:
                        state = "OK"

                    if failed and i == fail_servo:
                        state += " (failed)"

                    self.progress("servo %u [pwm=%u] [%s]" % (i+1, pwm, state))

                alt_delta = hud.alt - start_hud.alt
                yawrate_delta = attitude.yawspeed - start_attitude.yawspeed
                yaw_delta = attitude.yaw - start_attitude.yaw

                self.progress("Alt=%fm (delta=%fm)" % (hud.alt, alt_delta))
                self.progress("Yaw rate=%f (delta=%f) (rad/s)" %
                              (attitude.yawspeed, yawrate_delta))
                self.progress("Yaw=%f (delta=%f) (deg)" %
                              (attitude.yaw, yaw_delta))

                # accumulate error rates so transient wobbles are tolerated
                # but a sustained drift fails the test
                dt = self.get_sim_time() - now
                int_error_alt += abs(alt_delta/dt)
                int_error_yaw_rate += abs(yawrate_delta/dt)
                int_error_yaw += abs(yaw_delta/dt)
                self.progress("## Error Integration ##")
                self.progress("  Altitude: %fm" % int_error_alt)
                self.progress("  Yaw rate: %f rad/s" % int_error_yaw_rate)
                self.progress("  Yaw: %f deg" % int_error_yaw)
                self.progress("----")

                if int_error_yaw_rate > 0.1:
                    raise NotAchievedException("Vehicle is spinning")

                if alt_delta < -20:
                    raise NotAchievedException("Vehicle is descending")

            # restore full motor output
            self.set_parameter("SIM_ENGINE_FAIL", 0)
            self.set_parameter("SIM_ENGINE_MUL", 1.0)
        except Exception as e:
            # ensure the simulated failure is cleared before propagating
            self.set_parameter("SIM_ENGINE_FAIL", 0)
            self.set_parameter("SIM_ENGINE_MUL", 1.0)
            raise e

        self.do_RTL()
    def fly_motor_vibration(self):
        """Test flight with motor vibration

        Hovers with simulated motor vibration, FFTs the logged IMU data to
        find the vibration peak, then enables a notch filter at that
        frequency and checks the peak is suppressed on a second flight.
        """
        self.context_push()
        ex = None
        try:
            self.set_rc_default()
            # magic tridge EKF type that dramatically speeds up the test
            self.set_parameters({
                "AHRS_EKF_TYPE": 10,
                "INS_LOG_BAT_MASK": 3,
                "INS_LOG_BAT_OPT": 0,
                "LOG_BITMASK": 958,
                "LOG_DISARMED": 0,
                "SIM_VIB_MOT_MAX": 350,
                # these are real values taken from a 180mm Quad:
                "SIM_GYR1_RND": 20,
                "SIM_ACC1_RND": 5,
                "SIM_ACC2_RND": 5,
                "SIM_INS_THR_MIN": 0.1,
            })
            self.reboot_sitl()

            self.takeoff(15, mode="ALT_HOLD")

            hover_time = 15
            tstart = self.get_sim_time()
            self.progress("Hovering for %u seconds" % hover_time)
            while self.get_sim_time_cached() < tstart + hover_time:
                self.mav.recv_match(type='ATTITUDE', blocking=True)
            tend = self.get_sim_time()

            # if we don't reduce vibes here then the landing detector
            # may not trigger
            self.set_parameter("SIM_VIB_MOT_MAX", 0)
            self.do_RTL()

            psd = self.mavfft_fttd(1, 0, tstart * 1.0e6, tend * 1.0e6)
            # ignore the first 20Hz and look for a peak at -15dB or more
            ignore_bins = 20
            freq = psd["F"][numpy.argmax(psd["X"][ignore_bins:]) + ignore_bins]
            if numpy.amax(psd["X"][ignore_bins:]) < -15 or freq < 180 or freq > 300:
                raise NotAchievedException(
                    "Did not detect a motor peak, found %f at %f dB" %
                    (freq, numpy.amax(psd["X"][ignore_bins:])))
            else:
                self.progress("Detected motor peak at %fHz" % freq)

            # now add a notch and check that post-filter the peak is squashed below 40dB
            self.set_parameters({
                "INS_LOG_BAT_OPT": 2,
                "INS_NOTCH_ENABLE": 1,
                "INS_NOTCH_FREQ": freq,
                "INS_NOTCH_ATT": 50,
                "INS_NOTCH_BW": freq/2,
                "SIM_VIB_MOT_MAX": 350,
            })
            self.reboot_sitl()

            self.takeoff(15, mode="ALT_HOLD")
            tstart = self.get_sim_time()
            self.progress("Hovering for %u seconds" % hover_time)
            while self.get_sim_time_cached() < tstart + hover_time:
                self.mav.recv_match(type='ATTITUDE', blocking=True)
            tend = self.get_sim_time()
            self.set_parameter("SIM_VIB_MOT_MAX", 0)
            self.do_RTL()

            # second FFT: the previous peak must now be filtered out
            psd = self.mavfft_fttd(1, 0, tstart * 1.0e6, tend * 1.0e6)
            freq = psd["F"][numpy.argmax(psd["X"][ignore_bins:]) + ignore_bins]
            peakdB = numpy.amax(psd["X"][ignore_bins:])
            if peakdB < -23:
                self.progress("Did not detect a motor peak, found %f at %f dB" % (freq, peakdB))
            else:
                raise NotAchievedException("Detected peak %.1f Hz %.2f dB" % (freq, peakdB))
        except Exception as e:
            self.print_exception_caught(e)
            ex = e

        self.disarm_vehicle(force=True)
        self.context_pop()
        self.reboot_sitl()

        if ex is not None:
            raise ex
    def fly_vision_position(self):
        """Disable GPS navigation, enable Vicon input.

        Flies with the EKF fed only by simulated Vicon (external nav),
        checks movement is tracked via VISION_POSITION_ESTIMATE and that
        RTL still lands and disarms.
        """
        # scribble down a location we can set origin to:

        self.customise_SITL_commandline(["--uartF=sim:vicon:"])
        self.progress("Waiting for location")
        self.change_mode('LOITER')
        self.wait_ready_to_arm()

        old_pos = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
        print("old_pos=%s" % str(old_pos))

        self.context_push()

        ex = None
        try:
            # configure EKF to use external nav instead of GPS
            ahrs_ekf_type = self.get_parameter("AHRS_EKF_TYPE")
            if ahrs_ekf_type == 2:
                self.set_parameter("EK2_GPS_TYPE", 3)
            if ahrs_ekf_type == 3:
                self.set_parameter("EK3_SRC1_POSXY", 6)
                self.set_parameter("EK3_SRC1_VELXY", 6)
                self.set_parameter("EK3_SRC1_POSZ", 6)
                self.set_parameter("EK3_SRC1_VELZ", 6)
            self.set_parameter("GPS_TYPE", 0)
            self.set_parameter("VISO_TYPE", 1)
            self.set_parameter("SERIAL5_PROTOCOL", 1)
            self.reboot_sitl()
            # without a GPS or some sort of external prompting, AP
            # doesn't send system_time messages.  So prompt it:
            self.mav.mav.system_time_send(int(time.time() * 1000000), 0)
            self.progress("Waiting for non-zero-lat")
            tstart = self.get_sim_time()
            while True:
                # keep setting the origin until the EKF accepts it and
                # starts reporting a real latitude
                self.mav.mav.set_gps_global_origin_send(1,
                                                        old_pos.lat,
                                                        old_pos.lon,
                                                        old_pos.alt)
                gpi = self.mav.recv_match(type='GLOBAL_POSITION_INT',
                                          blocking=True)
                self.progress("gpi=%s" % str(gpi))
                if gpi.lat != 0:
                    break

                if self.get_sim_time_cached() - tstart > 60:
                    raise AutoTestTimeoutException("Did not get non-zero lat")

            self.takeoff()
            self.set_rc(1, 1600)
            tstart = self.get_sim_time()
            while True:
                # roll right until the vicon-derived position shows movement
                vicon_pos = self.mav.recv_match(type='VISION_POSITION_ESTIMATE',
                                                blocking=True)
                # print("vpe=%s" % str(vicon_pos))
                self.mav.recv_match(type='GLOBAL_POSITION_INT',
                                    blocking=True)
                # self.progress("gpi=%s" % str(gpi))
                if vicon_pos.x > 40:
                    break

                if self.get_sim_time_cached() - tstart > 100:
                    raise AutoTestTimeoutException("Vicon showed no movement")

            # recenter controls:
            self.set_rc(1, 1500)
            self.progress("# Enter RTL")
            self.change_mode('RTL')
            self.set_rc(3, 1500)
            tstart = self.get_sim_time()
            while True:
                if self.get_sim_time_cached() - tstart > 200:
                    raise NotAchievedException("Did not disarm")
                self.mav.recv_match(type='GLOBAL_POSITION_INT',
                                    blocking=True)
                # print("gpi=%s" % str(gpi))
                self.mav.recv_match(type='SIMSTATE',
                                    blocking=True)
                # print("ss=%s" % str(ss))
                # wait for RTL disarm:
                if not self.armed():
                    break

        except Exception as e:
            self.print_exception_caught(e)
            ex = e

        self.context_pop()
        self.zero_throttle()
        self.reboot_sitl()

        if ex is not None:
            raise ex
    def fly_body_frame_odom(self):
        """Disable GPS navigation, enable input of VISION_POSITION_DELTA.

        Configures EKF3 to consume body-frame odometry deltas from the
        simulated vicon and verifies XKFD log messages are produced.
        """
        if self.get_parameter("AHRS_EKF_TYPE") != 3:
            # only tested on this EKF
            return

        self.customise_SITL_commandline(["--uartF=sim:vicon:"])

        # sanity-check the XKFD message is absent before we enable the source
        if self.current_onboard_log_contains_message("XKFD"):
            raise NotAchievedException("Found unexpected XKFD message")

        # scribble down a location we can set origin to:
        self.progress("Waiting for location")
        self.change_mode('LOITER')
        self.wait_ready_to_arm()

        old_pos = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
        print("old_pos=%s" % str(old_pos))

        # configure EKF to use external nav instead of GPS
        self.set_parameters({
            "EK3_SRC1_POSXY": 6,
            "EK3_SRC1_VELXY": 6,
            "EK3_SRC1_POSZ": 6,
            "EK3_SRC1_VELZ": 6,
            "GPS_TYPE": 0,
            "VISO_TYPE": 1,
            "SERIAL5_PROTOCOL": 1,
            "SIM_VICON_TMASK": 8,   # send VISION_POSITION_DELTA
        })
        self.reboot_sitl()
        # without a GPS or some sort of external prompting, AP
        # doesn't send system_time messages.  So prompt it:
        self.mav.mav.system_time_send(int(time.time() * 1000000), 0)
        self.progress("Waiting for non-zero-lat")
        tstart = self.get_sim_time()
        while True:
            # keep setting the origin until the EKF reports a real latitude
            self.mav.mav.set_gps_global_origin_send(1,
                                                    old_pos.lat,
                                                    old_pos.lon,
                                                    old_pos.alt)
            gpi = self.mav.recv_match(type='GLOBAL_POSITION_INT',
                                      blocking=True)
            self.progress("gpi=%s" % str(gpi))
            if gpi.lat != 0:
                break

            if self.get_sim_time_cached() - tstart > 60:
                raise AutoTestTimeoutException("Did not get non-zero lat")

        self.takeoff(alt_min=5, mode='ALT_HOLD', require_absolute=False, takeoff_throttle=1800)
        self.change_mode('LAND')
        # TODO: something more elaborate here - EKF will only aid
        # relative position
        self.wait_disarmed()
        if not self.current_onboard_log_contains_message("XKFD"):
            raise NotAchievedException("Did not find expected XKFD message")
    def fly_gps_vicon_switching(self):
        """Fly GPS and Vicon switching test

        Uses an RC switch (EKF source selector) to swap between GPS and
        external-nav (vicon) position sources in flight, checking the
        vehicle stays in LOITER across a failure of the inactive source.
        """
        self.customise_SITL_commandline(["--uartF=sim:vicon:"])

        """Setup parameters including switching to EKF3"""
        self.context_push()
        ex = None
        try:
            self.set_parameters({
                "VISO_TYPE": 2,      # enable vicon
                "SERIAL5_PROTOCOL": 2,
                "EK3_ENABLE": 1,
                "EK3_SRC2_POSXY": 6, # External Nav
                "EK3_SRC2_POSZ": 6,  # External Nav
                "EK3_SRC2_VELXY": 6, # External Nav
                "EK3_SRC2_VELZ": 6,  # External Nav
                "EK3_SRC2_YAW": 6,   # External Nav
                "RC7_OPTION": 80,    # RC aux switch 7 set to Viso Align
                "RC8_OPTION": 90,    # RC aux switch 8 set to EKF source selector
                "EK2_ENABLE": 0,
                "AHRS_EKF_TYPE": 3,
            })
            self.reboot_sitl()

            # switch to use GPS
            self.set_rc(8, 1000)

            # ensure we can get a global position:
            self.poll_home_position(timeout=120)

            # record starting position
            old_pos = self.get_global_position_int()
            print("old_pos=%s" % str(old_pos))

            # align vicon yaw with ahrs heading
            self.set_rc(7, 2000)

            # takeoff to 10m in Loiter
            self.progress("Moving to ensure location is tracked")
            self.takeoff(10, mode="LOITER", require_absolute=True, timeout=720)

            # fly forward in Loiter
            self.set_rc(2, 1300)

            # disable vicon
            self.set_parameter("SIM_VICON_FAIL", 1)

            # ensure vehicle remain in Loiter for 15 seconds
            tstart = self.get_sim_time()
            while self.get_sim_time() - tstart < 15:
                if not self.mode_is('LOITER'):
                    raise NotAchievedException("Expected to stay in loiter for >15 seconds")

            # re-enable vicon
            self.set_parameter("SIM_VICON_FAIL", 0)

            # switch to vicon, disable GPS and wait 15sec to ensure vehicle remains in Loiter
            self.set_rc(8, 1500)
            self.set_parameter("GPS_TYPE", 0)

            # ensure vehicle remain in Loiter for 15 seconds
            tstart = self.get_sim_time()
            while self.get_sim_time() - tstart < 15:
                if not self.mode_is('LOITER'):
                    raise NotAchievedException("Expected to stay in loiter for >15 seconds")

            # RTL and check vehicle arrives within 10m of home
            self.set_rc(2, 1500)
            self.do_RTL()

        except Exception as e:
            self.print_exception_caught(e)
            ex = e

        self.context_pop()
        self.disarm_vehicle(force=True)
        self.reboot_sitl()
        if ex is not None:
            raise ex
    def fly_rtl_speed(self):
        """Test RTL Speed parameters

        Verifies the vehicle obeys WPNAV_SPEED while in AUTO, RTL_SPEED
        while in RTL, and WPNAV_SPEED again after returning to AUTO.
        """
        rtl_speed_ms = 7
        wpnav_speed_ms = 4
        wpnav_accel_mss = 3
        tolerance = 0.5
        self.load_mission("copter_rtl_speed.txt")
        self.set_parameter('WPNAV_ACCEL', wpnav_accel_mss * 100)
        self.set_parameter('RTL_SPEED', rtl_speed_ms * 100)
        self.set_parameter('WPNAV_SPEED', wpnav_speed_ms * 100)
        self.change_mode('LOITER')
        self.wait_ready_to_arm()
        self.arm_vehicle()
        self.change_mode('AUTO')
        self.set_rc(3, 1600)
        # wait until cruising at altitude, then confirm mission speed
        self.wait_altitude(19, 25, relative=True)
        self.wait_groundspeed(wpnav_speed_ms-tolerance, wpnav_speed_ms+tolerance)
        self.monitor_groundspeed(wpnav_speed_ms, timeout=20)
        # RTL must use RTL_SPEED instead
        self.change_mode('RTL')
        self.wait_groundspeed(rtl_speed_ms-tolerance, rtl_speed_ms+tolerance)
        self.monitor_groundspeed(rtl_speed_ms, timeout=5)
        # back into AUTO: vehicle stops, then resumes at WPNAV_SPEED
        self.change_mode('AUTO')
        self.wait_groundspeed(0-tolerance, 0+tolerance)
        self.wait_groundspeed(wpnav_speed_ms-tolerance, wpnav_speed_ms+tolerance)
        self.monitor_groundspeed(wpnav_speed_ms, tolerance=0.6, timeout=5)
        self.do_RTL()
    def fly_nav_delay(self):
        """Fly a simple mission that has a delay in it.

        Times how long the vehicle dwells at the NAV_DELAY mission item
        (seq 3) and fails if the pause is shorter than the mission's
        configured delay.
        """
        self.load_mission("copter_nav_delay.txt")

        self.set_parameter("DISARM_DELAY", 0)

        self.change_mode("LOITER")
        self.wait_ready_to_arm()

        self.arm_vehicle()
        self.change_mode("AUTO")
        self.set_rc(3, 1600)
        # count_start/count_stop bracket the time spent at the delay item
        count_start = -1
        count_stop = -1
        tstart = self.get_sim_time()
        last_mission_current_msg = 0
        last_seq = None
        while self.armed():  # we RTL at end of mission
            now = self.get_sim_time_cached()
            if now - tstart > 200:
                raise AutoTestTimeoutException("Did not disarm as expected")
            m = self.mav.recv_match(type='MISSION_CURRENT', blocking=True)
            at_delay_item = ""
            if m.seq == 3:
                at_delay_item = "(At delay item)"
                if count_start == -1:
                    count_start = now
            # rate-limit progress output to once per second / per seq change
            if ((now - last_mission_current_msg) > 1 or m.seq != last_seq):
                dist = None
                x = self.mav.messages.get("NAV_CONTROLLER_OUTPUT", None)
                if x is not None:
                    dist = x.wp_dist
                self.progress("MISSION_CURRENT.seq=%u dist=%s %s" %
                              (m.seq, dist, at_delay_item))
                last_mission_current_msg = self.get_sim_time_cached()
                last_seq = m.seq
            if m.seq > 3:
                if count_stop == -1:
                    count_stop = now
        calculated_delay = count_stop - count_start
        want_delay = 59  # should reflect what's in the mission file
        self.progress("Stopped for %u seconds (want >=%u seconds)" %
                      (calculated_delay, want_delay))
        if calculated_delay < want_delay:
            raise NotAchievedException("Did not delay for long enough")
    def test_rangefinder(self):
        """Test the analog rangefinder: RANGEFINDER mavlink messages, the
        SYS_STATUS laser sensor flag, enable/disable via RC switch, distance
        vs. relative altitude agreement, and RFND onboard-log messages."""
        ex = None
        self.context_push()
        self.progress("Making sure we don't ordinarily get RANGEFINDER")
        m = self.mav.recv_match(type='RANGEFINDER',
                                blocking=True,
                                timeout=5)

        if m is not None:
            raise NotAchievedException("Received unexpected RANGEFINDER msg")

        # may need to force a rotation if some other test has used the
        # rangefinder...
        self.progress("Ensure no RFND messages in log")
        self.set_parameter("LOG_DISARMED", 1)
        if self.current_onboard_log_contains_message("RFND"):
            raise NotAchievedException("Found unexpected RFND message")

        try:
            self.set_analog_rangefinder_parameters()
            self.set_parameter("RC9_OPTION", 10) # rangefinder
            self.set_rc(9, 2000)

            self.reboot_sitl()

            self.progress("Making sure we now get RANGEFINDER messages")
            m = self.mav.recv_match(type='RANGEFINDER',
                                    blocking=True,
                                    timeout=10)
            if m is None:
                raise NotAchievedException("Did not get expected RANGEFINDER msg")

            self.progress("Checking RangeFinder is marked as enabled in mavlink")
            m = self.mav.recv_match(type='SYS_STATUS',
                                    blocking=True,
                                    timeout=10)
            flags = m.onboard_control_sensors_enabled
            if not flags & mavutil.mavlink.MAV_SYS_STATUS_SENSOR_LASER_POSITION:
                raise NotAchievedException("Laser not enabled in SYS_STATUS")
            self.progress("Disabling laser using switch")
            self.set_rc(9, 1000)
            self.delay_sim_time(1)
            self.progress("Checking RangeFinder is marked as disabled in mavlink")
            m = self.mav.recv_match(type='SYS_STATUS',
                                    blocking=True,
                                    timeout=10)
            flags = m.onboard_control_sensors_enabled
            if flags & mavutil.mavlink.MAV_SYS_STATUS_SENSOR_LASER_POSITION:
                raise NotAchievedException("Laser enabled in SYS_STATUS")

            self.progress("Re-enabling rangefinder")
            self.set_rc(9, 2000)
            self.delay_sim_time(1)
            m = self.mav.recv_match(type='SYS_STATUS',
                                    blocking=True,
                                    timeout=10)
            flags = m.onboard_control_sensors_enabled
            if not flags & mavutil.mavlink.MAV_SYS_STATUS_SENSOR_LASER_POSITION:
                raise NotAchievedException("Laser not enabled in SYS_STATUS")

            self.takeoff(10, mode="LOITER")

            # rangefinder distance must agree with the EKF's relative
            # altitude to within a metre
            m_r = self.mav.recv_match(type='RANGEFINDER',
                                      blocking=True)
            m_p = self.mav.recv_match(type='GLOBAL_POSITION_INT',
                                      blocking=True)

            if abs(m_r.distance - m_p.relative_alt/1000) > 1:
                raise NotAchievedException(
                    "rangefinder/global position int mismatch %0.2f vs %0.2f" %
                    (m_r.distance, m_p.relative_alt/1000))

            self.land_and_disarm()

            if not self.current_onboard_log_contains_message("RFND"):
                raise NotAchievedException("Did not see expected RFND message")

        except Exception as e:
            self.print_exception_caught(e)
            ex = e
        self.context_pop()
        self.reboot_sitl()
        if ex is not None:
            raise ex
    def test_terrain_spline_mission(self):
        """Fly a spline-waypoint mission with the terrain database disabled.

        Presumably verifies spline navigation still works without terrain
        data available -- TODO confirm against the wp.txt mission contents.
        """
        self.set_parameter("TERRAIN_ENABLE", 0)
        self.fly_mission("wp.txt")
def WPNAV_SPEED(self):
'''ensure resetting WPNAV_SPEED works'''
loc = self.poll_home_position()
alt = 20
loc.alt = alt
items = []
# 100 waypoints in a line, 10m apart in a northerly direction
# for i in range(1, 100):
# items.append((mavutil.mavlink.MAV_CMD_NAV_WAYPOINT, i*10, 0, alt))
# 1 waypoint a long way away
items.append((mavutil.mavlink.MAV_CMD_NAV_WAYPOINT, 2000, 0, alt),)
items.append((mavutil.mavlink.MAV_CMD_NAV_RETURN_TO_LAUNCH, 0, 0, 0))
self.upload_simple_relhome_mission(items)
start_speed_ms = self.get_parameter('WPNAV_SPEED') / 100.0
self.takeoff(20)
self.change_mode('AUTO')
self.wait_groundspeed(start_speed_ms-1, start_speed_ms+1, minimum_duration=10)
for speed_ms in 7, 8, 7, 8, 9, 10, 11, 7:
self.set_parameter('WPNAV_SPEED', speed_ms*100)
self.wait_groundspeed(speed_ms-1, speed_ms+1, minimum_duration=10)
self.do_RTL()
def WPNAV_SPEED_UP(self):
'''ensure resetting WPNAV_SPEED_UP works'''
items = []
# 1 waypoint a long way up
items.append((mavutil.mavlink.MAV_CMD_NAV_WAYPOINT, 0, 0, 20000),)
items.append((mavutil.mavlink.MAV_CMD_NAV_RETURN_TO_LAUNCH, 0, 0, 0))
self.upload_simple_relhome_mission(items)
start_speed_ms = self.get_parameter('WPNAV_SPEED_UP') / 100.0
minimum_duration = 5
self.takeoff(20)
self.change_mode('AUTO')
self.wait_climbrate(start_speed_ms-1, start_speed_ms+1, minimum_duration=minimum_duration)
for speed_ms in 7, 8, 7, 8, 6, 2:
self.set_parameter('WPNAV_SPEED_UP', speed_ms*100)
self.wait_climbrate(speed_ms-1, speed_ms+1, minimum_duration=minimum_duration)
self.do_RTL(timeout=240)
def WPNAV_SPEED_DN(self):
'''ensure resetting WPNAV_SPEED_DN works'''
items = []
# 1 waypoint a long way back down
items.append((mavutil.mavlink.MAV_CMD_NAV_WAYPOINT, 0, 0, 10),)
items.append((mavutil.mavlink.MAV_CMD_NAV_RETURN_TO_LAUNCH, 0, 0, 0))
self.upload_simple_relhome_mission(items)
minimum_duration = 5
self.takeoff(500, timeout=60)
self.change_mode('AUTO')
start_speed_ms = self.get_parameter('WPNAV_SPEED_DN') / 100.0
self.wait_climbrate(-start_speed_ms-1, -start_speed_ms+1, minimum_duration=minimum_duration)
for speed_ms in 7, 8, 7, 8, 6, 2:
self.set_parameter('WPNAV_SPEED_DN', speed_ms*100)
self.wait_climbrate(-speed_ms-1, -speed_ms+1, minimum_duration=minimum_duration)
self.do_RTL()
def fly_mission(self, filename, strict=True):
num_wp = self.load_mission(filename, strict=strict)
self.set_parameter("AUTO_OPTIONS", 3)
self.change_mode('AUTO')
self.wait_ready_to_arm()
self.arm_vehicle()
self.wait_waypoint(num_wp-1, num_wp-1)
self.wait_disarmed()
    def test_surface_tracking(self):
        """Fly towards ground known to be lower than the takeoff point and
        check absolute altitude drops as the vehicle surface-tracks with
        its rangefinder."""
        ex = None
        self.context_push()
        # we must start mavproxy here as otherwise we can't get the
        # terrain database tiles - this leads to random failures in
        # CI!
        mavproxy = self.start_mavproxy()
        try:
            self.set_analog_rangefinder_parameters()
            self.set_parameter("RC9_OPTION", 10) # rangefinder
            self.set_rc(9, 2000)
            self.reboot_sitl() # needed for both rangefinder and initial position
            self.assert_vehicle_location_is_at_startup_location()
            self.takeoff(10, mode="LOITER")
            # a fixed location whose surface is below the home surface
            lower_surface_pos = mavutil.location(-35.362421, 149.164534, 584, 270)
            here = self.mav.location()
            bearing = self.get_bearing(here, lower_surface_pos)
            # point the nose at the target so pitching forward flies there
            self.change_mode("GUIDED")
            self.guided_achieve_heading(bearing)
            self.change_mode("LOITER")
            self.delay_sim_time(2)
            m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
            orig_absolute_alt_mm = m.alt
            self.progress("Original alt: absolute=%f" % orig_absolute_alt_mm)
            self.progress("Flying somewhere which surface is known lower compared to takeoff point")
            self.set_rc(2, 1450)  # gentle forward pitch
            tstart = self.get_sim_time()
            while True:
                if self.get_sim_time() - tstart > 200:
                    raise NotAchievedException("Did not reach lower point")
                m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
                x = mavutil.location(m.lat/1e7, m.lon/1e7, m.alt/1e3, 0)
                dist = self.get_distance(x, lower_surface_pos)
                # positive delta means absolute altitude has dropped
                delta = (orig_absolute_alt_mm - m.alt)/1000.0
                self.progress("Distance: %fm abs-alt-delta: %fm" %
                              (dist, delta))
                if dist < 15:
                    # surface tracking should have pulled the vehicle down
                    if delta < 0.8:
                        raise NotAchievedException("Did not dip in altitude as expected")
                    break
            self.set_rc(2, 1500)
            self.do_RTL()
        except Exception as e:
            self.print_exception_caught(e)
            self.disarm_vehicle(force=True)
            ex = e
        self.stop_mavproxy(mavproxy)
        self.context_pop()
        self.reboot_sitl()
        if ex is not None:
            raise ex
    def test_rangefinder_switchover(self):
        """test that the EKF correctly handles the switchover between baro and rangefinder"""
        ex = None
        self.context_push()
        try:
            self.set_analog_rangefinder_parameters()
            self.set_parameters({
                "RNGFND1_MAX_CM": 1500
            })
            # configure EKF to use rangefinder for altitude at low altitudes
            ahrs_ekf_type = self.get_parameter("AHRS_EKF_TYPE")
            if ahrs_ekf_type == 2:
                self.set_parameter("EK2_RNG_USE_HGT", 70)
            if ahrs_ekf_type == 3:
                self.set_parameter("EK3_RNG_USE_HGT", 70)
            self.reboot_sitl()  # needed for both rangefinder and initial position
            self.assert_vehicle_location_is_at_startup_location()
            self.change_mode("LOITER")
            self.wait_ready_to_arm()
            self.arm_vehicle()
            # climb while flying away from the starting point
            self.set_rc(3, 1800)
            self.set_rc(2, 1200)
            # wait till we get to 50m -- above RNGFND1_MAX_CM, so the EKF
            # should be on baro by now
            self.wait_altitude(50, 52, True, 60)
            self.change_mode("RTL")
            # wait till we get to 25m
            self.wait_altitude(25, 27, True, 120)
            # level up
            self.set_rc(2, 1500)
            # descending through 15m should switch back to the rangefinder
            self.wait_altitude(14, 15, relative=True)
            self.wait_rtl_complete()
        except Exception as e:
            self.print_exception_caught(e)
            self.disarm_vehicle(force=True)
            ex = e
        self.context_pop()
        self.reboot_sitl()
        if ex is not None:
            raise ex
    def test_parachute(self):
        """Exercise parachute release via mission item, mavlink command and
        the three-position auxiliary switch, including crash-triggered
        release and the disabled-switch case."""
        # configure a simulated parachute on servo 9
        self.set_rc(9, 1000)
        self.set_parameter("CHUTE_ENABLED", 1)
        self.set_parameter("CHUTE_TYPE", 10)
        self.set_parameter("SERVO9_FUNCTION", 27)
        self.set_parameter("SIM_PARA_ENABLE", 1)
        self.set_parameter("SIM_PARA_PIN", 9)
        self.progress("Test triggering parachute in mission")
        self.load_mission("copter_parachute_mission.txt")
        self.change_mode('LOITER')
        self.wait_ready_to_arm()
        self.arm_vehicle()
        self.change_mode('AUTO')
        self.set_rc(3, 1600)
        # the simulated chute emits "BANG" when it fires
        self.wait_statustext('BANG', timeout=60)
        self.disarm_vehicle(force=True)
        self.reboot_sitl()
        self.progress("Test triggering with mavlink message")
        self.takeoff(20)
        self.run_cmd(mavutil.mavlink.MAV_CMD_DO_PARACHUTE,
                     2, # release
                     0,
                     0,
                     0,
                     0,
                     0,
                     0)
        self.wait_statustext('BANG', timeout=60)
        self.disarm_vehicle(force=True)
        self.reboot_sitl()
        self.progress("Testing three-position switch")
        self.set_parameter("RC9_OPTION", 23) # parachute 3pos
        self.progress("Test manual triggering")
        self.takeoff(20)
        self.set_rc(9, 2000)  # high position: release
        self.wait_statustext('BANG', timeout=60)
        self.set_rc(9, 1000)
        self.disarm_vehicle(force=True)
        self.reboot_sitl()
        self.context_push()
        self.progress("Crashing with 3pos switch in enable position")
        self.takeoff(40)
        self.set_rc(9, 1500)  # middle position: auto-release enabled
        # kill the motors to provoke a crash-triggered release
        self.set_parameter("SIM_ENGINE_MUL", 0)
        self.set_parameter("SIM_ENGINE_FAIL", 1)
        self.wait_statustext('BANG', timeout=60)
        self.set_rc(9, 1000)
        self.disarm_vehicle(force=True)
        self.reboot_sitl()
        self.context_pop()
        self.progress("Crashing with 3pos switch in disable position")
        loiter_alt = 10
        self.takeoff(loiter_alt, mode='LOITER')
        self.set_rc(9, 1100)  # low position: parachute disabled
        self.set_parameter("SIM_ENGINE_MUL", 0)
        self.set_parameter("SIM_ENGINE_FAIL", 1)
        tstart = self.get_sim_time()
        # watch for 5 seconds; a "BANG" now would be a spurious release
        while self.get_sim_time_cached() < tstart + 5:
            m = self.mav.recv_match(type='STATUSTEXT', blocking=True, timeout=1)
            if m is None:
                continue
            if "BANG" in m.text:
                self.set_rc(9, 1000)
                self.reboot_sitl()
                raise NotAchievedException("Parachute deployed when disabled")
        self.set_rc(9, 1000)
        self.disarm_vehicle(force=True)
        self.reboot_sitl()
    def test_motortest(self, timeout=60):
        """Run MAV_CMD_DO_MOTOR_TEST in both PWM and percentage throttle
        modes and verify the commanded values appear on the servo outputs."""
        self.start_subtest("Testing PWM output")
        pwm_in = 1300
        # default frame is "+" - start motor of 2 is "B", which is
        # motor 1... see
        # https://ardupilot.org/copter/docs/connect-escs-and-motors.html
        self.run_cmd(mavutil.mavlink.MAV_CMD_DO_MOTOR_TEST,
                     2, # start motor
                     mavutil.mavlink.MOTOR_TEST_THROTTLE_PWM,
                     pwm_in, # pwm-to-output
                     2, # timeout in seconds
                     2, # number of motors to output
                     0, # compass learning
                     0,
                     timeout=timeout)
        # long timeouts here because there's a pause before we start motors
        self.wait_servo_channel_value(1, pwm_in, timeout=10)
        self.wait_servo_channel_value(4, pwm_in, timeout=10)
        self.wait_statustext("finished motor test")
        self.end_subtest("Testing PWM output")
        self.start_subtest("Testing percentage output")
        percentage = 90.1
        # since MOT_SPIN_MIN and MOT_SPIN_MAX are not set, the RC3
        # min/max are used.
        expected_pwm = 1000 + (self.get_parameter("RC3_MAX") - self.get_parameter("RC3_MIN")) * percentage/100.0
        self.progress("expected pwm=%f" % expected_pwm)
        self.run_cmd(mavutil.mavlink.MAV_CMD_DO_MOTOR_TEST,
                     2, # start motor
                     mavutil.mavlink.MOTOR_TEST_THROTTLE_PERCENT,
                     percentage, # pwm-to-output
                     2, # timeout in seconds
                     2, # number of motors to output
                     0, # compass learning
                     0,
                     timeout=timeout)
        self.wait_servo_channel_value(1, expected_pwm, timeout=10)
        self.wait_servo_channel_value(4, expected_pwm, timeout=10)
        self.wait_statustext("finished motor test")
        self.end_subtest("Testing percentage output")
def fly_precision_landing_drivers(self):
"""Use PrecLand backends precision messages to land aircraft."""
self.context_push()
for backend in [4, 2]: # SITL, SITL-IRLOCK
ex = None
try:
self.set_parameter("PLND_ENABLED", 1)
self.set_parameter("PLND_TYPE", backend)
self.set_analog_rangefinder_parameters()
self.set_parameter("SIM_SONAR_SCALE", 12)
start = self.mav.location()
target = start
(target.lat, target.lng) = mavextra.gps_offset(start.lat, start.lng, 4, -4)
self.progress("Setting target to %f %f" % (target.lat, target.lng))
self.set_parameter("SIM_PLD_ENABLE", 1)
self.set_parameter("SIM_PLD_LAT", target.lat)
self.set_parameter("SIM_PLD_LON", target.lng)
self.set_parameter("SIM_PLD_HEIGHT", 0)
self.set_parameter("SIM_PLD_ALT_LMT", 15)
self.set_parameter("SIM_PLD_DIST_LMT", 10)
self.reboot_sitl()
self.progress("Waiting for location")
self.zero_throttle()
self.takeoff(10, 1800, mode="LOITER")
self.change_mode("LAND")
self.zero_throttle()
self.wait_landed_and_disarmed()
self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
new_pos = self.mav.location()
delta = self.get_distance(target, new_pos)
self.progress("Landed %f metres from target position" % delta)
max_delta = 1
if delta > max_delta:
raise NotAchievedException("Did not land close enough to target position (%fm > %fm" % (delta, max_delta))
if not self.current_onboard_log_contains_message("PL"):
raise NotAchievedException("Did not see expected PL message")
except Exception as e:
self.print_exception_caught(e)
ex = e
self.reboot_sitl()
self.zero_throttle()
self.context_pop()
self.reboot_sitl()
self.progress("All done")
if ex is not None:
raise ex
def get_system_clock_utc(self, time_seconds):
# this is a copy of ArduPilot's AP_RTC function!
# separate time into ms, sec, min, hour and days but all expressed
# in milliseconds
time_ms = time_seconds * 1000
ms = time_ms % 1000
sec_ms = (time_ms % (60 * 1000)) - ms
min_ms = (time_ms % (60 * 60 * 1000)) - sec_ms - ms
hour_ms = (time_ms % (24 * 60 * 60 * 1000)) - min_ms - sec_ms - ms
# convert times as milliseconds into appropriate units
secs = sec_ms / 1000
mins = min_ms / (60 * 1000)
hours = hour_ms / (60 * 60 * 1000)
return (hours, mins, secs, 0)
    def calc_delay(self, seconds, delay_for_seconds):
        """Return the (hour, minute, second, ms) wall-clock UTC time that is
        *delay_for_seconds* after unix timestamp *seconds*."""
        # delay-for-seconds has to be long enough that we're at the
        # waypoint before that time.  Otherwise we'll try to wait a
        # day....
        if delay_for_seconds >= 3600:
            raise ValueError("Won't handle large delays")
        (hours,
         mins,
         secs,
         ms) = self.get_system_clock_utc(seconds)
        self.progress("Now is %uh %um %us" % (hours, mins, secs))
        secs += delay_for_seconds # add the requested delay
        # carry the overflow up through minutes and hours
        mins += int(secs/60)
        secs %= 60
        hours += int(mins / 60)
        mins %= 60
        # NOTE(review): a delay crossing midnight yields hours == 24,
        # which passes this check but is not a valid hour-of-day -- confirm
        if hours > 24:
            raise ValueError("Way too big a delay")
        self.progress("Delay until %uh %um %us" %
                      (hours, mins, secs))
        return (hours, mins, secs, 0)
    def reset_delay_item(self, seq, seconds_in_future):
        """Rewrite mission item *seq* as a MAV_CMD_NAV_DELAY whose absolute
        hour/min/sec fields are *seconds_in_future* from the vehicle's
        current SYSTEM_TIME, then read the item back until it matches."""
        frame = mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT_INT
        command = mavutil.mavlink.MAV_CMD_NAV_DELAY
        # retrieve mission item and check it:
        tried_set = False
        hours = None
        mins = None
        secs = None
        # outer loop: fetch the item; if it does not yet hold the values
        # we programmed, (re)write it and fetch again
        while True:
            self.progress("Requesting item")
            self.mav.mav.mission_request_send(1,
                                              1,
                                              seq)
            st = self.mav.recv_match(type='MISSION_ITEM',
                                     blocking=True,
                                     timeout=1)
            if st is None:
                continue
            print("Item: %s" % str(st))
            have_match = (tried_set and
                          st.seq == seq and
                          st.command == command and
                          st.param2 == hours and
                          st.param3 == mins and
                          st.param4 == secs)
            if have_match:
                return
            self.progress("Mission mismatch")
            m = None
            tstart = self.get_sim_time()
            # announce a partial-list write and wait until the vehicle
            # requests the item we intend to replace
            while True:
                if self.get_sim_time_cached() - tstart > 3:
                    raise NotAchievedException(
                        "Did not receive MISSION_REQUEST")
                self.mav.mav.mission_write_partial_list_send(1,
                                                             1,
                                                             seq,
                                                             seq)
                m = self.mav.recv_match(type='MISSION_REQUEST',
                                        blocking=True,
                                        timeout=1)
                if m is None:
                    continue
                if m.seq != st.seq:
                    continue
                break
            self.progress("Sending absolute-time mission item")
            # we have to change out the delay time...
            now = self.mav.messages["SYSTEM_TIME"]
            if now is None:
                raise PreconditionFailedException("Never got SYSTEM_TIME")
            if now.time_unix_usec == 0:
                raise PreconditionFailedException("system time is zero")
            (hours, mins, secs, ms) = self.calc_delay(now.time_unix_usec/1000000, seconds_in_future)
            self.mav.mav.mission_item_send(
                1, # target system
                1, # target component
                seq, # seq
                frame, # frame
                command, # command
                0, # current
                1, # autocontinue
                0, # p1 (relative seconds)
                hours, # p2
                mins, # p3
                secs, # p4
                0, # p5
                0, # p6
                0) # p7
            tried_set = True
            ack = self.mav.recv_match(type='MISSION_ACK',
                                      blocking=True,
                                      timeout=1)
            self.progress("Received ack: %s" % str(ack))
    def fly_nav_delay_abstime(self):
        """fly a simple mission that has a delay in it"""
        # 87 seconds: long enough to measure unambiguously, short enough
        # to keep the test quick
        self.fly_nav_delay_abstime_x(87)
    def fly_nav_delay_abstime_x(self, delay_for, expected_delay=None):
        """fly a simple mission that has a delay in it, expect a delay

        delay_for: seconds in the future to program into the NAV_DELAY item
        expected_delay: seconds the vehicle should pause (defaults to
        delay_for); measured pause must be within 2s of this
        """
        if expected_delay is None:
            expected_delay = delay_for
        self.load_mission("copter_nav_delay.txt")
        self.change_mode("LOITER")
        self.wait_ready_to_arm()
        delay_item_seq = 3
        self.reset_delay_item(delay_item_seq, delay_for)
        delay_for_seconds = delay_for
        # remember the unix time at which the delay was programmed
        reset_at_m = self.mav.recv_match(type='SYSTEM_TIME', blocking=True)
        reset_at = reset_at_m.time_unix_usec/1000000
        self.arm_vehicle()
        self.change_mode("AUTO")
        self.set_rc(3, 1600)
        count_stop = -1
        tstart = self.get_sim_time()
        while self.armed(): # we RTL at end of mission
            now = self.get_sim_time_cached()
            if now - tstart > 240:
                raise AutoTestTimeoutException("Did not disarm as expected")
            m = self.mav.recv_match(type='MISSION_CURRENT', blocking=True)
            at_delay_item = ""
            if m.seq == delay_item_seq:
                at_delay_item = "(delay item)"
            self.progress("MISSION_CURRENT.seq=%u %s" % (m.seq, at_delay_item))
            if m.seq > delay_item_seq:
                # record the unix time at which the delay item completed
                if count_stop == -1:
                    count_stop_m = self.mav.recv_match(type='SYSTEM_TIME',
                                                       blocking=True)
                    count_stop = count_stop_m.time_unix_usec/1000000
        calculated_delay = count_stop - reset_at
        error = abs(calculated_delay - expected_delay)
        self.progress("Stopped for %u seconds (want >=%u seconds)" %
                      (calculated_delay, delay_for_seconds))
        if error > 2:
            raise NotAchievedException("delay outside expectations")
    def fly_nav_takeoff_delay_abstime(self):
        """make sure taking off at a specific time works"""
        self.load_mission("copter_nav_delay_takeoff.txt")
        self.change_mode("LOITER")
        self.wait_ready_to_arm()
        delay_item_seq = 2
        delay_for_seconds = 77
        # program the NAV_DELAY item to an absolute time 77s from now
        self.reset_delay_item(delay_item_seq, delay_for_seconds)
        reset_at = self.get_sim_time_cached()
        self.arm_vehicle()
        self.change_mode("AUTO")
        self.set_rc(3, 1600)
        # should not take off for about least 77 seconds
        tstart = self.get_sim_time()
        took_off = False
        while self.armed():
            now = self.get_sim_time_cached()
            if now - tstart > 200:
                # timeout
                break
            m = self.mav.recv_match(type='MISSION_CURRENT', blocking=True)
            now = self.get_sim_time_cached()
            self.progress("%s" % str(m))
            if m.seq > delay_item_seq:
                # mission advanced past the delay item: takeoff happened
                if not took_off:
                    took_off = True
                    delta_time = now - reset_at
                    # takeoff time must match the programmed delay to 2s
                    if abs(delta_time - delay_for_seconds) > 2:
                        raise NotAchievedException((
                            "Did not take off on time "
                            "measured=%f want=%f" %
                            (delta_time, delay_for_seconds)))
        if not took_off:
            raise NotAchievedException("Did not take off")
def fly_zigzag_mode(self):
'''test zigzag mode'''
# set channel 8 for zigzag savewp and recentre it
self.set_parameter("RC8_OPTION", 61)
self.takeoff(alt_min=5, mode='LOITER')
ZIGZAG = 24
j = 0
slowdown_speed = 0.3 # because Copter takes a long time to actually stop
self.start_subtest("Conduct ZigZag test for all 4 directions")
while j < 4:
self.progress("## Align heading with the run-way (j=%d)##" % j)
self.set_rc(8, 1500)
self.set_rc(4, 1420)
self.wait_heading(352-j*90)
self.set_rc(4, 1500)
self.change_mode(ZIGZAG)
self.progress("## Record Point A ##")
self.set_rc(8, 1100) # record point A
self.set_rc(1, 1700) # fly side-way for 20m
self.wait_distance(20)
self.set_rc(1, 1500)
self.wait_groundspeed(0, slowdown_speed) # wait until the copter slows down
self.progress("## Record Point A ##")
self.set_rc(8, 1500) # pilot always have to cross mid position when changing for low to high position
self.set_rc(8, 1900) # record point B
i = 1
while i < 2:
self.start_subtest("Run zigzag A->B and B->A (i=%d)" % i)
self.progress("## fly forward for 10 meter ##")
self.set_rc(2, 1300)
self.wait_distance(10)
self.set_rc(2, 1500) # re-centre pitch rc control
self.wait_groundspeed(0, slowdown_speed) # wait until the copter slows down
self.set_rc(8, 1500) # switch to mid position
self.progress("## auto execute vector BA ##")
self.set_rc(8, 1100)
self.wait_distance(17) # wait for it to finish
self.wait_groundspeed(0, slowdown_speed) # wait until the copter slows down
self.progress("## fly forward for 10 meter ##")
self.set_rc(2, 1300) # fly forward for 10 meter
self.wait_distance(10)
self.set_rc(2, 1500) # re-centre pitch rc control
self.wait_groundspeed(0, slowdown_speed) # wait until the copter slows down
self.set_rc(8, 1500) # switch to mid position
self.progress("## auto execute vector AB ##")
self.set_rc(8, 1900)
self.wait_distance(17) # wait for it to finish
self.wait_groundspeed(0, slowdown_speed) # wait until the copter slows down
i = i + 1
# test the case when pilot switch to manual control during the auto flight
self.start_subtest("test the case when pilot switch to manual control during the auto flight")
self.progress("## fly forward for 10 meter ##")
self.set_rc(2, 1300) # fly forward for 10 meter
self.wait_distance(10)
self.set_rc(2, 1500) # re-centre pitch rc control
self.wait_groundspeed(0, 0.3) # wait until the copter slows down
self.set_rc(8, 1500) # switch to mid position
self.progress("## auto execute vector BA ##")
self.set_rc(8, 1100) # switch to low position, auto execute vector BA
self.wait_distance(8) # purposely switch to manual halfway
self.set_rc(8, 1500)
self.wait_groundspeed(0, slowdown_speed) # copter should slow down here
self.progress("## Manual control to fly forward ##")
self.set_rc(2, 1300) # manual control to fly forward
self.wait_distance(8)
self.set_rc(2, 1500) # re-centre pitch rc control
self.wait_groundspeed(0, slowdown_speed) # wait until the copter slows down
self.progress("## continue vector BA ##")
self.set_rc(8, 1100) # copter should continue mission here
self.wait_distance(8) # wait for it to finish rest of BA
self.wait_groundspeed(0, slowdown_speed) # wait until the copter slows down
self.set_rc(8, 1500) # switch to mid position
self.progress("## auto execute vector AB ##")
self.set_rc(8, 1900) # switch to execute AB again
self.wait_distance(17) # wait for it to finish
self.wait_groundspeed(0, slowdown_speed) # wait until the copter slows down
self.change_mode('LOITER')
j = j + 1
self.do_RTL()
def test_setting_modes_via_modeswitch(self):
self.context_push()
ex = None
try:
fltmode_ch = 5
self.set_parameter("FLTMODE_CH", fltmode_ch)
self.set_rc(fltmode_ch, 1000) # PWM for mode1
testmodes = [("FLTMODE1", 4, "GUIDED", 1165),
("FLTMODE2", 13, "SPORT", 1295),
("FLTMODE3", 6, "RTL", 1425),
("FLTMODE4", 7, "CIRCLE", 1555),
("FLTMODE5", 1, "ACRO", 1685),
("FLTMODE6", 17, "BRAKE", 1815),
]
for mode in testmodes:
(parm, parm_value, name, pwm) = mode
self.set_parameter(parm, parm_value)
for mode in reversed(testmodes):
(parm, parm_value, name, pwm) = mode
self.set_rc(fltmode_ch, pwm)
self.wait_mode(name)
for mode in testmodes:
(parm, parm_value, name, pwm) = mode
self.set_rc(fltmode_ch, pwm)
self.wait_mode(name)
for mode in reversed(testmodes):
(parm, parm_value, name, pwm) = mode
self.set_rc(fltmode_ch, pwm)
self.wait_mode(name)
except Exception as e:
self.print_exception_caught(e)
ex = e
self.context_pop()
if ex is not None:
raise ex
    def test_setting_modes_via_auxswitch(self):
        """Check auxiliary-switch mode functions (land/guided) override the
        flight-mode switch, and that releasing one re-polls that switch."""
        self.context_push()
        ex = None
        try:
            # put the flight-mode switch into its lowest position
            fltmode_ch = int(self.get_parameter("FLTMODE_CH"))
            self.set_rc(fltmode_ch, 1000)
            self.wait_mode("CIRCLE")
            # centre the aux switches before assigning their functions
            self.set_rc(9, 1000)
            self.set_rc(10, 1000)
            self.set_parameter("RC9_OPTION", 18) # land
            self.set_parameter("RC10_OPTION", 55) # guided
            self.set_rc(9, 1900)
            self.wait_mode("LAND")
            self.set_rc(10, 1900)
            self.wait_mode("GUIDED")
            self.set_rc(10, 1000) # this re-polls the mode switch
            self.wait_mode("CIRCLE")
        except Exception as e:
            self.print_exception_caught(e)
            ex = e
        self.context_pop()
        if ex is not None:
            raise ex
def fly_guided_stop(self,
timeout=20,
groundspeed_tolerance=0.05,
climb_tolerance=0.01):
"""stop the vehicle moving in guided mode"""
self.progress("Stopping vehicle")
tstart = self.get_sim_time()
# send a position-control command
self.mav.mav.set_position_target_local_ned_send(
0, # timestamp
1, # target system_id
1, # target component id
mavutil.mavlink.MAV_FRAME_BODY_NED,
MAV_POS_TARGET_TYPE_MASK.POS_ONLY | MAV_POS_TARGET_TYPE_MASK.LAST_BYTE, # mask specifying use-only-x-y-z
0, # x
0, # y
0, # z
0, # vx
0, # vy
0, # vz
0, # afx
0, # afy
0, # afz
0, # yaw
0, # yawrate
)
while True:
if self.get_sim_time_cached() - tstart > timeout:
raise NotAchievedException("Vehicle did not stop")
m = self.mav.recv_match(type='VFR_HUD', blocking=True)
print("%s" % str(m))
if (m.groundspeed < groundspeed_tolerance and
m.climb < climb_tolerance):
break
def fly_guided_move_global_relative_alt(self, lat, lon, alt):
startpos = self.mav.recv_match(type='GLOBAL_POSITION_INT',
blocking=True)
self.mav.mav.set_position_target_global_int_send(
0, # timestamp
1, # target system_id
1, # target component id
mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT_INT,
MAV_POS_TARGET_TYPE_MASK.POS_ONLY, # mask specifying use-only-lat-lon-alt
lat, # lat
lon, # lon
alt, # alt
0, # vx
0, # vy
0, # vz
0, # afx
0, # afy
0, # afz
0, # yaw
0, # yawrate
)
tstart = self.get_sim_time()
while True:
if self.get_sim_time_cached() - tstart > 200:
raise NotAchievedException("Did not move far enough")
# send a position-control command
pos = self.mav.recv_match(type='GLOBAL_POSITION_INT',
blocking=True)
delta = self.get_distance_int(startpos, pos)
self.progress("delta=%f (want >10)" % delta)
if delta > 10:
break
def fly_guided_move_local(self, x, y, z_up, timeout=100):
"""move the vehicle using MAVLINK_MSG_ID_SET_POSITION_TARGET_LOCAL_NED"""
startpos = self.mav.recv_match(type='LOCAL_POSITION_NED', blocking=True)
self.progress("startpos=%s" % str(startpos))
tstart = self.get_sim_time()
# send a position-control command
self.mav.mav.set_position_target_local_ned_send(
0, # timestamp
1, # target system_id
1, # target component id
mavutil.mavlink.MAV_FRAME_LOCAL_NED,
MAV_POS_TARGET_TYPE_MASK.POS_ONLY | MAV_POS_TARGET_TYPE_MASK.LAST_BYTE, # mask specifying use-only-x-y-z
x, # x
y, # y
-z_up,# z
0, # vx
0, # vy
0, # vz
0, # afx
0, # afy
0, # afz
0, # yaw
0, # yawrate
)
while True:
if self.get_sim_time_cached() - tstart > timeout:
raise NotAchievedException("Did not reach destination")
if self.distance_to_local_position((x, y, -z_up)) < 1:
break
def test_guided_local_position_target(self, x, y, z_up):
""" Check target position being received by vehicle """
# set POSITION_TARGET_LOCAL_NED message rate using SET_MESSAGE_INTERVAL
self.progress("Setting local target in NED: (%f, %f, %f)" % (x, y, -z_up))
self.progress("Setting rate to 1 Hz")
self.set_message_rate_hz(mavutil.mavlink.MAVLINK_MSG_ID_POSITION_TARGET_LOCAL_NED, 1)
# mask specifying use only xyz
target_typemask = MAV_POS_TARGET_TYPE_MASK.POS_ONLY
# set position target
self.mav.mav.set_position_target_local_ned_send(
0, # timestamp
1, # target system_id
1, # target component id
mavutil.mavlink.MAV_FRAME_LOCAL_NED,
target_typemask | MAV_POS_TARGET_TYPE_MASK.LAST_BYTE,
x, # x
y, # y
-z_up, # z
0, # vx
0, # vy
0, # vz
0, # afx
0, # afy
0, # afz
0, # yaw
0, # yawrate
)
m = self.mav.recv_match(type='POSITION_TARGET_LOCAL_NED', blocking=True, timeout=2)
self.progress("Received local target: %s" % str(m))
if not (m.type_mask == (target_typemask | MAV_POS_TARGET_TYPE_MASK.LAST_BYTE) or m.type_mask == target_typemask):
raise NotAchievedException("Did not receive proper mask: expected=%u or %u, got=%u" %
((target_typemask | MAV_POS_TARGET_TYPE_MASK.LAST_BYTE), target_typemask, m.type_mask))
if x - m.x > 0.1:
raise NotAchievedException("Did not receive proper target position x: wanted=%f got=%f" % (x, m.x))
if y - m.y > 0.1:
raise NotAchievedException("Did not receive proper target position y: wanted=%f got=%f" % (y, m.y))
if z_up - (-m.z) > 0.1:
raise NotAchievedException("Did not receive proper target position z: wanted=%f got=%f" % (z_up, -m.z))
def test_guided_local_velocity_target(self, vx, vy, vz_up, timeout=3):
" Check local target velocity being recieved by vehicle "
self.progress("Setting local NED velocity target: (%f, %f, %f)" % (vx, vy, -vz_up))
self.progress("Setting POSITION_TARGET_LOCAL_NED message rate to 10Hz")
self.set_message_rate_hz(mavutil.mavlink.MAVLINK_MSG_ID_POSITION_TARGET_LOCAL_NED, 10)
# mask specifying use only vx,vy,vz & accel. Even though we don't test acceltargets below currently
# a velocity only mask returns a velocity & accel mask
target_typemask = (MAV_POS_TARGET_TYPE_MASK.POS_IGNORE |
MAV_POS_TARGET_TYPE_MASK.YAW_IGNORE | MAV_POS_TARGET_TYPE_MASK.YAW_RATE_IGNORE)
# Drain old messages and ignore the ramp-up to the required target velocity
tstart = self.get_sim_time()
while self.get_sim_time_cached() - tstart < timeout:
# send velocity-control command
self.mav.mav.set_position_target_local_ned_send(
0, # timestamp
1, # target system_id
1, # target component id
mavutil.mavlink.MAV_FRAME_LOCAL_NED,
target_typemask | MAV_POS_TARGET_TYPE_MASK.LAST_BYTE,
0, # x
0, # y
0, # z
vx, # vx
vy, # vy
-vz_up, # vz
0, # afx
0, # afy
0, # afz
0, # yaw
0, # yawrate
)
m = self.mav.recv_match(type='POSITION_TARGET_LOCAL_NED', blocking=True, timeout=1)
if m is None:
raise NotAchievedException("Did not receive any message for 1 sec")
self.progress("Received local target: %s" % str(m))
# Check the last received message
if not (m.type_mask == (target_typemask | MAV_POS_TARGET_TYPE_MASK.LAST_BYTE) or m.type_mask == target_typemask):
raise NotAchievedException("Did not receive proper mask: expected=%u or %u, got=%u" %
((target_typemask | MAV_POS_TARGET_TYPE_MASK.LAST_BYTE), target_typemask, m.type_mask))
if vx - m.vx > 0.1:
raise NotAchievedException("Did not receive proper target velocity vx: wanted=%f got=%f" % (vx, m.vx))
if vy - m.vy > 0.1:
raise NotAchievedException("Did not receive proper target velocity vy: wanted=%f got=%f" % (vy, m.vy))
if vz_up - (-m.vz) > 0.1:
raise NotAchievedException("Did not receive proper target velocity vz: wanted=%f got=%f" % (vz_up, -m.vz))
self.progress("Received proper target velocity commands")
def test_position_target_message_mode(self):
" Ensure that POSITION_TARGET_LOCAL_NED messages are sent in Guided Mode only "
self.hover()
self.change_mode('LOITER')
self.progress("Setting POSITION_TARGET_LOCAL_NED message rate to 10Hz")
self.set_message_rate_hz(mavutil.mavlink.MAVLINK_MSG_ID_POSITION_TARGET_LOCAL_NED, 10)
tstart = self.get_sim_time()
while self.get_sim_time_cached() < tstart + 5:
m = self.mav.recv_match(type='POSITION_TARGET_LOCAL_NED', blocking=True, timeout=1)
if m is None:
continue
raise NotAchievedException("Received POSITION_TARGET message in LOITER mode: %s" % str(m))
self.progress("Did not receive any POSITION_TARGET_LOCAL_NED message in LOITER mode. Success")
def earth_to_body(self, vector):
r = mavextra.rotation(self.mav.messages["ATTITUDE"]).invert()
# print("r=%s" % str(r))
return r * vector
    def loiter_to_ne(self, x, y, z, timeout=40):
        '''loiter to x, y, z from origin (in metres), z is *up*

        Feeds LANDING_TARGET messages (as a simulated precision-landing
        sensor would) until the vehicle holds within 0.15m horizontally
        of the target for 10 seconds.
        '''
        dest_ned = rotmat.Vector3(x, y, -z)
        tstart = self.get_sim_time()
        success_start = -1  # sim-time at which we first got within range
        while True:
            now = self.get_sim_time_cached()
            if now - tstart > timeout:
                raise NotAchievedException("Did not loiter to ne!")
            m_pos = self.mav.recv_match(type='LOCAL_POSITION_NED',
                                        blocking=True)
            pos_ned = rotmat.Vector3(m_pos.x, m_pos.y, m_pos.z)
            # print("dest_ned=%s" % str(dest_ned))
            # print("pos_ned=%s" % str(pos_ned))
            delta_ef = dest_ned - pos_ned
            # print("delta_ef=%s" % str(delta_ef))
            # determine if we've successfully navigated to close to
            # where we should be:
            dist = math.sqrt(delta_ef.x * delta_ef.x + delta_ef.y * delta_ef.y)
            dist_max = 0.15
            self.progress("dist=%f want <%f" % (dist, dist_max))
            if dist < dist_max:
                # success! We've gotten within our target distance
                if success_start == -1:
                    success_start = now
                elif now - success_start > 10:
                    self.progress("Yay!")
                    break
            else:
                # drifted out again; restart the 10-second hold timer
                success_start = -1
            # convert the remaining offset into the body-frame angles and
            # range a landing-target sensor would report
            delta_bf = self.earth_to_body(delta_ef)
            # print("delta_bf=%s" % str(delta_bf))
            angle_x = math.atan2(delta_bf.y, delta_bf.z)
            angle_y = -math.atan2(delta_bf.x, delta_bf.z)
            distance = math.sqrt(delta_bf.x * delta_bf.x +
                                 delta_bf.y * delta_bf.y +
                                 delta_bf.z * delta_bf.z)
            # att = self.mav.messages["ATTITUDE"]
            # print("r=%f p=%f y=%f" % (math.degrees(att.roll), math.degrees(att.pitch), math.degrees(att.yaw)))
            # print("angle_x=%s angle_y=%s" % (str(math.degrees(angle_x)), str(math.degrees(angle_y))))
            # print("distance=%s" % str(distance))
            self.mav.mav.landing_target_send(
                0, # time_usec
                1, # target_num
                mavutil.mavlink.MAV_FRAME_GLOBAL, # frame; AP ignores
                angle_x, # angle x (radians)
                angle_y, # angle y (radians)
                distance, # distance to target
                0.01, # size of target in radians, X-axis
                0.01 # size of target in radians, Y-axis
            )
    def fly_payload_place_mission(self):
        """Test payload placing in auto."""
        self.context_push()
        ex = None
        try:
            # rangefinder + simulated gripper on servo 8, RC9 as gripper switch
            self.set_analog_rangefinder_parameters()
            self.set_parameter("GRIP_ENABLE", 1)
            self.set_parameter("GRIP_TYPE", 1)
            self.set_parameter("SIM_GRPS_ENABLE", 1)
            self.set_parameter("SIM_GRPS_PIN", 8)
            self.set_parameter("SERVO8_FUNCTION", 28)
            self.set_parameter("RC9_OPTION", 19)
            self.reboot_sitl()
            self.set_rc(9, 2000)
            # load the mission:
            self.load_mission("copter_payload_place.txt")
            self.progress("Waiting for location")
            self.mav.location()
            self.zero_throttle()
            self.change_mode('STABILIZE')
            self.wait_ready_to_arm()
            self.arm_vehicle()
            self.change_mode('AUTO')
            self.set_rc(3, 1500)
            # the payload-place item should lower, detect load release
            # and announce it
            self.wait_text("Gripper load releas", timeout=90)
            self.wait_disarmed()
        except Exception as e:
            self.print_exception_caught(e)
            ex = e
        self.context_pop()
        self.reboot_sitl()
        self.progress("All done")
        if ex is not None:
            raise ex
    def fly_guided_change_submode(self):
        """"Ensure we can move around in guided after a takeoff command."""

        '''start by disabling GCS failsafe, otherwise we immediately disarm
        due to (apparently) not receiving traffic from the GCS for
        too long.  This is probably a function of --speedup'''
        self.set_parameter("FS_GCS_ENABLE", 0)
        self.set_parameter("DISARM_DELAY", 0)  # until traffic problems are fixed
        self.change_mode("GUIDED")
        self.wait_ready_to_arm()
        self.arm_vehicle()
        self.user_takeoff(alt_min=10)
        self.start_subtest("yaw through absolute angles using MAV_CMD_CONDITION_YAW")
        self.guided_achieve_heading(45)
        self.guided_achieve_heading(135)
        self.start_subtest("move the vehicle using set_position_target_global_int")
        # the following numbers are 5-degree-latitude and 5-degrees
        # longitude - just so that we start to really move a lot.
        self.fly_guided_move_global_relative_alt(5, 5, 10)
        self.start_subtest("move the vehicle using MAVLINK_MSG_ID_SET_POSITION_TARGET_LOCAL_NED")
        self.fly_guided_stop(groundspeed_tolerance=0.1)
        self.fly_guided_move_local(5, 5, 10)
        self.start_subtest("Check target position received by vehicle using SET_MESSAGE_INTERVAL")
        self.test_guided_local_position_target(5, 5, 10)
        self.test_guided_local_velocity_target(2, 2, 1)
        self.test_position_target_message_mode()
        self.do_RTL()
    def test_gripper_mission(self):
        """Fly copter-gripper-mission.txt and check the gripper grabs then releases."""
        self.context_push()
        ex = None
        try:
            self.load_mission("copter-gripper-mission.txt")
            self.change_mode('LOITER')
            self.wait_ready_to_arm()
            self.assert_vehicle_location_is_at_startup_location()
            self.arm_vehicle()
            self.change_mode('AUTO')
            self.set_rc(3, 1500)  # raise throttle so the AUTO mission proceeds
            self.wait_statustext("Gripper Grabbed", timeout=60)
            self.wait_statustext("Gripper Released", timeout=60)
        except Exception as e:
            self.print_exception_caught(e)
            # get the vehicle down before re-raising so the test rig recovers
            self.change_mode('LAND')
            ex = e
        self.context_pop()
        self.wait_disarmed()
        if ex is not None:
            raise ex
    def test_spline_last_waypoint(self):
        """Fly a mission whose final waypoint is a spline waypoint; check we climb."""
        self.context_push()
        ex = None
        try:
            self.load_mission("copter-spline-last-waypoint.txt")
            self.change_mode('LOITER')
            self.wait_ready_to_arm()
            self.arm_vehicle()
            self.change_mode('AUTO')
            self.set_rc(3, 1500)  # raise throttle so the AUTO mission proceeds
            self.wait_altitude(10, 3000, relative=True)
        except Exception as e:
            self.print_exception_caught(e)
            ex = e
        self.context_pop()
        self.do_RTL()
        self.wait_disarmed()
        if ex is not None:
            raise ex
    def fly_manual_throttle_mode_change(self):
        """Check that changes into manual-throttle modes are refused while
        the throttle stick is high, and accepted once it is lowered."""
        self.set_parameter("FS_GCS_ENABLE", 0) # avoid GUIDED instant disarm
        self.change_mode("STABILIZE")
        self.wait_ready_to_arm()
        self.arm_vehicle()
        self.change_mode("ACRO")
        self.change_mode("STABILIZE")
        self.change_mode("GUIDED")
        self.set_rc(3, 1700)  # raise throttle stick
        self.watch_altitude_maintained(-1, 0.2) # should not take off in guided
        # with the throttle stick high, manual-throttle modes must be refused:
        self.run_cmd_do_set_mode(
            "ACRO",
            want_result=mavutil.mavlink.MAV_RESULT_FAILED)
        self.run_cmd_do_set_mode(
            "STABILIZE",
            want_result=mavutil.mavlink.MAV_RESULT_FAILED)
        self.run_cmd_do_set_mode(
            "DRIFT",
            want_result=mavutil.mavlink.MAV_RESULT_FAILED)
        self.progress("Check setting an invalid mode")
        self.run_cmd(
            mavutil.mavlink.MAV_CMD_DO_SET_MODE,
            mavutil.mavlink.MAV_MODE_FLAG_CUSTOM_MODE_ENABLED,
            126,  # no such custom mode number
            0,
            0,
            0,
            0,
            0,
            want_result=mavutil.mavlink.MAV_RESULT_FAILED,
            timeout=1
        )
        self.set_rc(3, 1000)  # throttle low again; the mode change must now succeed
        self.run_cmd_do_set_mode("ACRO")
        self.wait_disarmed()
def test_mount_pitch(self, despitch, despitch_tolerance, timeout=10, hold=0):
tstart = self.get_sim_time()
success_start = 0
while True:
now = self.get_sim_time_cached()
if now - tstart > timeout:
raise NotAchievedException("Mount pitch not achieved")
m = self.mav.recv_match(type='MOUNT_STATUS',
blocking=True,
timeout=5)
# self.progress("pitch=%f roll=%f yaw=%f" %
# (m.pointing_a, m.pointing_b, m.pointing_c))
mount_pitch = m.pointing_a/100.0 # centidegrees to degrees
if abs(despitch - mount_pitch) > despitch_tolerance:
self.progress("Mount pitch incorrect: got=%f want=%f (+/- %f)" %
(mount_pitch, despitch, despitch_tolerance))
success_start = 0
continue
self.progress("Mount pitch correct: %f degrees == %f" %
(mount_pitch, despitch))
if success_start == 0:
success_start = now
continue
if now - success_start > hold:
self.progress("Mount pitch achieved")
return
def do_pitch(self, pitch):
'''pitch aircraft in guided/angle mode'''
self.mav.mav.set_attitude_target_send(
0, # time_boot_ms
1, # target sysid
1, # target compid
0, # bitmask of things to ignore
mavextra.euler_to_quat([0, math.radians(pitch), 0]), # att
0, # roll rate (rad/s)
1, # pitch rate
0, # yaw rate
0.5) # thrust, 0 to 1, translated to a climb/descent rate
def setup_servo_mount(self, roll_servo=5, pitch_servo=6, yaw_servo=7):
'''configure a rpy servo mount; caller responsible for required rebooting'''
self.progress("Setting up servo mount")
self.set_parameter("MNT_TYPE", 1)
self.set_parameter("SERVO%u_FUNCTION" % roll_servo, 8) # roll
self.set_parameter("SERVO%u_FUNCTION" % pitch_servo, 7) # pitch
self.set_parameter("SERVO%u_FUNCTION" % yaw_servo, 6) # yaw
    def test_mount(self):
        """Exercise the servo gimbal mount: stabilisation enable/disable,
        MOUNT_CONTROL angle and GPS targeting, RC angular and rate
        targeting, ROI (location/sysid) behaviour."""
        ex = None
        self.context_push()
        # pretend to be GCS sysid 250 so ROI-SYSID can target "us"
        old_srcSystem = self.mav.mav.srcSystem
        self.mav.mav.srcSystem = 250
        self.set_parameter("DISARM_DELAY", 0)
        try:
            '''start by disabling GCS failsafe, otherwise we immediately disarm
            due to (apparently) not receiving traffic from the GCS for
            too long.  This is probably a function of --speedup'''
            self.set_parameter("FS_GCS_ENABLE", 0)

            self.setup_servo_mount()
            self.reboot_sitl() # to handle MNT_TYPE changing

            # make sure we're getting mount status and gimbal reports
            self.mav.recv_match(type='MOUNT_STATUS',
                                blocking=True,
                                timeout=5)
            self.mav.recv_match(type='GIMBAL_REPORT',
                                blocking=True,
                                timeout=5)

            # test pitch isn't stabilising:
            m = self.mav.recv_match(type='MOUNT_STATUS',
                                    blocking=True,
                                    timeout=5)
            if m.pointing_a != 0 or m.pointing_b != 0 or m.pointing_c != 0:
                raise NotAchievedException("Mount stabilising when not requested")

            self.change_mode('GUIDED')
            self.wait_ready_to_arm()
            self.arm_vehicle()

            self.user_takeoff()

            despitch = 10
            despitch_tolerance = 3

            self.progress("Pitching vehicle")
            self.do_pitch(despitch) # will time out!

            self.wait_pitch(despitch, despitch_tolerance)

            # check we haven't modified:
            m = self.mav.recv_match(type='MOUNT_STATUS',
                                    blocking=True,
                                    timeout=5)
            if m.pointing_a != 0 or m.pointing_b != 0 or m.pointing_c != 0:
                raise NotAchievedException("Mount stabilising when not requested")

            self.progress("Enable pitch stabilization using MOUNT_CONFIGURE")
            self.mav.mav.mount_configure_send(
                1, # target system
                1, # target component
                mavutil.mavlink.MAV_MOUNT_MODE_RC_TARGETING,
                0, # stab-roll
                1, # stab-pitch
                0)

            self.do_pitch(despitch)
            # mount should counter the vehicle pitch, hence -despitch:
            self.test_mount_pitch(-despitch, 1)

            self.progress("Disable pitch using MAV_CMD_DO_MOUNT_CONFIGURE")
            self.do_pitch(despitch)
            self.run_cmd(mavutil.mavlink.MAV_CMD_DO_MOUNT_CONFIGURE,
                         mavutil.mavlink.MAV_MOUNT_MODE_RC_TARGETING,
                         0,
                         0,
                         0,
                         0,
                         0,
                         0,
                         )
            self.test_mount_pitch(0, 0)

            self.progress("Point somewhere using MOUNT_CONTROL (ANGLE)")
            self.do_pitch(despitch)
            self.run_cmd(mavutil.mavlink.MAV_CMD_DO_MOUNT_CONFIGURE,
                         mavutil.mavlink.MAV_MOUNT_MODE_MAVLINK_TARGETING,
                         0,
                         0,
                         0,
                         0,
                         0,
                         0,
                         )
            self.mav.mav.mount_control_send(
                1, # target system
                1, # target component
                20 * 100, # pitch
                20 * 100, # roll (centidegrees)
                0, # yaw
                0  # save position
            )
            self.test_mount_pitch(20, 1)

            self.progress("Point somewhere using MOUNT_CONTROL (GPS)")
            self.do_pitch(despitch)
            self.run_cmd(mavutil.mavlink.MAV_CMD_DO_MOUNT_CONFIGURE,
                         mavutil.mavlink.MAV_MOUNT_MODE_GPS_POINT,
                         0,
                         0,
                         0,
                         0,
                         0,
                         0,
                         )
            # target a point 10m north, 20m east of current location:
            start = self.mav.location()
            self.progress("start=%s" % str(start))
            (t_lat, t_lon) = mavextra.gps_offset(start.lat, start.lng, 10, 20)
            t_alt = 0

            self.progress("loc %f %f %f" % (start.lat, start.lng, start.alt))
            self.progress("targetting %f %f %f" % (t_lat, t_lon, t_alt))
            self.do_pitch(despitch)
            self.mav.mav.mount_control_send(
                1, # target system
                1, # target component
                int(t_lat * 1e7), # lat
                int(t_lon * 1e7), # lon
                t_alt * 100, # alt
                0  # save position
            )
            self.test_mount_pitch(-52, 5)

            # now test RC targetting
            self.progress("Testing mount RC targetting")

            # this is a one-off; ArduCopter *will* time out this directive!
            self.progress("Levelling aircraft")
            self.mav.mav.set_attitude_target_send(
                0, # time_boot_ms
                1, # target sysid
                1, # target compid
                0, # bitmask of things to ignore
                mavextra.euler_to_quat([0, 0, 0]), # att
                1, # roll rate (rad/s)
                1, # pitch rate
                1, # yaw rate
                0.5) # thrust, 0 to 1, translated to a climb/descent rate

            self.run_cmd(mavutil.mavlink.MAV_CMD_DO_MOUNT_CONFIGURE,
                         mavutil.mavlink.MAV_MOUNT_MODE_RC_TARGETING,
                         0,
                         0,
                         0,
                         0,
                         0,
                         0,
                         )
            try:
                self.context_push()
                self.set_parameter('MNT_RC_IN_ROLL', 11)
                self.set_parameter('MNT_RC_IN_TILT', 12)
                self.set_parameter('MNT_RC_IN_PAN', 13)
                self.progress("Testing RC angular control")
                # default RC min=1100 max=1900
                self.set_rc_from_map({
                    11: 1500,
                    12: 1500,
                    13: 1500,
                })
                self.test_mount_pitch(0, 1)
                self.progress("Testing RC input down 1/4 of its range in the output, should be down 1/4 range in output")
                rc12_in = 1400
                rc12_min = 1100 # default
                rc12_max = 1900 # default
                angmin_tilt = -45.0 # default
                angmax_tilt = 45.0 # default
                # linear map from RC range to angle range:
                expected_pitch = (float(rc12_in-rc12_min)/float(rc12_max-rc12_min) * (angmax_tilt-angmin_tilt)) + angmin_tilt
                self.progress("expected mount pitch: %f" % expected_pitch)
                if expected_pitch != -11.25:
                    raise NotAchievedException("Calculation wrong - defaults changed?!")
                self.set_rc(12, rc12_in)
                self.test_mount_pitch(-11.25, 0.01)
                self.set_rc(12, 1800)
                self.test_mount_pitch(33.75, 0.01)
                self.set_rc_from_map({
                    11: 1500,
                    12: 1500,
                    13: 1500,
                })

                try:
                    self.progress(
                        "Issue https://discuss.ardupilot.org/t/"
                        "gimbal-limits-with-storm32-backend-mavlink-not-applied-correctly/51438"
                    )
                    self.context_push()
                    self.set_parameter("RC12_MIN", 1000)
                    self.set_parameter("RC12_MAX", 2000)
                    self.set_parameter("MNT_ANGMIN_TIL", -9000)
                    self.set_parameter("MNT_ANGMAX_TIL", 1000)
                    self.set_rc(12, 1000)
                    self.test_mount_pitch(-90.00, 0.01)
                    self.set_rc(12, 2000)
                    self.test_mount_pitch(10.00, 0.01)
                    self.set_rc(12, 1500)
                    self.test_mount_pitch(-40.00, 0.01)
                finally:
                    self.context_pop()

                self.set_rc(12, 1500)

                self.progress("Testing RC rate control")
                self.set_parameter('MNT_JSTICK_SPD', 10)
                self.test_mount_pitch(0, 1)
                self.set_rc(12, 1300)
                # stick held low: pitch should keep decreasing at the joystick rate
                self.test_mount_pitch(-5, 1)
                self.test_mount_pitch(-10, 1)
                self.test_mount_pitch(-15, 1)
                self.test_mount_pitch(-20, 1)
                self.set_rc(12, 1700)
                # stick held high: pitch should keep increasing again
                self.test_mount_pitch(-15, 1)
                self.test_mount_pitch(-10, 1)
                self.test_mount_pitch(-5, 1)
                self.test_mount_pitch(0, 1)
                self.test_mount_pitch(5, 1)

                self.progress("Reverting to angle mode")
                self.set_parameter('MNT_JSTICK_SPD', 0)
                self.set_rc(12, 1500)
                self.test_mount_pitch(0, 0.1)

                self.context_pop()

            except Exception as e:
                self.print_exception_caught(e)
                self.context_pop()
                raise e

            self.progress("Testing mount ROI behaviour")
            self.drain_mav_unparsed()
            self.test_mount_pitch(0, 0.1)
            start = self.mav.location()
            self.progress("start=%s" % str(start))
            (roi_lat, roi_lon) = mavextra.gps_offset(start.lat,
                                                     start.lng,
                                                     10,
                                                     20)
            roi_alt = 0
            self.progress("Using MAV_CMD_DO_SET_ROI_LOCATION")
            self.run_cmd(mavutil.mavlink.MAV_CMD_DO_SET_ROI_LOCATION,
                         0,
                         0,
                         0,
                         0,
                         roi_lat,
                         roi_lon,
                         roi_alt,
                         )
            self.test_mount_pitch(-52, 5)

            start = self.mav.location()
            (roi_lat, roi_lon) = mavextra.gps_offset(start.lat,
                                                     start.lng,
                                                     -100,
                                                     -200)
            roi_alt = 0
            self.progress("Using MAV_CMD_DO_SET_ROI")
            self.run_cmd(mavutil.mavlink.MAV_CMD_DO_SET_ROI,
                         0,
                         0,
                         0,
                         0,
                         roi_lat,
                         roi_lon,
                         roi_alt,
                         )
            self.test_mount_pitch(-7.5, 1)

            start = self.mav.location()
            (roi_lat, roi_lon) = mavextra.gps_offset(start.lat,
                                                     start.lng,
                                                     -100,
                                                     -200)
            roi_alt = 0
            self.progress("Using MAV_CMD_DO_SET_ROI (COMMAND_INT)")
            self.run_cmd_int(
                mavutil.mavlink.MAV_CMD_DO_SET_ROI,
                0,
                0,
                0,
                0,
                int(roi_lat*1e7),
                int(roi_lon*1e7),
                roi_alt,
                frame=mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT_INT,
            )
            self.test_mount_pitch(-7.5, 1)
            self.progress("Using MAV_CMD_DO_SET_ROI (COMMAND_INT), absolute-alt-frame")
            # this is pointing essentially straight down
            self.run_cmd_int(
                mavutil.mavlink.MAV_CMD_DO_SET_ROI,
                0,
                0,
                0,
                0,
                int(roi_lat*1e7),
                int(roi_lon*1e7),
                roi_alt,
                frame=mavutil.mavlink.MAV_FRAME_GLOBAL,
            )
            self.test_mount_pitch(-70, 1, hold=2)

            self.run_cmd(mavutil.mavlink.MAV_CMD_DO_MOUNT_CONFIGURE,
                         mavutil.mavlink.MAV_MOUNT_MODE_NEUTRAL,
                         0,
                         0,
                         0,
                         0,
                         0,
                         0,
                         )
            self.test_mount_pitch(0, 0.1)

            self.progress("Testing mount roi-sysid behaviour")
            self.test_mount_pitch(0, 0.1)
            start = self.mav.location()
            self.progress("start=%s" % str(start))
            (roi_lat, roi_lon) = mavextra.gps_offset(start.lat,
                                                     start.lng,
                                                     10,
                                                     20)
            roi_alt = 0
            self.progress("Using MAV_CMD_DO_SET_ROI_SYSID")
            self.run_cmd(mavutil.mavlink.MAV_CMD_DO_SET_ROI_SYSID,
                         250,
                         0,
                         0,
                         0,
                         0,
                         0,
                         0,
                         )
            # we are sysid 250; feed it our "position" and check the mount tracks it
            self.mav.mav.global_position_int_send(
                0, # time boot ms
                int(roi_lat * 1e7),
                int(roi_lon * 1e7),
                0 * 1000, # mm alt amsl
                0 * 1000, # relalt mm UP!
                0, # vx
                0, # vy
                0, # vz
                0 # heading
            )
            self.test_mount_pitch(-89, 5, hold=2)

            self.mav.mav.global_position_int_send(
                0, # time boot ms
                int(roi_lat * 1e7),
                int(roi_lon * 1e7),
                670 * 1000, # mm alt amsl
                100 * 1000, # mm UP!
                0, # vx
                0, # vy
                0, # vz
                0 # heading
            )
            self.test_mount_pitch(68, 5, hold=2)

            self.run_cmd(mavutil.mavlink.MAV_CMD_DO_MOUNT_CONFIGURE,
                         mavutil.mavlink.MAV_MOUNT_MODE_NEUTRAL,
                         0,
                         0,
                         0,
                         0,
                         0,
                         0,
                         )
            self.test_mount_pitch(0, 0.1)

        except Exception as e:
            self.print_exception_caught(e)
            ex = e
        self.context_pop()

        self.mav.mav.srcSystem = old_srcSystem
        self.disarm_vehicle(force=True)
        self.reboot_sitl() # to handle MNT_TYPE changing

        if ex is not None:
            raise ex
    def MountYawVehicleForMountROI(self):
        """Check the vehicle yaws itself towards an ROI when the mount has
        no yaw servo of its own, and that the yaw target updates as the
        vehicle moves."""
        self.context_push()
        self.set_parameter("SYSID_MYGCS", self.mav.source_system)
        yaw_servo = 7
        self.setup_servo_mount(yaw_servo=yaw_servo)
        self.reboot_sitl() # to handle MNT_TYPE changing

        self.progress("checking ArduCopter yaw-aircraft-for-roi")
        ex = None
        try:
            self.takeoff(20, mode='GUIDED')

            m = self.mav.recv_match(type='VFR_HUD', blocking=True)
            self.progress("current heading %u" % m.heading)
            # remove the mount's yaw servo so the vehicle must yaw instead:
            self.set_parameter("SERVO%u_FUNCTION" % yaw_servo, 0) # yaw
            self.progress("Waiting for check_servo_map to do its job")
            self.delay_sim_time(5)
            self.progress("Pointing North")
            self.guided_achieve_heading(0)
            self.delay_sim_time(5)
            start = self.mav.location()
            (roi_lat, roi_lon) = mavextra.gps_offset(start.lat,
                                                     start.lng,
                                                     -100,
                                                     -100)
            roi_alt = 0
            self.progress("Using MAV_CMD_DO_SET_ROI")
            self.run_cmd(mavutil.mavlink.MAV_CMD_DO_SET_ROI,
                         0,
                         0,
                         0,
                         0,
                         roi_lat,
                         roi_lon,
                         roi_alt,
                         )

            self.progress("Waiting for vehicle to point towards ROI")
            # ROI is 100m SW, so expect a 225-degree heading:
            self.wait_heading(225, timeout=600, minimum_duration=2)

            # the following numbers are 1-degree-latitude and
            # 0-degrees longitude - just so that we start to
            # really move a lot.
            there = mavutil.location(1, 0, 0, 0)

            self.progress("Starting to move")
            self.mav.mav.set_position_target_global_int_send(
                0, # timestamp
                1, # target system_id
                1, # target component id
                mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT_INT,
                MAV_POS_TARGET_TYPE_MASK.POS_ONLY | MAV_POS_TARGET_TYPE_MASK.LAST_BYTE, # mask specifying use-only-lat-lon-alt
                there.lat, # lat
                there.lng, # lon
                there.alt, # alt
                0, # vx
                0, # vy
                0, # vz
                0, # afx
                0, # afy
                0, # afz
                0, # yaw
                0, # yawrate
            )

            self.progress("Starting to move changes the target")
            bearing = self.bearing_to(there)
            self.wait_heading(bearing, timeout=600, minimum_duration=2)

            self.run_cmd(mavutil.mavlink.MAV_CMD_DO_SET_ROI,
                         0,
                         0,
                         0,
                         0,
                         roi_lat,
                         roi_lon,
                         roi_alt,
                         )

            self.progress("Wait for vehicle to point sssse due to moving")
            self.wait_heading(170, timeout=600, minimum_duration=1)

            self.do_RTL()

        except Exception as e:
            self.print_exception_caught(e)
            ex = e

        self.context_pop()

        if ex is not None:
            raise ex
    def fly_throw_mode(self):
        """Simulate a throw-launch and check the vehicle promptly transitions
        to THROW_NEXTMODE (RTL) and completes the return."""
        # test boomerang mode:
        self.progress("Throwing vehicle away")
        self.set_parameters({
            "THROW_NEXTMODE": 6,  # mode to enter after a successful throw
            "SIM_SHOVE_Z": -30,
            "SIM_SHOVE_X": -20,
        })
        self.change_mode('THROW')
        self.wait_ready_to_arm()
        self.arm_vehicle()
        try:
            self.set_parameter("SIM_SHOVE_TIME", 500)
        except ValueError:
            # the shove resets this to zero
            pass

        tstart = self.get_sim_time()
        self.wait_mode('RTL')
        max_good_tdelta = 15
        tdelta = self.get_sim_time() - tstart
        self.progress("Vehicle in RTL")
        self.wait_rtl_complete()
        self.progress("Vehicle disarmed")
        if tdelta > max_good_tdelta:
            raise NotAchievedException("Took too long to enter RTL: %fs > %fs" %
                                       (tdelta, max_good_tdelta))
        self.progress("Vehicle returned")
    def hover_and_check_matched_frequency_with_fft(self, dblevel=-15, minhz=200, maxhz=300, peakhz=None, reverse=None):
        """Hover, then FFT the batch-sampled gyro log to locate a noise peak.

        dblevel -- minimum peak magnitude (dB) to count as a detection
        minhz/maxhz -- frequency band to search
        peakhz -- if not None, the found peak must be within 5% of this frequency
        reverse -- if not None, invert pass/fail: finding a peak is a failure
                   (used to confirm a notch filter has squashed the noise)

        Returns (freq, vfr_hud, peakdb); raises NotAchievedException on failure.
        """
        # find a motor peak
        self.takeoff(10, mode="ALT_HOLD")

        hover_time = 15
        tstart = self.get_sim_time()
        self.progress("Hovering for %u seconds" % hover_time)
        while self.get_sim_time_cached() < tstart + hover_time:
            self.mav.recv_match(type='ATTITUDE', blocking=True)
            vfr_hud = self.mav.recv_match(type='VFR_HUD', blocking=True)
        tend = self.get_sim_time()

        self.do_RTL()
        psd = self.mavfft_fttd(1, 0, tstart * 1.0e6, tend * 1.0e6)

        # batch sampler defaults give 1024 fft and sample rate of 1kz so roughly 1hz/bin
        freq = psd["F"][numpy.argmax(psd["X"][minhz:maxhz]) + minhz] * (1000. / 1024.)
        peakdb = numpy.amax(psd["X"][minhz:maxhz])
        if peakdb < dblevel or (peakhz is not None and abs(freq - peakhz) / peakhz > 0.05):
            if reverse is not None:
                self.progress("Did not detect a motor peak, found %fHz at %fdB" % (freq, peakdb))
            else:
                raise NotAchievedException("Did not detect a motor peak, found %fHz at %fdB" % (freq, peakdb))
        else:
            if reverse is not None:
                raise NotAchievedException(
                    "Detected motor peak at %fHz, throttle %f%%, %fdB" %
                    (freq, vfr_hud.throttle, peakdb))
            else:
                self.progress("Detected motor peak at %fHz, throttle %f%%, %fdB" % (freq, vfr_hud.throttle, peakdb))

        return freq, vfr_hud, peakdb
    def fly_dynamic_notches(self):
        """Use dynamic harmonic notch to control motor noise."""
        self.progress("Flying with dynamic notches")
        self.context_push()

        ex = None
        try:
            self.set_parameters({
                "AHRS_EKF_TYPE": 10,
                "INS_LOG_BAT_MASK": 3,
                "INS_LOG_BAT_OPT": 0,
                "INS_GYRO_FILTER": 100, # set the gyro filter high so we can observe behaviour
                "LOG_BITMASK": 958,
                "LOG_DISARMED": 0,
                "SIM_VIB_MOT_MAX": 350,
                "SIM_GYR1_RND": 20,
            })
            self.reboot_sitl()

            self.takeoff(10, mode="ALT_HOLD")

            # find a motor peak
            freq, vfr_hud, peakdb = self.hover_and_check_matched_frequency_with_fft(-15, 200, 300)

            # now add a dynamic notch and check that the peak is squashed
            self.set_parameters({
                "INS_LOG_BAT_OPT": 2,
                "INS_HNTCH_ENABLE": 1,
                "INS_HNTCH_FREQ": freq,
                "INS_HNTCH_REF": vfr_hud.throttle/100.,
                "INS_HNTCH_HMNCS": 5, # first and third harmonic
                "INS_HNTCH_ATT": 50,
                "INS_HNTCH_BW": freq/2,
            })
            self.reboot_sitl()

            freq, vfr_hud, peakdb1 = self.hover_and_check_matched_frequency_with_fft(-10, 20, 350, reverse=True)

            # now add double dynamic notches and check that the peak is squashed
            self.set_parameter("INS_HNTCH_OPTS", 1)
            self.reboot_sitl()

            freq, vfr_hud, peakdb2 = self.hover_and_check_matched_frequency_with_fft(-15, 20, 350, reverse=True)

            # double-notch should do better, but check for within 5%
            if peakdb2 * 1.05 > peakdb1:
                raise NotAchievedException(
                    "Double-notch peak was higher than single-notch peak %fdB > %fdB" %
                    (peakdb2, peakdb1))

        except Exception as e:
            self.print_exception_caught(e)
            ex = e

        self.context_pop()

        if ex is not None:
            raise ex
def fly_esc_telemetry_notches(self):
"""Use dynamic harmonic notch to control motor noise via ESC telemetry."""
self.progress("Flying with ESC telemetry driven dynamic notches")
self.set_rc_default()
self.set_parameter("AHRS_EKF_TYPE", 10)
self.set_parameter("INS_LOG_BAT_MASK", 3)
self.set_parameter("INS_LOG_BAT_OPT", 0)
# set the gyro filter high so we can observe behaviour
self.set_parameter("INS_GYRO_FILTER", 100)
self.set_parameter("LOG_BITMASK", 958)
self.set_parameter("LOG_DISARMED", 0)
self.set_parameter("SIM_VIB_MOT_MAX", 350)
self.set_parameter("SIM_GYR1_RND", 20)
self.set_parameter("SIM_ESC_TELEM", 1)
self.reboot_sitl()
self.takeoff(10, mode="ALT_HOLD")
# find a motor peak
freq, vfr_hud, peakdb = self.hover_and_check_matched_frequency_with_fft(-15, 200, 300)
# now add a dynamic notch and check that the peak is squashed
self.set_parameter("INS_LOG_BAT_OPT", 2)
self.set_parameter("INS_HNTCH_ENABLE", 1)
self.set_parameter("INS_HNTCH_FREQ", 80)
self.set_parameter("INS_HNTCH_REF", 1.0)
# first and third harmonic
self.set_parameter("INS_HNTCH_HMNCS", 5)
self.set_parameter("INS_HNTCH_ATT", 50)
self.set_parameter("INS_HNTCH_BW", 40)
self.set_parameter("INS_HNTCH_MODE", 3)
self.reboot_sitl()
freq, vfr_hud, peakdb1 = self.hover_and_check_matched_frequency_with_fft(-10, 20, 350, reverse=True)
# now add notch-per motor and check that the peak is squashed
self.set_parameter("INS_HNTCH_OPTS", 2)
self.reboot_sitl()
freq, vfr_hud, peakdb2 = self.hover_and_check_matched_frequency_with_fft(-15, 20, 350, reverse=True)
# notch-per-motor should do better, but check for within 5%
if peakdb2 * 1.05 > peakdb1:
raise NotAchievedException(
"Notch-per-motor peak was higher than single-notch peak %fdB > %fdB" %
(peakdb2, peakdb1))
    def hover_and_check_matched_frequency(self, dblevel=-15, minhz=200, maxhz=300, fftLength=32, peakhz=None):
        """Hover, locate the motor noise peak in the post-flight FFT of the
        batch-sampled gyro data, and cross-check that the in-flight FFT
        (FTN1 log messages) tracked the same frequency.

        dblevel -- minimum peak magnitude (dB) to count as a detection
        minhz/maxhz -- frequency band to search
        fftLength -- in-flight FFT window size; sets the match tolerance
        peakhz -- if not None, the peak must be within 5% of this frequency

        Returns the detected peak frequency (Hz).
        """
        # find a motor peak
        self.takeoff(10, mode="ALT_HOLD")

        hover_time = 15
        tstart = self.get_sim_time()
        self.progress("Hovering for %u seconds" % hover_time)
        while self.get_sim_time_cached() < tstart + hover_time:
            self.mav.recv_match(type='ATTITUDE', blocking=True)
            vfr_hud = self.mav.recv_match(type='VFR_HUD', blocking=True)
        tend = self.get_sim_time()

        self.do_RTL()
        psd = self.mavfft_fttd(1, 0, tstart * 1.0e6, tend * 1.0e6)

        # batch sampler defaults give 1024 fft and sample rate of 1kz so roughly 1hz/bin
        scale = 1000. / 1024.
        sminhz = int(minhz * scale)
        smaxhz = int(maxhz * scale)
        freq = psd["F"][numpy.argmax(psd["X"][sminhz:smaxhz]) + sminhz]
        peakdb = numpy.amax(psd["X"][sminhz:smaxhz])
        if peakdb < dblevel:
            raise NotAchievedException("Did not detect a motor peak, found %fHz at %fdB" % (freq, peakdb))
        elif peakhz is not None and abs(freq - peakhz) / peakhz > 0.05:
            raise NotAchievedException("Did not detect a motor peak at %fHz, found %fHz at %fdB" % (peakhz, freq, peakdb))
        else:
            self.progress("Detected motor peak at %fHz, throttle %f%%, %fdB" % (freq, vfr_hud.throttle, peakdb))

        # we have a peak make sure that the FFT detected something close
        # logging is at 10Hz
        mlog = self.dfreader_for_current_onboard_log()
        # accuracy is determined by sample rate and fft length, given our use of quinn we could probably use half of this
        freqDelta = 1000. / fftLength
        pkAvg = freq
        # NOTE(review): nmessages starts at 1, so the progress line below
        # reports one more than the number of FTN1 messages processed
        nmessages = 1

        m = mlog.recv_match(
            type='FTN1',
            blocking=False,
            condition="FTN1.TimeUS>%u and FTN1.TimeUS<%u" % (tstart * 1.0e6, tend * 1.0e6)
        )
        freqs = []
        while m is not None:
            nmessages = nmessages + 1
            freqs.append(m.PkAvg)
            m = mlog.recv_match(
                type='FTN1',
                blocking=False,
                condition="FTN1.TimeUS>%u and FTN1.TimeUS<%u" % (tstart * 1.0e6, tend * 1.0e6)
            )

        # peak within resolution of FFT length
        pkAvg = numpy.median(numpy.asarray(freqs))
        self.progress("Detected motor peak at %fHz processing %d messages" % (pkAvg, nmessages))

        # peak within 5%
        if abs(pkAvg - freq) > freqDelta:
            raise NotAchievedException("FFT did not detect a motor peak at %f, found %f, wanted %f" % (dblevel, pkAvg, freq))

        return freq
    def fly_gyro_fft_harmonic(self):
        """Use dynamic harmonic notch to control motor noise with harmonic matching of the first harmonic."""
        # basic gyro sample rate test
        self.progress("Flying with gyro FFT harmonic - Gyro sample rate")
        self.context_push()
        ex = None
        # we are dealing with probabalistic scenarios involving threads, have two bites at the cherry
        try:
            self.start_subtest("Hover to calculate approximate hover frequency")
            # magic tridge EKF type that dramatically speeds up the test
            self.set_parameters({
                "AHRS_EKF_TYPE": 10,
                "EK2_ENABLE": 0,
                "EK3_ENABLE": 0,
                "INS_LOG_BAT_MASK": 3,
                "INS_LOG_BAT_OPT": 0,
                "INS_GYRO_FILTER": 100,
                "INS_FAST_SAMPLE": 0,
                "LOG_BITMASK": 958,
                "LOG_DISARMED": 0,
                "SIM_DRIFT_SPEED": 0,
                "SIM_DRIFT_TIME": 0,
                "FFT_THR_REF": self.get_parameter("MOT_THST_HOVER"),
                "SIM_GYR1_RND": 20,  # enable a noisy gyro
            })
            # motor peak enabling FFT will also enable the arming
            # check, self-testing the functionality
            self.set_parameters({
                "FFT_ENABLE": 1,
                "FFT_MINHZ": 50,
                "FFT_MAXHZ": 450,
                "FFT_SNR_REF": 10,
            })

            # Step 1: inject actual motor noise and use the FFT to track it
            self.set_parameters({
                "SIM_VIB_MOT_MAX": 250, # gives a motor peak at about 175Hz
                "FFT_WINDOW_SIZE": 64,
                "FFT_WINDOW_OLAP": 0.75,
            })

            self.reboot_sitl()
            freq = self.hover_and_check_matched_frequency(-15, 100, 250, 64)

            # Step 2: add a second harmonic and check the first is still tracked
            self.start_subtest("Add a fixed frequency harmonic at twice the hover frequency "
                               "and check the right harmonic is found")
            self.set_parameters({
                "SIM_VIB_FREQ_X": freq * 2,
                "SIM_VIB_FREQ_Y": freq * 2,
                "SIM_VIB_FREQ_Z": freq * 2,
                "SIM_VIB_MOT_MULT": 0.25, # halve the motor noise so that the higher harmonic dominates
            })
            self.reboot_sitl()

            self.hover_and_check_matched_frequency(-15, 100, 250, 64, None)

            # Step 3: switch harmonics mid flight and check for tracking
            self.start_subtest("Switch harmonics mid flight and check the right harmonic is found")
            self.set_parameter("FFT_HMNC_PEAK", 0)
            self.reboot_sitl()

            self.takeoff(10, mode="ALT_HOLD")

            hover_time = 10
            tstart = self.get_sim_time()
            self.progress("Hovering for %u seconds" % hover_time)
            while self.get_sim_time_cached() < tstart + hover_time:
                self.mav.recv_match(type='ATTITUDE', blocking=True)
                vfr_hud = self.mav.recv_match(type='VFR_HUD', blocking=True)

            # mid-flight: boost the motor noise so the dominant peak switches
            self.set_parameter("SIM_VIB_MOT_MULT", 5.0)

            self.progress("Hovering for %u seconds" % hover_time)
            while self.get_sim_time_cached() < tstart + hover_time:
                self.mav.recv_match(type='ATTITUDE', blocking=True)
                vfr_hud = self.mav.recv_match(type='VFR_HUD', blocking=True)
            tend = self.get_sim_time()

            self.do_RTL()

            mlog = self.dfreader_for_current_onboard_log()
            m = mlog.recv_match(
                type='FTN1',
                blocking=False,
                condition="FTN1.TimeUS>%u and FTN1.TimeUS<%u" % (tstart * 1.0e6, tend * 1.0e6))
            freqs = []
            while m is not None:
                freqs.append(m.PkAvg)
                m = mlog.recv_match(
                    type='FTN1',
                    blocking=False,
                    condition="FTN1.TimeUS>%u and FTN1.TimeUS<%u" % (tstart * 1.0e6, tend * 1.0e6))

            # peak within resolution of FFT length, the highest energy peak switched but our detection should not
            pkAvg = numpy.median(numpy.asarray(freqs))
            freqDelta = 1000. / self.get_parameter("FFT_WINDOW_SIZE")

            if abs(pkAvg - freq) > freqDelta:
                raise NotAchievedException("FFT did not detect a harmonic motor peak, found %f, wanted %f" % (pkAvg, freq))

            # Step 4: dynamic harmonic
            self.start_subtest("Enable dynamic harmonics and make sure both frequency peaks are attenuated")
            # find a motor peak
            freq, vfr_hud, peakdb = self.hover_and_check_matched_frequency_with_fft(-15, 100, 350)

            # now add a dynamic notch and check that the peak is squashed
            self.set_parameters({
                "INS_LOG_BAT_OPT": 2,
                "INS_HNTCH_ENABLE": 1,
                "INS_HNTCH_HMNCS": 3,
                "INS_HNTCH_MODE": 4,
                "INS_HNTCH_FREQ": freq,
                "INS_HNTCH_REF": vfr_hud.throttle/100.0,
                "INS_HNTCH_ATT": 100,
                "INS_HNTCH_BW": freq/2,
                "INS_HNTCH_OPTS": 3,
            })
            self.reboot_sitl()

            # 5db is far in excess of the attenuation that the double dynamic-harmonic notch is able
            # to provide (-7dB on average), but without the notch the peak is around 20dB so still a safe test
            self.hover_and_check_matched_frequency_with_fft(5, 100, 350, reverse=True)

            self.set_parameters({
                "SIM_VIB_FREQ_X": 0,
                "SIM_VIB_FREQ_Y": 0,
                "SIM_VIB_FREQ_Z": 0,
                "SIM_VIB_MOT_MULT": 1.0,
            })
            # prevent update parameters from messing with the settings when we pop the context
            self.set_parameter("FFT_ENABLE", 0)
            self.reboot_sitl()

        except Exception as e:
            self.print_exception_caught(e)
            ex = e

        self.context_pop()

        # need a final reboot because weird things happen to your
        # vehicle state when switching back from EKF type 10!
        self.reboot_sitl()

        if ex is not None:
            raise ex
    def fly_gyro_fft(self):
        """Use dynamic harmonic notch to control motor noise."""
        # basic gyro sample rate test
        self.progress("Flying with gyro FFT - Gyro sample rate")
        self.context_push()

        ex = None
        try:
            # magic tridge EKF type that dramatically speeds up the test
            self.set_parameters({
                "AHRS_EKF_TYPE": 10,
                "EK2_ENABLE": 0,
                "EK3_ENABLE": 0,
                "INS_LOG_BAT_MASK": 3,
                "INS_LOG_BAT_OPT": 0,
                "INS_GYRO_FILTER": 100,
                "INS_FAST_SAMPLE": 0,
                "LOG_BITMASK": 958,
                "LOG_DISARMED": 0,
                "SIM_DRIFT_SPEED": 0,
                "SIM_DRIFT_TIME": 0,
                "SIM_GYR1_RND": 20,  # enable a noisy motor peak
            })
            # enabling FFT will also enable the arming check,
            # self-testing the functionality
            self.set_parameters({
                "FFT_ENABLE": 1,
                "FFT_MINHZ": 50,
                "FFT_MAXHZ": 450,
                "FFT_SNR_REF": 10,
                "FFT_WINDOW_SIZE": 128,
                "FFT_WINDOW_OLAP": 0.75,
                "FFT_SAMPLE_MODE": 0,
            })

            # Step 1: inject a very precise noise peak at 250hz and make sure the in-flight fft
            # can detect it really accurately. For a 128 FFT the frequency resolution is 8Hz so
            # a 250Hz peak should be detectable within 5%
            self.start_subtest("Inject noise at 250Hz and check the FFT can find the noise")
            self.set_parameters({
                "SIM_VIB_FREQ_X": 250,
                "SIM_VIB_FREQ_Y": 250,
                "SIM_VIB_FREQ_Z": 250,
            })

            self.reboot_sitl()

            # find a motor peak
            self.hover_and_check_matched_frequency(-15, 100, 350, 128, 250)

            # Step 1b: run the same test with an FFT length of 256 which is needed to flush out a
            # whole host of bugs related to uint8_t. This also tests very accurately the frequency resolution
            self.set_parameter("FFT_WINDOW_SIZE", 256)
            self.start_subtest("Inject noise at 250Hz and check the FFT can find the noise")
            self.reboot_sitl()

            # find a motor peak
            self.hover_and_check_matched_frequency(-15, 100, 350, 256, 250)
            self.set_parameter("FFT_WINDOW_SIZE", 128)

            # Step 2: inject actual motor noise and use the standard length FFT to track it
            self.start_subtest("Hover and check that the FFT can find the motor noise")
            self.set_parameters({
                "SIM_VIB_FREQ_X": 0,
                "SIM_VIB_FREQ_Y": 0,
                "SIM_VIB_FREQ_Z": 0,
                "SIM_VIB_MOT_MAX": 250, # gives a motor peak at about 175Hz
                "FFT_WINDOW_SIZE": 32,
                "FFT_WINDOW_OLAP": 0.5,
            })

            self.reboot_sitl()
            freq = self.hover_and_check_matched_frequency(-15, 100, 250, 32)

            self.set_parameter("SIM_VIB_MOT_MULT", 1.)

            # Step 3: add a FFT dynamic notch and check that the peak is squashed
            self.start_subtest("Add a dynamic notch, hover and check that the noise peak is now gone")
            self.set_parameters({
                "INS_LOG_BAT_OPT": 2,
                "INS_HNTCH_ENABLE": 1,
                "INS_HNTCH_FREQ": freq,
                "INS_HNTCH_REF": 1.0,
                "INS_HNTCH_ATT": 50,
                "INS_HNTCH_BW": freq/2,
                "INS_HNTCH_MODE": 4,
            })
            self.reboot_sitl()

            self.takeoff(10, mode="ALT_HOLD")
            hover_time = 15
            self.progress("Hovering for %u seconds" % hover_time)
            tstart = self.get_sim_time()
            while self.get_sim_time_cached() < tstart + hover_time:
                self.mav.recv_match(type='ATTITUDE', blocking=True)
            tend = self.get_sim_time()

            # fly fast forrest!
            self.set_rc(3, 1900)
            self.set_rc(2, 1200)
            self.wait_groundspeed(5, 1000)
            self.set_rc(3, 1500)
            self.set_rc(2, 1500)

            self.do_RTL()
            psd = self.mavfft_fttd(1, 0, tstart * 1.0e6, tend * 1.0e6)

            # batch sampler defaults give 1024 fft and sample rate of 1kz so roughly 1hz/bin
            scale = 1000. / 1024.
            sminhz = int(100 * scale)
            smaxhz = int(350 * scale)
            freq = psd["F"][numpy.argmax(psd["X"][sminhz:smaxhz]) + sminhz]
            peakdb = numpy.amax(psd["X"][sminhz:smaxhz])
            if peakdb < 0:
                # with the notch enabled the motor peak must be gone:
                self.progress("Did not detect a motor peak, found %fHz at %fdB" % (freq, peakdb))
            else:
                raise NotAchievedException("Detected %fHz motor peak at %fdB" % (freq, peakdb))

            # Step 4: loop sample rate test with larger window
            self.start_subtest("Hover and check that the FFT can find the motor noise when running at fast loop rate")
            # we are limited to half the loop rate for frequency detection
            self.set_parameters({
                "FFT_MAXHZ": 185,
                "INS_LOG_BAT_OPT": 0,
                "SIM_VIB_MOT_MAX": 220,
                "FFT_WINDOW_SIZE": 64,
                "FFT_WINDOW_OLAP": 0.75,
                "FFT_SAMPLE_MODE": 1,
            })
            self.reboot_sitl()

            self.takeoff(10, mode="ALT_HOLD")

            self.progress("Hovering for %u seconds" % hover_time)
            tstart = self.get_sim_time()
            while self.get_sim_time_cached() < tstart + hover_time:
                self.mav.recv_match(type='ATTITUDE', blocking=True)
            tend = self.get_sim_time()

            self.do_RTL()
            # prevent update parameters from messing with the settings when we pop the context
            self.set_parameter("FFT_ENABLE", 0)
            self.reboot_sitl()

        except Exception as e:
            self.print_exception_caught(e)
            ex = e

        self.context_pop()

        # must reboot after we move away from EKF type 10 to EKF2 or EKF3
        self.reboot_sitl()

        if ex is not None:
            raise ex
    def fly_brake_mode(self):
        """Check BRAKE mode stops the vehicle and ignores RC input."""
        # test brake mode
        self.progress("Testing brake mode")
        self.takeoff(10, mode="LOITER")

        self.progress("Ensuring RC inputs have no effect in brake mode")
        self.change_mode("STABILIZE")
        self.set_rc(3, 1500)
        self.set_rc(2, 1200)  # pitch forward to build up speed
        self.wait_groundspeed(5, 1000)

        self.change_mode("BRAKE")
        # pitch stick is still deflected; BRAKE must still stop the vehicle
        self.wait_groundspeed(0, 1)

        self.set_rc(2, 1500)

        self.do_RTL()
        self.progress("Ran brake mode")
def fly_guided_move_to(self, destination, timeout=30):
'''move to mavutil.location location; absolute altitude'''
tstart = self.get_sim_time()
self.mav.mav.set_position_target_global_int_send(
0, # timestamp
1, # target system_id
1, # target component id
mavutil.mavlink.MAV_FRAME_GLOBAL_INT,
MAV_POS_TARGET_TYPE_MASK.POS_ONLY | MAV_POS_TARGET_TYPE_MASK.LAST_BYTE, # mask specifying use-only-lat-lon-alt
int(destination.lat * 1e7), # lat
int(destination.lng * 1e7), # lon
destination.alt, # alt
0, # vx
0, # vy
0, # vz
0, # afx
0, # afy
0, # afz
0, # yaw
0, # yawrate
)
while True:
if self.get_sim_time() - tstart > timeout:
raise NotAchievedException()
delta = self.get_distance(self.mav.location(), destination)
self.progress("delta=%f (want <1)" % delta)
if delta < 1:
break
    def test_altitude_types(self):
        '''Check home-altitude handling across a disarm/re-arm cycle.

        Start by disabling GCS failsafe, otherwise we immediately disarm
        due to (apparently) not receiving traffic from the GCS for
        too long.  This is probably a function of --speedup.

        This test flies the vehicle somewhere lower than were it started.
        It then disarms.  It then arms, which should reset home to the
        new, lower altitude.  This delta should be outside 1m but
        within a few metres of the old one.
        '''
        # we must start mavproxy here as otherwise we can't get the
        # terrain database tiles - this leads to random failures in
        # CI!
        mavproxy = self.start_mavproxy()
        self.set_parameter("FS_GCS_ENABLE", 0)
        self.change_mode('GUIDED')
        self.wait_ready_to_arm()
        self.arm_vehicle()
        m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
        max_initial_home_alt_m = 500
        # NOTE(review): relative_alt is in millimetres but the limit reads
        # like metres (and the message scales both by 1000) - verify units
        if m.relative_alt > max_initial_home_alt_m:
            raise NotAchievedException("Initial home alt too high (%fm > %fm)" %
                                       (m.relative_alt*1000, max_initial_home_alt_m*1000))
        # home-vs-AMSL offset before the flight, in mm:
        orig_home_offset_mm = m.alt - m.relative_alt
        self.user_takeoff(5)
        self.progress("Flying to low position")
        current_alt = self.mav.location().alt
        # 10m delta        low_position = mavutil.location(-35.358273, 149.169165, current_alt, 0)
        low_position = mavutil.location(-35.36200016, 149.16415599, current_alt, 0)
        self.fly_guided_move_to(low_position, timeout=240)
        self.change_mode('LAND')
        # expecting home to change when disarmed
        self.wait_landed_and_disarmed()
        # wait a while for home to move (it shouldn't):
        self.delay_sim_time(10)
        m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
        new_home_offset_mm = m.alt - m.relative_alt
        home_offset_delta_mm = orig_home_offset_mm - new_home_offset_mm
        self.progress("new home offset: %f delta=%f" %
                      (new_home_offset_mm, home_offset_delta_mm))
        self.progress("gpi=%s" % str(m))
        max_home_offset_delta_mm = 10
        if home_offset_delta_mm > max_home_offset_delta_mm:
            raise NotAchievedException("Large home offset delta: want<%f got=%f" %
                                       (max_home_offset_delta_mm, home_offset_delta_mm))
        self.progress("Ensuring home moves when we arm")
        self.change_mode('GUIDED')
        self.wait_ready_to_arm()
        self.arm_vehicle()
        m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
        post_arming_home_offset_mm = m.alt - m.relative_alt
        self.progress("post-arming home offset: %f" % (post_arming_home_offset_mm))
        self.progress("gpi=%s" % str(m))
        # deltas are negative (we landed lower), so "min" is the
        # numerically-larger bound and "max" the numerically-smaller one:
        min_post_arming_home_offset_delta_mm = -2500
        max_post_arming_home_offset_delta_mm = -4000
        delta_between_original_home_alt_offset_and_new_home_alt_offset_mm = post_arming_home_offset_mm - orig_home_offset_mm
        self.progress("delta=%f-%f=%f" % (
            post_arming_home_offset_mm,
            orig_home_offset_mm,
            delta_between_original_home_alt_offset_and_new_home_alt_offset_mm))
        self.progress("Home moved %fm vertically" % (delta_between_original_home_alt_offset_and_new_home_alt_offset_mm/1000.0))
        if delta_between_original_home_alt_offset_and_new_home_alt_offset_mm > min_post_arming_home_offset_delta_mm:
            raise NotAchievedException(
                "Home did not move vertically on arming: want<=%f got=%f" %
                (min_post_arming_home_offset_delta_mm, delta_between_original_home_alt_offset_and_new_home_alt_offset_mm))
        if delta_between_original_home_alt_offset_and_new_home_alt_offset_mm < max_post_arming_home_offset_delta_mm:
            raise NotAchievedException(
                "Home moved too far vertically on arming: want>=%f got=%f" %
                (max_post_arming_home_offset_delta_mm, delta_between_original_home_alt_offset_and_new_home_alt_offset_mm))
        self.wait_disarmed()
        self.stop_mavproxy(mavproxy)
    def fly_precision_companion(self):
        """Use Companion PrecLand backend precision messages to loiter."""
        self.context_push()
        ex = None
        try:
            self.set_parameter("PLND_ENABLED", 1)
            # enable companion backend:
            self.set_parameter("PLND_TYPE", 1)
            self.set_analog_rangefinder_parameters()
            # set up a channel switch to enable precision loiter:
            self.set_parameter("RC7_OPTION", 39)
            self.reboot_sitl()
            self.progress("Waiting for location")
            self.mav.location()
            self.zero_throttle()
            self.change_mode('STABILIZE')
            self.wait_ready_to_arm()
            # record starting local position so we can drag relative to it:
            start = self.mav.recv_match(type='LOCAL_POSITION_NED',
                                        blocking=True)
            self.arm_vehicle()
            self.set_rc(3, 1800)
            alt_min = 10
            self.wait_altitude(alt_min,
                               (alt_min + 5),
                               relative=True)
            self.set_rc(3, 1500)
            # move away a little
            self.set_rc(2, 1550)
            self.wait_distance(5, accuracy=1)
            self.set_rc(2, 1500)
            self.change_mode('LOITER')
            # turn precision loiter on:
            self.set_rc(7, 2000)
            # try to drag aircraft to a position 5 metres north-east-east:
            self.loiter_to_ne(start.x + 5, start.y + 10, start.z + 10)
            self.loiter_to_ne(start.x + 5, start.y - 10, start.z + 10)
        except Exception as e:
            self.print_exception_caught(e)
            ex = e
        self.context_pop()
        self.zero_throttle()
        self.disarm_vehicle(force=True)
        self.reboot_sitl()
        self.progress("All done")
        if ex is not None:
            raise ex
    def loiter_requires_position(self):
        """Ensure LOITER cannot be entered without a position estimate."""
        # ensure we can't switch to LOITER without position
        self.progress("Ensure we can't enter LOITER without position")
        self.context_push()
        self.set_parameter("GPS_TYPE", 2)
        self.set_parameter("SIM_GPS_DISABLE", 1)
        self.reboot_sitl()
        # check for expected EKF flags
        ahrs_ekf_type = self.get_parameter("AHRS_EKF_TYPE")
        expected_ekf_flags = (mavutil.mavlink.ESTIMATOR_ATTITUDE |
                              mavutil.mavlink.ESTIMATOR_VELOCITY_VERT |
                              mavutil.mavlink.ESTIMATOR_POS_VERT_ABS |
                              mavutil.mavlink.ESTIMATOR_CONST_POS_MODE)
        if ahrs_ekf_type == 2:
            expected_ekf_flags = expected_ekf_flags | mavutil.mavlink.ESTIMATOR_PRED_POS_HORIZ_REL
        self.wait_ekf_flags(expected_ekf_flags, 0, timeout=120)
        # arm in Stabilize and attempt to switch to Loiter
        self.change_mode('STABILIZE')
        self.arm_vehicle()
        self.context_collect('STATUSTEXT')
        # mode change must be refused and explain why:
        self.run_cmd_do_set_mode(
            "LOITER",
            want_result=mavutil.mavlink.MAV_RESULT_FAILED)
        self.wait_statustext("requires position", check_context=True)
        self.disarm_vehicle()
        self.context_pop()
        self.reboot_sitl()
    def test_arm_feature(self):
        """Copter-specific arming checks, then the common arm-feature tests."""
        self.loiter_requires_position()
        super(AutoTestCopter, self).test_arm_feature()
    def test_parameter_checks(self):
        """Ensure position-controller (PSC) parameter sanity checks work."""
        self.test_parameter_checks_poscontrol("PSC")
    def fly_poshold_takeoff(self):
        """ensure vehicle stays put until it is ready to fly"""
        self.context_push()
        ex = None
        try:
            # pilot takeoff altitude in cm (7m):
            self.set_parameter("PILOT_TKOFF_ALT", 700)
            self.change_mode('POSHOLD')
            self.set_rc(3, 1000)
            self.wait_ready_to_arm()
            self.arm_vehicle()
            self.delay_sim_time(2)
            # check we are still on the ground...
            m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
            if abs(m.relative_alt) > 100:
                raise NotAchievedException("Took off prematurely")
            self.progress("Pushing throttle up")
            self.set_rc(3, 1710)
            self.delay_sim_time(0.5)
            self.progress("Bringing back to hover throttle")
            self.set_rc(3, 1500)
            # make sure we haven't already reached alt:
            m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
            max_initial_alt = 2000
            if abs(m.relative_alt) > max_initial_alt:
                raise NotAchievedException("Took off too fast (%f > %f" %
                                           (abs(m.relative_alt), max_initial_alt))
            self.progress("Monitoring takeoff-to-alt")
            self.wait_altitude(6.9, 8, relative=True)
            self.progress("Making sure we stop at our takeoff altitude")
            tstart = self.get_sim_time()
            while self.get_sim_time() - tstart < 5:
                m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
                # vehicle must hold within 1m of the 7m takeoff altitude:
                delta = abs(7000 - m.relative_alt)
                self.progress("alt=%f delta=%f" % (m.relative_alt/1000,
                                                   delta/1000))
                if delta > 1000:
                    raise NotAchievedException("Failed to maintain takeoff alt")
            self.progress("takeoff OK")
        except Exception as e:
            self.print_exception_caught(e)
            ex = e
        self.land_and_disarm()
        self.set_rc(8, 1000)
        self.context_pop()
        if ex is not None:
            raise ex
    def initial_mode(self):
        """Mode the vehicle is expected to be in on startup."""
        return "STABILIZE"
    def initial_mode_switch_mode(self):
        """Mode the initial flight-mode-switch position maps to."""
        return "STABILIZE"
    def default_mode(self):
        """Default flight mode used by the test framework for Copter."""
        return "STABILIZE"
def rc_defaults(self):
ret = super(AutoTestCopter, self).rc_defaults()
ret[3] = 1000
ret[5] = 1800 # mode switch
return ret
    def test_manual_control(self):
        '''test manual_control mavlink message'''
        # MANUAL_CONTROL is only accepted from the configured GCS sysid:
        self.set_parameter("SYSID_MYGCS", self.mav.source_system)
        self.change_mode('STABILIZE')
        self.takeoff(10)
        tstart = self.get_sim_time_cached()
        want_pitch_degrees = -12
        while True:
            if self.get_sim_time_cached() - tstart > 10:
                raise AutoTestTimeoutException("Did not reach pitch")
            self.progress("Sending pitch-forward")
            # NOTE(review): 32767 appears to mark an axis as unused here
            # - confirm against the MANUAL_CONTROL message definition
            self.mav.mav.manual_control_send(
                1, # target system
                500, # x (pitch)
                32767, # y (roll)
                32767, # z (thrust)
                32767, # r (yaw)
                0) # button mask
            m = self.mav.recv_match(type='ATTITUDE', blocking=True, timeout=1)
            print("m=%s" % str(m))
            if m is None:
                continue
            p = math.degrees(m.pitch)
            self.progress("pitch=%f want<=%f" % (p, want_pitch_degrees))
            if p <= want_pitch_degrees:
                break
        # neutralise all axes before returning home:
        self.mav.mav.manual_control_send(
            1, # target system
            32767, # x (pitch)
            32767, # y (roll)
            32767, # z (thrust)
            32767, # r (yaw)
            0) # button mask
        self.do_RTL()
    def check_avoidance_corners(self):
        """Fly around fence corners in LOITER checking avoidance holds us back.

        Drives the vehicle towards the west, north and east corners with
        stick input and waits for the expected bounded locations.
        """
        self.takeoff(10, mode="LOITER")
        self.set_rc(2, 1400)
        west_loc = mavutil.location(-35.363007,
                                    149.164911,
                                    0,
                                    0)
        self.wait_location(west_loc, accuracy=6)
        north_loc = mavutil.location(-35.362908,
                                     149.165051,
                                     0,
                                     0)
        self.reach_heading_manual(0)
        self.wait_location(north_loc, accuracy=6, timeout=200)
        self.reach_heading_manual(90)
        east_loc = mavutil.location(-35.363013,
                                    149.165194,
                                    0,
                                    0)
        self.wait_location(east_loc, accuracy=6)
        self.reach_heading_manual(225)
        self.wait_location(west_loc, accuracy=6, timeout=200)
        self.set_rc(2, 1500)
        self.do_RTL()
def OBSTACLE_DISTANCE_3D_test_angle(self, angle):
now = self.get_sim_time_cached()
distance = 15
right = distance * math.sin(math.radians(angle))
front = distance * math.cos(math.radians(angle))
down = 0
expected_distance_cm = distance * 100
# expected orientation
expected_orientation = int((angle+22.5)/45) % 8
self.progress("Angle %f expected orient %u" %
(angle, expected_orientation))
tstart = self.get_sim_time()
last_send = 0
while True:
now = self.get_sim_time_cached()
if now - tstart > 10:
raise NotAchievedException("Did not get correct angle back")
if now - last_send > 0.1:
self.progress("ang=%f sending front=%f right=%f" %
(angle, front, right))
self.mav.mav.obstacle_distance_3d_send(
int(now*1000), # time_boot_ms
mavutil.mavlink.MAV_DISTANCE_SENSOR_LASER,
mavutil.mavlink.MAV_FRAME_BODY_FRD,
65535,
front, # x (m)
right, # y (m)
down, # z (m)
0, # min_distance (m)
20 # max_distance (m)
)
last_send = now
m = self.mav.recv_match(type="DISTANCE_SENSOR",
blocking=True,
timeout=1)
if m is None:
continue
# self.progress("Got (%s)" % str(m))
if m.orientation != expected_orientation:
# self.progress("Wrong orientation (want=%u got=%u)" %
# (expected_orientation, m.orientation))
continue
if abs(m.current_distance - expected_distance_cm) > 1:
# self.progress("Wrong distance (want=%f got=%f)" %
# (expected_distance_cm, m.current_distance))
continue
self.progress("distance-at-angle good")
break
    def OBSTACLE_DISTANCE_3D(self):
        """Check OBSTACLE_DISTANCE_3D round-trip for every bearing 0-359."""
        self.context_push()
        ex = None
        try:
            self.set_parameters({
                "SERIAL5_PROTOCOL": 1,
                "PRX_TYPE": 2,   # mavlink proximity backend
            })
            self.reboot_sitl()
            for angle in range(0, 360):
                self.OBSTACLE_DISTANCE_3D_test_angle(angle)
        except Exception as e:
            self.print_exception_caught(e)
            ex = e
        self.context_pop()
        self.disarm_vehicle(force=True)
        self.reboot_sitl()
        if ex is not None:
            raise ex
    def fly_proximity_avoidance_test_corners(self):
        """Check proximity-based avoidance against fence corners."""
        self.start_subtest("Corners")
        self.context_push()
        ex = None
        try:
            self.load_fence("copter-avoidance-fence.txt")
            self.set_parameter("FENCE_ENABLE", 1)
            self.set_parameter("PRX_TYPE", 10)   # SITL proximity backend
            self.set_parameter("RC10_OPTION", 40) # proximity-enable
            self.reboot_sitl()
            self.progress("Enabling proximity")
            self.set_rc(10, 2000)
            self.check_avoidance_corners()
        except Exception as e:
            self.print_exception_caught(e)
            ex = e
        self.context_pop()
        self.clear_fence()
        self.disarm_vehicle(force=True)
        self.reboot_sitl()
        if ex is not None:
            raise ex
    def fly_proximity_avoidance_test_alt_no_avoid(self):
        """Check avoidance is disabled below AVOID_ALT_MIN.

        With AVOID_ALT_MIN=10 the vehicle should avoid an injected close
        obstacle at 15m but ignore the same obstacle at 5m.
        """
        self.start_subtest("Alt-no-avoid")
        self.context_push()
        ex = None
        try:
            self.set_parameter("PRX_TYPE", 2)       # mavlink proximity backend
            self.set_parameter("AVOID_ALT_MIN", 10) # no avoidance below 10m
            self.set_analog_rangefinder_parameters()
            self.reboot_sitl()
            tstart = self.get_sim_time()
            self.change_mode('LOITER')
            # keep feeding proximity data until the vehicle arms (proximity
            # must be healthy to pass arming checks):
            while True:
                if self.armed():
                    break
                if self.get_sim_time_cached() - tstart > 60:
                    raise AutoTestTimeoutException("Did not arm")
                self.mav.mav.distance_sensor_send(
                    0,  # time_boot_ms
                    10, # min_distance cm
                    500, # max_distance cm
                    400, # current_distance cm
                    mavutil.mavlink.MAV_DISTANCE_SENSOR_LASER, # type
                    26, # id
                    mavutil.mavlink.MAV_SENSOR_ROTATION_NONE, # orientation
                    255  # covariance
                )
                self.send_cmd(mavutil.mavlink.MAV_CMD_COMPONENT_ARM_DISARM,
                              1,  # ARM
                              0,
                              0,
                              0,
                              0,
                              0,
                              0)
                self.wait_heartbeat()
            self.takeoff(15, mode='LOITER')
            self.progress("Poking vehicle; should avoid")
            def shove(a, b):
                # inject an obstacle 20cm in front of the vehicle:
                self.mav.mav.distance_sensor_send(
                    0,  # time_boot_ms
                    10, # min_distance cm
                    500, # max_distance cm
                    20, # current_distance cm
                    mavutil.mavlink.MAV_DISTANCE_SENSOR_LASER, # type
                    21, # id
                    mavutil.mavlink.MAV_SENSOR_ROTATION_NONE, # orientation
                    255  # covariance
                )
            # at 15m (above AVOID_ALT_MIN) the vehicle must back away:
            self.wait_speed_vector_bf(
                Vector3(-0.4, 0.0, 0.0),
                timeout=10,
                called_function=shove,
            )
            self.change_alt(5)
            tstart = self.get_sim_time()
            # at 5m (below AVOID_ALT_MIN) the same shove must be ignored:
            while True:
                if self.get_sim_time_cached() - tstart > 10:
                    break
                vel = self.get_body_frame_velocity()
                if vel.length() > 0.3:
                    raise NotAchievedException("Moved too much (%s)" %
                                               (str(vel),))
                shove(None, None)
        except Exception as e:
            self.progress("Caught exception: %s" %
                          self.get_exception_stacktrace(e))
            ex = e
        self.context_pop()
        self.disarm_vehicle(force=True)
        self.reboot_sitl()
        if ex is not None:
            raise ex
    def fly_proximity_avoidance_test(self):
        """Run all proximity-avoidance subtests."""
        self.fly_proximity_avoidance_test_alt_no_avoid()
        self.fly_proximity_avoidance_test_corners()
    def fly_fence_avoidance_test(self):
        """Check fence-based avoidance by flying at the fence corners."""
        self.context_push()
        ex = None
        try:
            self.load_fence("copter-avoidance-fence.txt")
            self.set_parameter("FENCE_ENABLE", 1)
            self.check_avoidance_corners()
        except Exception as e:
            self.print_exception_caught(e)
            ex = e
        self.context_pop()
        self.clear_fence()
        self.disarm_vehicle(force=True)
        if ex is not None:
            raise ex
def global_position_int_for_location(self, loc, time_boot, heading=0):
return self.mav.mav.global_position_int_encode(
int(time_boot * 1000), # time_boot_ms
int(loc.lat * 1e7),
int(loc.lng * 1e7),
int(loc.alt * 1000), # alt in mm
20, # relative alt - urp.
vx=0,
vy=0,
vz=0,
hdg=heading
)
def fly_follow_mode(self):
self.set_parameter("FOLL_ENABLE", 1)
self.set_parameter("FOLL_SYSID", self.mav.source_system)
foll_ofs_x = 30 # metres
self.set_parameter("FOLL_OFS_X", -foll_ofs_x)
self.set_parameter("FOLL_OFS_TYPE", 1) # relative to other vehicle heading
self.takeoff(10, mode="LOITER")
self.set_parameter("SIM_SPEEDUP", 1)
self.change_mode("FOLLOW")
new_loc = self.mav.location()
new_loc_offset_n = 20
new_loc_offset_e = 30
self.location_offset_ne(new_loc, new_loc_offset_n, new_loc_offset_e)
self.progress("new_loc: %s" % str(new_loc))
heading = 0
if self.mavproxy is not None:
self.mavproxy.send("map icon %f %f greenplane %f\n" %
(new_loc.lat, new_loc.lng, heading))
expected_loc = copy.copy(new_loc)
self.location_offset_ne(expected_loc, -foll_ofs_x, 0)
if self.mavproxy is not None:
self.mavproxy.send("map icon %f %f hoop\n" %
(expected_loc.lat, expected_loc.lng))
self.progress("expected_loc: %s" % str(expected_loc))
last_sent = 0
tstart = self.get_sim_time()
while True:
now = self.get_sim_time_cached()
if now - tstart > 60:
raise NotAchievedException("Did not FOLLOW")
if now - last_sent > 0.5:
gpi = self.global_position_int_for_location(new_loc,
now,
heading=heading)
gpi.pack(self.mav.mav)
self.mav.mav.send(gpi)
self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
pos = self.mav.location()
delta = self.get_distance(expected_loc, pos)
max_delta = 3
self.progress("position delta=%f (want <%f)" % (delta, max_delta))
if delta < max_delta:
break
self.do_RTL()
def get_global_position_int(self, timeout=30):
tstart = self.get_sim_time()
while True:
if self.get_sim_time_cached() - tstart > timeout:
raise NotAchievedException("Did not get good global_position_int")
m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True, timeout=1)
self.progress("GPI: %s" % str(m))
if m is None:
continue
if m.lat != 0 or m.lon != 0:
return m
def fly_beacon_position(self):
self.reboot_sitl()
self.wait_ready_to_arm(require_absolute=True)
old_pos = self.get_global_position_int()
print("old_pos=%s" % str(old_pos))
self.context_push()
ex = None
try:
self.set_parameter("BCN_TYPE", 10)
self.set_parameter("BCN_LATITUDE", SITL_START_LOCATION.lat)
self.set_parameter("BCN_LONGITUDE", SITL_START_LOCATION.lng)
self.set_parameter("BCN_ALT", SITL_START_LOCATION.alt)
self.set_parameter("BCN_ORIENT_YAW", 0)
self.set_parameter("AVOID_ENABLE", 4)
self.set_parameter("GPS_TYPE", 0)
self.set_parameter("EK3_ENABLE", 1)
self.set_parameter("EK3_SRC1_POSXY", 4) # Beacon
self.set_parameter("EK3_SRC1_POSZ", 1) # Baro
self.set_parameter("EK3_SRC1_VELXY", 0) # None
self.set_parameter("EK3_SRC1_VELZ", 0) # None
self.set_parameter("EK2_ENABLE", 0)
self.set_parameter("AHRS_EKF_TYPE", 3)
self.reboot_sitl()
# turn off GPS arming checks. This may be considered a
# bug that we need to do this.
old_arming_check = int(self.get_parameter("ARMING_CHECK"))
if old_arming_check == 1:
old_arming_check = 1 ^ 25 - 1
new_arming_check = int(old_arming_check) & ~(1 << 3)
self.set_parameter("ARMING_CHECK", new_arming_check)
self.reboot_sitl()
# require_absolute=True infers a GPS is present
self.wait_ready_to_arm(require_absolute=False)
tstart = self.get_sim_time()
timeout = 20
while True:
if self.get_sim_time_cached() - tstart > timeout:
raise NotAchievedException("Did not get new position like old position")
self.progress("Fetching location")
new_pos = self.get_global_position_int()
pos_delta = self.get_distance_int(old_pos, new_pos)
max_delta = 1
self.progress("delta=%u want <= %u" % (pos_delta, max_delta))
if pos_delta <= max_delta:
break
self.progress("Moving to ensure location is tracked")
self.takeoff(10, mode="STABILIZE")
self.change_mode("CIRCLE")
tstart = self.get_sim_time()
max_delta = 0
max_allowed_delta = 10
while True:
if self.get_sim_time_cached() - tstart > timeout:
break
pos_delta = self.get_distance_int(self.sim_location_int(), self.get_global_position_int())
self.progress("pos_delta=%f max_delta=%f max_allowed_delta=%f" % (pos_delta, max_delta, max_allowed_delta))
if pos_delta > max_delta:
max_delta = pos_delta
if pos_delta > max_allowed_delta:
raise NotAchievedException("Vehicle location not tracking simulated location (%f > %f)" %
(pos_delta, max_allowed_delta))
self.progress("Tracked location just fine (max_delta=%f)" % max_delta)
self.change_mode("LOITER")
self.wait_groundspeed(0, 0.3, timeout=120)
self.land_and_disarm()
except Exception as e:
self.print_exception_caught(e)
ex = e
self.disarm_vehicle(force=True)
self.reboot_sitl()
self.context_pop()
self.reboot_sitl()
if ex is not None:
raise ex
    def fly_beacon_avoidance_test(self):
        """Check beacon-based avoidance keeps the vehicle inside bounds."""
        self.context_push()
        ex = None
        try:
            self.set_parameter("BCN_TYPE", 10)
            # NOTE(review): lat/lng are truncated to whole degrees here,
            # unlike fly_beacon_position which passes full precision -
            # verify this is intentional
            self.set_parameter("BCN_LATITUDE", int(SITL_START_LOCATION.lat))
            self.set_parameter("BCN_LONGITUDE", int(SITL_START_LOCATION.lng))
            self.set_parameter("BCN_ORIENT_YAW", 45)
            self.set_parameter("AVOID_ENABLE", 4)
            self.reboot_sitl()
            self.takeoff(10, mode="LOITER")
            self.set_rc(2, 1400)
            west_loc = mavutil.location(-35.362919, 149.165055, 0, 0)
            self.wait_location(west_loc, accuracy=7)
            self.reach_heading_manual(0)
            north_loc = mavutil.location(-35.362881, 149.165103, 0, 0)
            self.wait_location(north_loc, accuracy=7)
            self.set_rc(2, 1500)
            self.set_rc(1, 1600)
            east_loc = mavutil.location(-35.362986, 149.165227, 0, 0)
            self.wait_location(east_loc, accuracy=7)
            self.set_rc(1, 1500)
            self.set_rc(2, 1600)
            south_loc = mavutil.location(-35.363025, 149.165182, 0, 0)
            self.wait_location(south_loc, accuracy=7)
            self.set_rc(2, 1500)
            self.do_RTL()
        except Exception as e:
            self.print_exception_caught(e)
            ex = e
        self.context_pop()
        self.clear_fence()
        self.disarm_vehicle(force=True)
        self.reboot_sitl()
        if ex is not None:
            raise ex
    def fly_wind_baro_compensation(self):
        """Check baro wind-coefficient compensation on the Callisto model.

        Hovers while yaw-spinning in a known wind, then verifies the EKF
        wind estimate matches the simulated truth and that altitude hold
        is unaffected by the wind-induced baro error.
        """
        self.context_push()
        ex = None
        try:
            self.customise_SITL_commandline(
                ["--defaults", ','.join(self.model_defaults_filepath('Callisto'))],
                model="octa-quad:@ROMFS/models/Callisto.json",
                wipe=True,
            )
            wind_spd_truth = 8.0
            wind_dir_truth = 90.0
            self.set_parameter("EK3_ENABLE", 1)
            self.set_parameter("EK2_ENABLE", 0)
            self.set_parameter("AHRS_EKF_TYPE", 3)
            self.set_parameter("BARO1_WCF_ENABLE", 1.000000)
            self.reboot_sitl()
            # wind-compensation coefficients mirrored between vehicle and sim:
            self.set_parameter("EK3_DRAG_BCOEF_X", 361.000000)
            self.set_parameter("EK3_DRAG_BCOEF_Y", 361.000000)
            self.set_parameter("EK3_DRAG_MCOEF", 0.082000)
            self.set_parameter("BARO1_WCF_FWD", -0.300000)
            self.set_parameter("BARO1_WCF_BCK", -0.300000)
            self.set_parameter("BARO1_WCF_RGT", 0.300000)
            self.set_parameter("BARO1_WCF_LFT", 0.300000)
            self.set_parameter("SIM_BARO_WCF_FWD", -0.300000)
            self.set_parameter("SIM_BARO_WCF_BAK", -0.300000)
            self.set_parameter("SIM_BARO_WCF_RGT", 0.300000)
            self.set_parameter("SIM_BARO_WCF_LFT", 0.300000)
            self.set_parameter("SIM_WIND_DIR", wind_dir_truth)
            self.set_parameter("SIM_WIND_SPD", wind_spd_truth)
            self.set_parameter("SIM_WIND_T", 1.000000)
            self.reboot_sitl()
            # require_absolute=True infers a GPS is present
            self.wait_ready_to_arm(require_absolute=False)
            self.progress("Climb to 20m in LOITER and yaw spin for 30 seconds")
            self.takeoff(10, mode="LOITER")
            self.set_rc(4, 1400)
            self.delay_sim_time(30)
            # check wind estimates
            m = self.mav.recv_match(type='WIND', blocking=True)
            speed_error = abs(m.speed - wind_spd_truth)
            angle_error = abs(m.direction - wind_dir_truth)
            if (speed_error > 1.0):
                raise NotAchievedException("Wind speed incorrect - want %f +-1 got %f m/s" % (wind_spd_truth, m.speed))
            if (angle_error > 15.0):
                raise NotAchievedException(
                    "Wind direction incorrect - want %f +-15 got %f deg" %
                    (wind_dir_truth, m.direction))
            self.progress("Wind estimate is good, now check height variation for 30 seconds")
            # check height stability over another 30 seconds
            z_min = 1E6
            z_max = -1E6
            tstart = self.get_sim_time()
            while (self.get_sim_time() < tstart + 30):
                m = self.mav.recv_match(type='LOCAL_POSITION_NED', blocking=True)
                if (m.z > z_max):
                    z_max = m.z
                if (m.z < z_min):
                    z_min = m.z
            if (z_max-z_min > 0.5):
                raise NotAchievedException("Height variation is excessive")
            self.progress("Height variation is good")
            self.set_rc(4, 1500)
            self.land_and_disarm()
        except Exception as e:
            self.print_exception_caught(e)
            ex = e
        self.disarm_vehicle(force=True)
        self.reboot_sitl()
        self.context_pop()
        self.reboot_sitl()
        if ex is not None:
            raise ex
def wait_generator_speed_and_state(self, rpm_min, rpm_max, want_state, timeout=240):
self.drain_mav()
tstart = self.get_sim_time()
while True:
if self.get_sim_time_cached() - tstart > timeout:
raise NotAchievedException("Did not move to state/speed")
m = self.mav.recv_match(type="GENERATOR_STATUS", blocking=True, timeout=10)
if m is None:
raise NotAchievedException("Did not get GENERATOR_STATUS")
if m.generator_speed < rpm_min:
self.progress("Too slow (%u<%u)" % (m.generator_speed, rpm_min))
continue
if m.generator_speed > rpm_max:
self.progress("Too fast (%u>%u)" % (m.generator_speed, rpm_max))
continue
if m.status != want_state:
self.progress("Wrong state (got=%u want=%u)" % (m.status, want_state))
break
self.progress("Got generator speed and state")
    def test_richenpower(self):
        """Exercise the RichenPower generator driver via its SITL backend.

        Steps the RC9 three-position switch through stop/idle/run and
        checks GENERATOR_STATUS, BATTERY_STATUS and onboard-log output.
        """
        self.set_parameter("SERIAL5_PROTOCOL", 30)
        self.set_parameter("SIM_RICH_ENABLE", 1)
        self.set_parameter("SERVO8_FUNCTION", 42)
        self.set_parameter("SIM_RICH_CTRL", 8)
        self.set_parameter("RC9_OPTION", 85)
        self.set_parameter("LOG_DISARMED", 1)
        self.set_parameter("BATT2_MONITOR", 17)
        self.set_parameter("GEN_TYPE", 3)
        self.reboot_sitl()
        self.set_rc(9, 1000) # remember this is a switch position - stop
        self.customise_SITL_commandline(["--uartF=sim:richenpower"])
        self.wait_statustext("requested state is not RUN", timeout=60)
        self.set_message_rate_hz("GENERATOR_STATUS", 10)
        self.drain_mav_unparsed()
        self.wait_generator_speed_and_state(0, 0, mavutil.mavlink.MAV_GENERATOR_STATUS_FLAG_OFF)
        # collect STATUSTEXTs while commanding RUN so we don't miss the
        # transient "Generator HIGH" warning:
        messages = []
        def my_message_hook(mav, m):
            if m.get_type() != 'STATUSTEXT':
                return
            messages.append(m)
        self.install_message_hook(my_message_hook)
        try:
            self.set_rc(9, 2000) # remember this is a switch position - run
        finally:
            self.remove_message_hook(my_message_hook)
        if "Generator HIGH" not in [x.text for x in messages]:
            self.wait_statustext("Generator HIGH", timeout=60)
        self.set_rc(9, 1000) # remember this is a switch position - stop
        self.wait_statustext("requested state is not RUN", timeout=200)
        self.set_rc(9, 1500) # remember this is a switch position - idle
        self.wait_generator_speed_and_state(3000, 8000, mavutil.mavlink.MAV_GENERATOR_STATUS_FLAG_IDLE)
        self.set_rc(9, 2000) # remember this is a switch position - run
        # self.wait_generator_speed_and_state(3000, 30000, mavutil.mavlink.MAV_GENERATOR_STATUS_FLAG_WARMING_UP)
        self.wait_generator_speed_and_state(8000, 30000, mavutil.mavlink.MAV_GENERATOR_STATUS_FLAG_GENERATING)
        bs = self.mav.recv_match(
            type="BATTERY_STATUS",
            condition="BATTERY_STATUS.id==1",  # id is zero-indexed
            timeout=1,
            blocking=True
        )
        if bs is None:
            raise NotAchievedException("Did not receive BATTERY_STATUS")
        self.progress("Received battery status: %s" % str(bs))
        want_bs_volt = 50000
        if bs.voltages[0] != want_bs_volt:
            raise NotAchievedException("Battery voltage not as expected (want=%f) got=(%f)" % (want_bs_volt, bs.voltages[0],))
        self.progress("Moving *back* to idle")
        self.set_rc(9, 1500) # remember this is a switch position - idle
        self.wait_generator_speed_and_state(3000, 10000, mavutil.mavlink.MAV_GENERATOR_STATUS_FLAG_IDLE)
        self.progress("Moving *back* to run")
        self.set_rc(9, 2000) # remember this is a switch position - run
        self.wait_generator_speed_and_state(8000, 30000, mavutil.mavlink.MAV_GENERATOR_STATUS_FLAG_GENERATING)
        self.set_message_rate_hz("GENERATOR_STATUS", -1)
        self.set_parameter("LOG_DISARMED", 0)
        if not self.current_onboard_log_contains_message("GEN"):
            raise NotAchievedException("Did not find expected GEN message")
    def test_ie24(self):
        """Exercise the IE 2400 generator driver: pre-arm state check and
        error-code failsafe/disarm behaviour."""
        self.context_push()
        ex = None
        try:
            self.set_parameter("SERIAL5_PROTOCOL", 30)
            self.set_parameter("SERIAL5_BAUD", 115200)
            self.set_parameter("GEN_TYPE", 2)
            self.set_parameter("BATT2_MONITOR", 17)
            self.set_parameter("SIM_IE24_ENABLE", 1)
            self.set_parameter("LOG_DISARMED", 1)
            self.customise_SITL_commandline(["--uartF=sim:ie24"])
            self.wait_ready_to_arm()
            self.arm_vehicle()
            self.disarm_vehicle()
            # Test for pre-arm check fail when state is not running
            self.start_subtest("If you haven't taken off generator error should cause instant failsafe and disarm")
            self.set_parameter("SIM_IE24_STATE", 8)
            self.wait_statustext("Status not running", timeout=40)
            self.try_arm(result=False,
                         expect_msg="Status not running")
            self.set_parameter("SIM_IE24_STATE", 2) # Explicitly set state to running
            # Test that error code does result in failsafe
            self.start_subtest("If you haven't taken off generator error should cause instant failsafe and disarm")
            self.change_mode("STABILIZE")
            self.set_parameter("DISARM_DELAY", 0)
            self.arm_vehicle()
            self.set_parameter("SIM_IE24_ERROR", 30)
            self.disarm_wait(timeout=1)
            self.set_parameter("SIM_IE24_ERROR", 0)
            self.set_parameter("DISARM_DELAY", 10)
        except Exception as e:
            self.print_exception_caught(e)
            ex = e
        self.context_pop()
        if ex is not None:
            raise ex
    def test_aux_switch_options(self):
        """Check RC auxiliary-switch options: clear-waypoints (58) and
        reset-mission (24)."""
        self.set_parameter("RC7_OPTION", 58) # clear waypoints
        self.load_mission("copter_loiter_to_alt.txt")
        self.set_rc(7, 1000)
        self.assert_mission_count(5)
        self.progress("Clear mission")
        self.set_rc(7, 2000)
        self.delay_sim_time(1) # allow switch to debounce
        self.assert_mission_count(0)
        self.set_rc(7, 1000)
        self.set_parameter("RC7_OPTION", 24) # reset mission
        self.delay_sim_time(2)
        self.load_mission("copter_loiter_to_alt.txt")
        set_wp = 4
        self.set_current_waypoint(set_wp)
        self.wait_current_waypoint(set_wp, timeout=10)
        self.progress("Reset mission")
        self.set_rc(7, 2000)
        self.delay_sim_time(1)
        self.drain_mav()
        # reset-mission should put us back at waypoint 0:
        self.wait_current_waypoint(0, timeout=10)
        self.set_rc(7, 1000)
    def test_aux_functions_in_mission(self):
        """Run a mission containing DO_AUX_FUNCTION items and check they
        change modes as commanded."""
        self.load_mission("aux_functions.txt")
        self.change_mode('LOITER')
        self.wait_ready_to_arm()
        self.arm_vehicle()
        self.change_mode('AUTO')
        self.set_rc(3, 1500)
        # mission's aux function should switch us to ALT_HOLD:
        self.wait_mode('ALT_HOLD')
        self.change_mode('AUTO')
        self.wait_rtl_complete()
def fly_rangefinder_drivers_fly(self, rangefinders):
'''ensure rangefinder gives height-above-ground'''
self.change_mode('GUIDED')
self.wait_ready_to_arm()
self.arm_vehicle()
expected_alt = 5
self.user_takeoff(alt_min=expected_alt)
rf = self.mav.recv_match(type="RANGEFINDER", timeout=1, blocking=True)
if rf is None:
raise NotAchievedException("Did not receive rangefinder message")
gpi = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True, timeout=1)
if gpi is None:
raise NotAchievedException("Did not receive GLOBAL_POSITION_INT message")
if abs(rf.distance - gpi.relative_alt/1000.0) > 1:
raise NotAchievedException(
"rangefinder alt (%s) disagrees with global-position-int.relative_alt (%s)" %
(rf.distance, gpi.relative_alt/1000.0)
)
for i in range(0, len(rangefinders)):
name = rangefinders[i]
self.progress("i=%u (%s)" % (i, name))
ds = self.mav.recv_match(
type="DISTANCE_SENSOR",
timeout=2,
blocking=True,
condition="DISTANCE_SENSOR.id==%u" % i
)
if ds is None:
raise NotAchievedException("Did not receive DISTANCE_SENSOR message for id==%u (%s)" % (i, name))
self.progress("Got: %s" % str(ds))
if abs(ds.current_distance/100.0 - gpi.relative_alt/1000.0) > 1:
raise NotAchievedException(
"distance sensor.current_distance (%f) (%s) disagrees with global-position-int.relative_alt (%s)" %
(ds.current_distance/100.0, name, gpi.relative_alt/1000.0))
self.land_and_disarm()
self.progress("Ensure RFND messages in log")
if not self.current_onboard_log_contains_message("RFND"):
raise NotAchievedException("No RFND messages in log")
    def fly_proximity_mavlink_distance_sensor(self):
        """Feed the mavlink proximity backend with DISTANCE_SENSOR messages
        and check sensor health transitions plus echo of all eight
        orientation octants."""
        self.start_subtest("Test mavlink proximity sensor using DISTANCE_SENSOR messages")  # noqa
        self.context_push()
        ex = None
        try:
            self.set_parameter("SERIAL5_PROTOCOL", 1)
            self.set_parameter("PRX_TYPE", 2)  # mavlink
            self.reboot_sitl()
            self.progress("Should be unhealthy while we don't send messages")
            self.assert_sensor_state(mavutil.mavlink.MAV_SYS_STATUS_SENSOR_PROXIMITY, True, True, False)
            self.progress("Should be healthy while we're sending good messages")
            tstart = self.get_sim_time()
            while True:
                if self.get_sim_time() - tstart > 5:
                    raise NotAchievedException("Sensor did not come good")
                self.mav.mav.distance_sensor_send(
                    0,  # time_boot_ms
                    10, # min_distance cm
                    50, # max_distance cm
                    20, # current_distance cm
                    mavutil.mavlink.MAV_DISTANCE_SENSOR_LASER, # type
                    21, # id
                    mavutil.mavlink.MAV_SENSOR_ROTATION_NONE, # orientation
                    255  # covariance
                )
                if self.sensor_has_state(mavutil.mavlink.MAV_SYS_STATUS_SENSOR_PROXIMITY, True, True, True):
                    self.progress("Sensor has good state")
                    break
                self.delay_sim_time(0.1)
            self.progress("Should be unhealthy again if we stop sending messages")
            self.delay_sim_time(1)
            self.assert_sensor_state(mavutil.mavlink.MAV_SYS_STATUS_SENSOR_PROXIMITY, True, True, False)
            # now make sure we get echoed back the same sorts of things we send:
            # distances are in cm
            distance_map = {
                mavutil.mavlink.MAV_SENSOR_ROTATION_NONE: 30,
                mavutil.mavlink.MAV_SENSOR_ROTATION_YAW_45: 35,
                mavutil.mavlink.MAV_SENSOR_ROTATION_YAW_90: 20,
                mavutil.mavlink.MAV_SENSOR_ROTATION_YAW_135: 15,
                mavutil.mavlink.MAV_SENSOR_ROTATION_YAW_180: 70,
                mavutil.mavlink.MAV_SENSOR_ROTATION_YAW_225: 80,
                mavutil.mavlink.MAV_SENSOR_ROTATION_YAW_270: 10,
                mavutil.mavlink.MAV_SENSOR_ROTATION_YAW_315: 90,
            }
            # orientations still awaiting a correct echo; hook removes
            # entries as they are confirmed:
            wanted_distances = copy.copy(distance_map)
            sensor_enum = mavutil.mavlink.enums["MAV_SENSOR_ORIENTATION"]
            def my_message_hook(mav, m):
                if m.get_type() != 'DISTANCE_SENSOR':
                    return
                self.progress("Got (%s)" % str(m))
                want = distance_map[m.orientation]
                got = m.current_distance
                # ArduPilot's floating point conversions make it imprecise:
                delta = abs(want-got)
                if delta > 1:
                    self.progress(
                        "Wrong distance (%s): want=%f got=%f" %
                        (sensor_enum[m.orientation].name, want, got))
                    return
                if m.orientation not in wanted_distances:
                    return
                self.progress(
                    "Correct distance (%s): want=%f got=%f" %
                    (sensor_enum[m.orientation].name, want, got))
                del wanted_distances[m.orientation]
            self.install_message_hook_context(my_message_hook)
            tstart = self.get_sim_time()
            while True:
                if self.get_sim_time() - tstart > 5:
                    raise NotAchievedException("Sensor did not give right distances")  # noqa
                for (orient, dist) in distance_map.items():
                    self.mav.mav.distance_sensor_send(
                        0,  # time_boot_ms
                        10, # min_distance cm
                        90, # max_distance cm
                        dist, # current_distance cm
                        mavutil.mavlink.MAV_DISTANCE_SENSOR_LASER, # type
                        21, # id
                        orient, # orientation
                        255  # covariance
                    )
                    self.wait_heartbeat()
                if len(wanted_distances.keys()) == 0:
                    break
        except Exception as e:
            self.print_exception_caught(e)
            ex = e
        self.context_pop()
        self.reboot_sitl()
        if ex is not None:
            raise ex
    def fly_rangefinder_mavlink_distance_sensor(self):
        '''Exercise the MAVLink rangefinder driver (RNGFND1_TYPE=10).

        Checks that rangefinder health tracks incoming DISTANCE_SENSOR
        messages, then verifies the landing gear retracts/deploys in
        response to rangefinder altitude crossing LGR_RETRACT_ALT /
        LGR_DEPLOY_ALT.
        '''
        self.start_subtest("Test mavlink rangefinder using DISTANCE_SENSOR messages")
        self.context_push()
        self.set_parameter('RTL_ALT_TYPE', 0)
        ex = None
        try:
            self.set_parameter("SERIAL5_PROTOCOL", 1)
            self.set_parameter("RNGFND1_TYPE", 10)
            self.reboot_sitl()
            self.set_parameter("RNGFND1_MAX_CM", 32767)

            self.progress("Should be unhealthy while we don't send messages")
            self.assert_sensor_state(mavutil.mavlink.MAV_SYS_STATUS_SENSOR_LASER_POSITION, True, True, False)

            self.progress("Should be healthy while we're sending good messages")
            tstart = self.get_sim_time()
            while True:
                if self.get_sim_time() - tstart > 5:
                    raise NotAchievedException("Sensor did not come good")
                self.mav.mav.distance_sensor_send(
                    0,  # time_boot_ms
                    10, # min_distance
                    50, # max_distance
                    20, # current_distance
                    mavutil.mavlink.MAV_DISTANCE_SENSOR_LASER, # type
                    21, # id
                    mavutil.mavlink.MAV_SENSOR_ROTATION_PITCH_270, # orientation
                    255  # covariance
                )
                if self.sensor_has_state(mavutil.mavlink.MAV_SYS_STATUS_SENSOR_LASER_POSITION, True, True, True):
                    self.progress("Sensor has good state")
                    break
                self.delay_sim_time(0.1)

            self.progress("Should be unhealthy again if we stop sending messages")
            self.delay_sim_time(1)
            self.assert_sensor_state(mavutil.mavlink.MAV_SYS_STATUS_SENSOR_LASER_POSITION, True, True, False)

            self.progress("Landing gear should deploy with current_distance below min_distance")
            self.change_mode('STABILIZE')
            self.wait_ready_to_arm()
            self.arm_vehicle()
            self.set_parameter("SERVO10_FUNCTION", 29)
            self.set_parameter("LGR_DEPLOY_ALT", 1)
            self.set_parameter("LGR_RETRACT_ALT", 10)  # metres
            self.delay_sim_time(1)  # servo function maps only periodically updated
#            self.send_debug_trap()

            # request deployed gear as the starting state:
            self.run_cmd(
                mavutil.mavlink.MAV_CMD_AIRFRAME_CONFIGURATION,
                0,
                0,  # deploy
                0,
                0,
                0,
                0,
                0
            )

            self.mav.mav.distance_sensor_send(
                0,  # time_boot_ms
                100, # min_distance (cm)
                2500, # max_distance (cm)
                200, # current_distance (cm)
                mavutil.mavlink.MAV_DISTANCE_SENSOR_LASER, # type
                21, # id
                mavutil.mavlink.MAV_SENSOR_ROTATION_PITCH_270, # orientation
                255  # covariance
            )

            self.context_collect("STATUSTEXT")

            # 15m reported altitude is above LGR_RETRACT_ALT (10m):
            tstart = self.get_sim_time()
            while True:
                if self.get_sim_time_cached() - tstart > 5:
                    raise NotAchievedException("Retraction did not happen")
                self.mav.mav.distance_sensor_send(
                    0,  # time_boot_ms
                    100, # min_distance (cm)
                    6000, # max_distance (cm)
                    1500, # current_distance (cm)
                    mavutil.mavlink.MAV_DISTANCE_SENSOR_LASER, # type
                    21, # id
                    mavutil.mavlink.MAV_SENSOR_ROTATION_PITCH_270, # orientation
                    255  # covariance
                )
                self.delay_sim_time(0.1)
                try:
                    self.wait_text("LandingGear: RETRACT", check_context=True, timeout=0.1)
                except Exception:
                    continue
                self.progress("Retracted")
                break
#            self.send_debug_trap()

            # 2.5m reported altitude is below LGR_DEPLOY_ALT... but note
            # min_distance is 3m so current_distance is out-of-range low:
            while True:
                if self.get_sim_time_cached() - tstart > 5:
                    raise NotAchievedException("Deployment did not happen")
                self.progress("Sending distance-sensor message")
                self.mav.mav.distance_sensor_send(
                    0,  # time_boot_ms
                    300, # min_distance
                    500, # max_distance
                    250, # current_distance
                    mavutil.mavlink.MAV_DISTANCE_SENSOR_LASER, # type
                    21, # id
                    mavutil.mavlink.MAV_SENSOR_ROTATION_PITCH_270, # orientation
                    255  # covariance
                )
                try:
                    self.wait_text("LandingGear: DEPLOY", check_context=True, timeout=0.1)
                except Exception:
                    continue
                self.progress("Deployed")
                break
            self.disarm_vehicle()

        except Exception as e:
            self.print_exception_caught(e)
            ex = e
        self.context_pop()
        self.reboot_sitl()
        if ex is not None:
            raise ex
def test_gsf(self):
'''test the Gaussian Sum filter'''
ex = None
self.context_push()
try:
self.set_parameter("EK2_ENABLE", 1)
self.reboot_sitl()
self.takeoff(20, mode='LOITER')
self.set_rc(2, 1400)
self.delay_sim_time(5)
self.set_rc(2, 1500)
self.progress("Path: %s" % self.current_onboard_log_filepath())
dfreader = self.dfreader_for_current_onboard_log()
self.do_RTL()
except Exception as e:
self.progress("Caught exception: %s" %
self.get_exception_stacktrace(e))
ex = e
self.context_pop()
self.reboot_sitl()
if ex is not None:
raise ex
# ensure log messages present
want = set(["XKY0", "XKY1", "NKY0", "NKY1"])
still_want = want
while len(still_want):
m = dfreader.recv_match(type=want)
if m is None:
raise NotAchievedException("Did not get %s" % want)
still_want.remove(m.get_type())
    def fly_rangefinder_mavlink(self):
        '''Test the MAVLink rangefinder backend end-to-end.

        Runs the DISTANCE_SENSOR-based subtest, then flies with the SITL
        rf_mavlink simulated peripheral and checks RANGEFINDER and
        DISTANCE_SENSOR output both agree with GLOBAL_POSITION_INT
        relative altitude to within 1m.
        '''
        self.fly_rangefinder_mavlink_distance_sensor()

        # explicit test for the mavlink driver as it doesn't play so nice:
        self.set_parameter("SERIAL5_PROTOCOL", 1)
        self.set_parameter("RNGFND1_TYPE", 10)
        self.customise_SITL_commandline(['--uartF=sim:rf_mavlink'])

        self.change_mode('GUIDED')
        self.wait_ready_to_arm()
        self.arm_vehicle()
        expected_alt = 5
        self.user_takeoff(alt_min=expected_alt)

        tstart = self.get_sim_time()
        while True:
            if self.get_sim_time() - tstart > 5:
                raise NotAchievedException("Mavlink rangefinder not working")
            rf = self.mav.recv_match(type="RANGEFINDER", timeout=1, blocking=True)
            if rf is None:
                raise NotAchievedException("Did not receive rangefinder message")
            gpi = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True, timeout=1)
            if gpi is None:
                raise NotAchievedException("Did not receive GLOBAL_POSITION_INT message")
            # RANGEFINDER.distance is metres; relative_alt is mm:
            if abs(rf.distance - gpi.relative_alt/1000.0) > 1:
                print("rangefinder alt (%s) disagrees with global-position-int.relative_alt (%s)" %
                      (rf.distance, gpi.relative_alt/1000.0))
                continue

            # DISTANCE_SENSOR.current_distance is cm:
            ds = self.mav.recv_match(
                type="DISTANCE_SENSOR",
                timeout=2,
                blocking=True,
            )
            if ds is None:
                raise NotAchievedException("Did not receive DISTANCE_SENSOR message")
            self.progress("Got: %s" % str(ds))
            if abs(ds.current_distance/100.0 - gpi.relative_alt/1000.0) > 1:
                print(
                    "distance sensor.current_distance (%f) disagrees with global-position-int.relative_alt (%s)" %
                    (ds.current_distance/100.0, gpi.relative_alt/1000.0))
                continue
            break
        self.progress("mavlink rangefinder OK")
        self.land_and_disarm()
def fly_rangefinder_driver_maxbotix(self):
ex = None
try:
self.context_push()
self.start_subtest("No messages")
rf = self.mav.recv_match(type="DISTANCE_SENSOR", timeout=5, blocking=True)
if rf is not None:
raise NotAchievedException("Receiving DISTANCE_SENSOR when I shouldn't be")
self.start_subtest("Default address")
self.set_parameter("RNGFND1_TYPE", 2) # maxbotix
self.reboot_sitl()
self.do_timesync_roundtrip()
rf = self.mav.recv_match(type="DISTANCE_SENSOR", timeout=5, blocking=True)
self.progress("Got (%s)" % str(rf))
if rf is None:
raise NotAchievedException("Didn't receive DISTANCE_SENSOR when I should've")
self.start_subtest("Explicitly set to default address")
self.set_parameter("RNGFND1_TYPE", 2) # maxbotix
self.set_parameter("RNGFND1_ADDR", 0x70)
self.reboot_sitl()
self.do_timesync_roundtrip()
rf = self.mav.recv_match(type="DISTANCE_SENSOR", timeout=5, blocking=True)
self.progress("Got (%s)" % str(rf))
if rf is None:
raise NotAchievedException("Didn't receive DISTANCE_SENSOR when I should've")
self.start_subtest("Explicitly set to non-default address")
self.set_parameter("RNGFND1_ADDR", 0x71)
self.reboot_sitl()
self.do_timesync_roundtrip()
rf = self.mav.recv_match(type="DISTANCE_SENSOR", timeout=5, blocking=True)
self.progress("Got (%s)" % str(rf))
if rf is None:
raise NotAchievedException("Didn't receive DISTANCE_SENSOR when I should've")
self.start_subtest("Two MaxBotix RangeFinders")
self.set_parameter("RNGFND1_TYPE", 2) # maxbotix
self.set_parameter("RNGFND1_ADDR", 0x70)
self.set_parameter("RNGFND1_MIN_CM", 150)
self.set_parameter("RNGFND2_TYPE", 2) # maxbotix
self.set_parameter("RNGFND2_ADDR", 0x71)
self.set_parameter("RNGFND2_MIN_CM", 250)
self.reboot_sitl()
self.do_timesync_roundtrip()
for i in [0, 1]:
rf = self.mav.recv_match(
type="DISTANCE_SENSOR",
timeout=5,
blocking=True,
condition="DISTANCE_SENSOR.id==%u" % i
)
self.progress("Got id==%u (%s)" % (i, str(rf)))
if rf is None:
raise NotAchievedException("Didn't receive DISTANCE_SENSOR when I should've")
expected_dist = 150
if i == 1:
expected_dist = 250
if rf.min_distance != expected_dist:
raise NotAchievedException("Unexpected min_cm (want=%u got=%u)" %
(expected_dist, rf.min_distance))
self.context_pop()
except Exception as e:
self.print_exception_caught(e)
ex = e
self.reboot_sitl()
if ex is not None:
raise ex
    def fly_rangefinder_drivers(self):
        '''Fly each simulated serial and I2C rangefinder driver.

        Serial drivers are tested three at a time (one per spare SITL
        uart E/F/G); I2C drivers are configured via RNGFNDx_TYPE and
        tested in batches of up to nine.
        '''
        self.set_parameter("RTL_ALT", 500)
        self.set_parameter("RTL_ALT_TYPE", 1)
        # (simulator name, RNGFNDx_TYPE value) pairs:
        drivers = [
            ("lightwareserial", 8),  # autodetected between this and -binary
            ("lightwareserial-binary", 8),
            ("ulanding_v0", 11),
            ("ulanding_v1", 11),
            ("leddarone", 12),
            ("maxsonarseriallv", 13),
            ("nmea", 17),
            ("wasp", 18),
            ("benewake_tf02", 19),
            ("blping", 23),
            ("benewake_tfmini", 20),
            ("lanbao", 26),
            ("benewake_tf03", 27),
            ("gyus42v2", 31),
        ]
        while len(drivers):
            do_drivers = drivers[0:3]
            drivers = drivers[3:]
            command_line_args = []
            # attach up to three simulated rangefinders to uarts E/F/G
            # (SERIAL4/5/6) and point RNGFND1..3 at them:
            for (offs, cmdline_argument, serial_num) in [(0, '--uartE', 4),
                                                         (1, '--uartF', 5),
                                                         (2, '--uartG', 6)]:
                if len(do_drivers) > offs:
                    (sim_name, rngfnd_param_value) = do_drivers[offs]
                    command_line_args.append("%s=sim:%s" %
                                             (cmdline_argument, sim_name))
                    serial_param_name = "SERIAL%u_PROTOCOL" % serial_num
                    self.set_parameter(serial_param_name, 9) # rangefinder
                    self.set_parameter("RNGFND%u_TYPE" % (offs+1), rngfnd_param_value)
            self.customise_SITL_commandline(command_line_args)
            self.fly_rangefinder_drivers_fly([x[0] for x in do_drivers])

        self.fly_rangefinder_mavlink()

        i2c_drivers = [
            ("maxbotixi2cxl", 2),
        ]
        while len(i2c_drivers):
            do_drivers = i2c_drivers[0:9]
            i2c_drivers = i2c_drivers[9:]
            count = 1
            for d in do_drivers:
                (sim_name, rngfnd_param_value) = d
                self.set_parameter("RNGFND%u_TYPE" % count, rngfnd_param_value)
                count += 1

            self.reboot_sitl()
            self.fly_rangefinder_drivers_fly([x[0] for x in do_drivers])
    def fly_ship_takeoff(self):
        '''Take off from a simulated moving ship deck.

        While on the deck the vehicle should report the ship's
        groundspeed; once airborne it hovers over a fixed point, and
        after landing (the ship having moved on) it is stationary on
        the water.
        '''
        # test ship takeoff
        self.wait_groundspeed(0, 2)
        self.set_parameter("SIM_SHIP_ENABLE", 1)
        self.set_parameter("SIM_SHIP_SPEED", 10)
        self.set_parameter("SIM_SHIP_DSIZE", 2)
        self.wait_ready_to_arm()
        # we should be moving with the ship
        self.wait_groundspeed(9, 11)
        self.takeoff(10)
        # above ship our speed drops to 0
        self.wait_groundspeed(0, 2)
        self.land_and_disarm()
        # ship will have moved on, so we land on the water which isn't moving
        self.wait_groundspeed(0, 2)
def test_parameter_validation(self):
# wait 10 seconds for initialisation
self.delay_sim_time(10)
self.progress("invalid; min must be less than max:")
self.set_parameter("MOT_PWM_MIN", 100)
self.set_parameter("MOT_PWM_MAX", 50)
self.drain_mav()
self.assert_prearm_failure("Check MOT_PWM_MIN/MAX")
self.progress("invalid; min must be less than max (equal case):")
self.set_parameter("MOT_PWM_MIN", 100)
self.set_parameter("MOT_PWM_MAX", 100)
self.drain_mav()
self.assert_prearm_failure("Check MOT_PWM_MIN/MAX")
    def test_alt_estimate_prearm(self):
        '''With all altitude sources disabled, pre-arm should demand an
        altitude estimate, and a force-armed vehicle must not be able to
        enter ALT_HOLD.'''
        self.context_push()
        ex = None
        try:
            # disable barometer so there is no altitude source
            self.set_parameter("SIM_BARO_DISABLE", 1)
            self.set_parameter("SIM_BARO2_DISABL", 1)
            self.wait_gps_disable(position_vertical=True)

            # turn off arming checks (mandatory arming checks will still be run)
            self.set_parameter("ARMING_CHECK", 0)

            # delay 12 sec to allow EKF to lose altitude estimate
            self.delay_sim_time(12)

            self.change_mode("ALT_HOLD")
            self.assert_prearm_failure("Need Alt Estimate")

            # force arm vehicle in stabilize to bypass barometer pre-arm checks
            self.change_mode("STABILIZE")
            self.arm_vehicle()
            self.set_rc(3, 1700)
            try:
                self.change_mode("ALT_HOLD", timeout=10)
            except AutoTestTimeoutException:
                self.progress("PASS not able to set mode without Position : %s" % "ALT_HOLD")

            # check that mode change to ALT_HOLD has failed (it should)
            if self.mode_is("ALT_HOLD"):
                raise NotAchievedException("Changed to ALT_HOLD with no altitude estimate")

        except Exception as e:
            self.print_exception_caught(e)
            ex = e

        self.context_pop()
        self.disarm_vehicle(force=True)
        if ex is not None:
            raise ex
    def test_ekf_source(self):
        '''Check pre-arm validation of EKF3 source-set parameters: an
        out-of-range yaw source and a compass-fallback yaw source with
        all compasses disabled must both block arming.'''
        self.context_push()
        ex = None
        try:
            self.set_parameter("EK3_ENABLE", 1)
            self.set_parameter("AHRS_EKF_TYPE", 3)
            self.wait_ready_to_arm()

            self.start_subtest("bad yaw source")
            self.set_parameter("EK3_SRC3_YAW", 17)  # not a valid source value
            self.assert_prearm_failure("Check EK3_SRC3_YAW")

            # NOTE(review): if assert_prearm_failure raises here, this
            # inner context is never popped (only the outer one is popped
            # below) -- confirm whether that leak matters for later tests
            self.context_push()
            self.start_subtest("missing required yaw source")
            self.set_parameter("EK3_SRC3_YAW", 3)  # External Yaw with Compass Fallback
            self.set_parameter("COMPASS_USE", 0)
            self.set_parameter("COMPASS_USE2", 0)
            self.set_parameter("COMPASS_USE3", 0)
            self.assert_prearm_failure("EK3 sources require Compass")
            self.context_pop()

        except Exception as e:
            self.disarm_vehicle(force=True)
            self.print_exception_caught(e)
            ex = e

        self.context_pop()

        if ex is not None:
            raise ex
    def test_replay_gps_bit(self):
        '''Generate a Replay-capable onboard log for a dual-GPS,
        dual-EKF flight with rangefinder, and return its filepath.

        Non-zero sensor position offsets and AHRS trims are set so the
        Replay tool has non-trivial state to reproduce.
        '''
        self.set_parameters({
            "LOG_REPLAY": 1,
            "LOG_DISARMED": 1,
            "EK3_ENABLE": 1,
            "EK2_ENABLE": 1,
            "AHRS_TRIM_X": 0.01,
            "AHRS_TRIM_Y": -0.03,
            "GPS_TYPE2": 1,
            "GPS_POS1_X": 0.1,
            "GPS_POS1_Y": 0.2,
            "GPS_POS1_Z": 0.3,
            "GPS_POS2_X": -0.1,
            "GPS_POS2_Y": -0.02,
            "GPS_POS2_Z": -0.31,
            "INS_POS1_X": 0.12,
            "INS_POS1_Y": 0.14,
            "INS_POS1_Z": -0.02,
            "INS_POS2_X": 0.07,
            "INS_POS2_Y": 0.012,
            "INS_POS2_Z": -0.06,
            "RNGFND1_TYPE": 1,
            "RNGFND1_PIN": 0,
            "RNGFND1_SCALING": 30,
            "RNGFND1_POS_X": 0.17,
            "RNGFND1_POS_Y": -0.07,
            "RNGFND1_POS_Z": -0.005,
            "SIM_SONAR_SCALE": 30,
            "SIM_GPS2_DISABLE": 0,
        })
        self.reboot_sitl()

        # capture the log path before flying; this is the log Replay consumes
        current_log_filepath = self.current_onboard_log_filepath()
        self.progress("Current log path: %s" % str(current_log_filepath))

        self.change_mode("LOITER")
        self.wait_ready_to_arm(require_absolute=True)
        self.arm_vehicle()
        self.takeoffAndMoveAway()
        self.do_RTL()

        self.reboot_sitl()

        return current_log_filepath
    def test_replay_beacon_bit(self):
        '''Generate a Replay-capable onboard log for a beacon-position
        flight and return its filepath.'''
        self.set_parameter("LOG_REPLAY", 1)
        self.set_parameter("LOG_DISARMED", 1)

        old_onboard_logs = sorted(self.log_list())
        self.fly_beacon_position()
        new_onboard_logs = sorted(self.log_list())

        # NOTE(review): index 2 picks the third new log created during
        # fly_beacon_position (the optical-flow variant below uses
        # index 0) -- presumably earlier logs are from intermediate
        # reboots; confirm against fly_beacon_position's reboot count
        log_difference = [x for x in new_onboard_logs if x not in old_onboard_logs]
        return log_difference[2]
def test_replay_optical_flow_bit(self):
self.set_parameter("LOG_REPLAY", 1)
self.set_parameter("LOG_DISARMED", 1)
old_onboard_logs = sorted(self.log_list())
self.fly_optical_flow_limits()
new_onboard_logs = sorted(self.log_list())
log_difference = [x for x in new_onboard_logs if x not in old_onboard_logs]
print("log difference: %s" % str(log_difference))
return log_difference[0]
    def test_gps_blending(self):
        '''ensure we get dataflash log messages for blended instance'''
        self.context_push()
        ex = None
        try:
            # configure:
            self.set_parameter("GPS_TYPE2", 1)
            self.set_parameter("SIM_GPS2_TYPE", 1)
            self.set_parameter("SIM_GPS2_DISABLE", 0)
            self.set_parameter("GPS_AUTO_SWITCH", 2)  # blending
            self.reboot_sitl()

            # ensure we're seeing the second GPS:
            tstart = self.get_sim_time()
            while True:
                if self.get_sim_time_cached() - tstart > 60:
                    raise NotAchievedException("Did not get good GPS2_RAW message")
                m = self.mav.recv_match(type='GPS2_RAW', blocking=True, timeout=1)
                self.progress("%s" % str(m))
                if m is None:
                    continue
                if m.lat == 0:
                    continue
                break

            # create a log we can expect blended data to appear in:
            self.change_mode('LOITER')
            self.wait_ready_to_arm()
            self.arm_vehicle()
            self.delay_sim_time(5)
            self.disarm_vehicle()

            # inspect generated log for messages:
            dfreader = self.dfreader_for_current_onboard_log()
            # GPS log instances 0 and 1 are the physical receivers,
            # 2 is the blended instance:
            wanted = set([0, 1, 2])
            seen_primary_change = False
            while True:
                m = dfreader.recv_match(type=["GPS", "EV"]) # disarmed
                if m is None:
                    break
                mtype = m.get_type()
                if mtype == 'GPS':
                    try:
                        wanted.remove(m.I)
                    except KeyError:
                        continue
                elif mtype == 'EV':
                    if m.Id == 67:  # GPS_PRIMARY_CHANGED
                        seen_primary_change = True
                if len(wanted) == 0 and seen_primary_change:
                    break
            if len(wanted):
                raise NotAchievedException("Did not get all three GPS types")
            if not seen_primary_change:
                raise NotAchievedException("Did not see primary change")
        except Exception as e:
            self.progress("Caught exception: %s" %
                          self.get_exception_stacktrace(e))
            ex = e

        self.context_pop()

        self.reboot_sitl()

        if ex is not None:
            raise ex
def test_callisto(self):
self.customise_SITL_commandline(
["--defaults", ','.join(self.model_defaults_filepath('Callisto')), ],
model="octa-quad:@ROMFS/models/Callisto.json",
wipe=True,
)
self.takeoff(10)
self.do_RTL()
    def fly_each_frame(self):
        '''Take off and fly briefly on every multicopter frame type known
        to vehicleinfo, skipping known-broken and externally-simulated
        frames.  Message hooks watch for excessive yaw rate on takeoff
        and excessive roll/pitch while yawing in ALT_HOLD.'''
        vinfo = vehicleinfo.VehicleInfo()
        copter_vinfo_options = vinfo.options[self.vehicleinfo_key()]
        # frame name -> reason it cannot be flown by this test:
        known_broken_frames = {
            'cwx': "missing defaults file",
            'deca-cwx': 'missing defaults file',
            'djix': "missing defaults file",
            'heli-compound': "wrong binary, different takeoff regime",
            'heli-dual': "wrong binary, different takeoff regime",
            'heli': "wrong binary, different takeoff regime",
            'heli-blade360': "wrong binary, different takeoff regime",
            'tri': "bad yaw rate",
        }
        for frame in sorted(copter_vinfo_options["frames"].keys()):
            self.start_subtest("Testing frame (%s)" % str(frame))
            if frame in known_broken_frames:
                self.progress("Actually, no I'm not - it is known-broken (%s)" %
                              (known_broken_frames[frame]))
                continue
            frame_bits = copter_vinfo_options["frames"][frame]
            print("frame_bits: %s" % str(frame_bits))
            if frame_bits.get("external", False):
                self.progress("Actually, no I'm not - it is an external simulation")
                continue
            model = frame_bits.get("model", frame)
            # the model string for Callisto has crap in it.... we
            # should really have another entry in the vehicleinfo data
            # to carry the path to the JSON.
            actual_model = model.split(":")[0]
            defaults = self.model_defaults_filepath(actual_model)
            if type(defaults) != list:
                defaults = [defaults]
            self.customise_SITL_commandline(
                ["--defaults", ','.join(defaults), ],
                model=model,
                wipe=True,
            )

            # add a listener that verifies yaw looks good:
            def verify_yaw(mav, m):
                if m.get_type() != 'ATTITUDE':
                    return
                yawspeed_thresh_rads = math.radians(10)
                if m.yawspeed > yawspeed_thresh_rads:
                    raise NotAchievedException("Excessive yaw on takeoff: %f deg/s > %f deg/s (frame=%s)" %
                                               (math.degrees(m.yawspeed), math.degrees(yawspeed_thresh_rads), frame))
            self.install_message_hook(verify_yaw)
            self.takeoff(10)
            self.remove_message_hook(verify_yaw)
            self.hover()
            self.change_mode('ALT_HOLD')
            self.delay_sim_time(1)

            # while commanding full yaw the vehicle should stay level:
            def verify_rollpitch(mav, m):
                if m.get_type() != 'ATTITUDE':
                    return
                pitch_thresh_rad = math.radians(2)
                if m.pitch > pitch_thresh_rad:
                    raise NotAchievedException("Excessive pitch %f deg > %f deg" %
                                               (math.degrees(m.pitch), math.degrees(pitch_thresh_rad)))
                roll_thresh_rad = math.radians(2)
                if m.roll > roll_thresh_rad:
                    raise NotAchievedException("Excessive roll %f deg > %f deg" %
                                               (math.degrees(m.roll), math.degrees(roll_thresh_rad)))
            self.install_message_hook(verify_rollpitch)
            for i in range(5):
                self.set_rc(4, 2000)
                self.delay_sim_time(0.5)
                self.set_rc(4, 1500)
                self.delay_sim_time(5)
            self.remove_message_hook(verify_rollpitch)

            self.do_RTL()
def test_replay(self):
'''test replay correctness'''
self.progress("Building Replay")
util.build_SITL('tool/Replay', clean=False, configure=False)
self.test_replay_bit(self.test_replay_gps_bit)
self.test_replay_bit(self.test_replay_beacon_bit)
self.test_replay_bit(self.test_replay_optical_flow_bit)
    def test_replay_bit(self, bit):
        '''Run one replay scenario.

        bit is a callable which flies a scenario and returns the
        filepath of the onboard log it generated; that log is fed to
        the Replay tool and the resulting replay log is validated with
        check_replay.py.
        '''
        self.context_push()
        current_log_filepath = bit()

        self.progress("Running replay on (%s)" % current_log_filepath)

        util.run_cmd(
            ['build/sitl/tool/Replay', current_log_filepath],
            directory=util.topdir(),
            checkfail=True,
            show=True,
            output=True,
        )

        self.context_pop()

        # Replay writes its output as the newest onboard log:
        replay_log_filepath = self.current_onboard_log_filepath()
        self.progress("Replay log path: %s" % str(replay_log_filepath))

        check_replay = util.load_local_module("Tools/Replay/check_replay.py")

        ok = check_replay.check_log(replay_log_filepath, self.progress, verbose=True)
        if not ok:
            raise NotAchievedException("check_replay failed")
    def DefaultIntervalsFromFiles(self):
        '''Check default message intervals are read from
        message-intervals-chan0.txt.

        Writes a file of "message-id interval-ms" pairs, reboots with
        wiped parameters, then checks the resulting stream rates
        (id 30 @ 50ms -> ATTITUDE at 20Hz; id 29 @ 200ms ->
        SCALED_PRESSURE at 5Hz).
        '''
        ex = None
        intervals_filepath = util.reltopdir("message-intervals-chan0.txt")
        self.progress("Using filepath (%s)" % intervals_filepath)
        try:
            with open(intervals_filepath, "w") as f:
                f.write("""30 50
28 100
29 200
""")

            # other tests may have explicitly set rates, so wipe parameters:
            def custom_stream_rate_setter():
                for stream in mavutil.mavlink.MAV_DATA_STREAM_EXTRA3, mavutil.mavlink.MAV_DATA_STREAM_RC_CHANNELS:
                    self.set_streamrate(5, stream=stream)

            self.customise_SITL_commandline(
                [],
                wipe=True,
                set_streamrate_callback=custom_stream_rate_setter,
            )

            self.assert_message_rate_hz("ATTITUDE", 20)
            self.assert_message_rate_hz("SCALED_PRESSURE", 5)

        except Exception as e:
            self.print_exception_caught(e)
            ex = e

        # always remove the intervals file so it can't affect other tests:
        os.unlink(intervals_filepath)

        self.reboot_sitl()

        if ex is not None:
            raise ex
def BaroDrivers(self):
sensors = [
("MS5611", 2),
]
for (name, bus) in sensors:
self.context_push()
if bus is not None:
self.set_parameter("BARO_EXT_BUS", bus)
self.set_parameter("BARO_PROBE_EXT", 1 << 2)
self.reboot_sitl()
self.wait_ready_to_arm()
self.arm_vehicle()
# insert listener to compare airspeeds:
messages = [None, None, None]
global count
count = 0
def check_pressure(mav, m):
global count
m_type = m.get_type()
count += 1
# if count > 500:
# if press_abs[0] is None or press_abs[1] is None:
# raise NotAchievedException("Not receiving messages")
if m_type == 'SCALED_PRESSURE3':
off = 2
elif m_type == 'SCALED_PRESSURE2':
off = 1
elif m_type == 'SCALED_PRESSURE':
off = 0
else:
return
messages[off] = m
if None in messages:
return
first = messages[0]
for msg in messages[1:]:
delta_press_abs = abs(first.press_abs - msg.press_abs)
if delta_press_abs > 0.5: # 50 Pa leeway
raise NotAchievedException("Press_Abs mismatch (press1=%s press2=%s)" % (first, msg))
delta_temperature = abs(first.temperature - msg.temperature)
if delta_temperature > 300: # that's 3-degrees leeway
raise NotAchievedException("Temperature mismatch (t1=%s t2=%s)" % (first, msg))
self.install_message_hook_context(check_pressure)
self.fly_mission("copter_mission.txt", strict=False)
if None in messages:
raise NotAchievedException("Missing a message")
self.context_pop()
self.reboot_sitl()
    def test_copter_gps_zero(self):
        '''Check losing GPS mid-flight does not send the vehicle back to
        a zeroed origin; it should failsafe to LAND instead.

        NOTE(review): this is nearly identical to RefindGPS below, which
        additionally re-enables the GPS and RTLs afterwards; consider
        consolidating.
        '''
        # https://github.com/ArduPilot/ardupilot/issues/14236
        self.progress("arm the vehicle and takeoff in Guided")
        self.takeoff(20, mode='GUIDED')
        self.progress("fly 50m North (or whatever)")
        old_pos = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
        self.fly_guided_move_global_relative_alt(50, 0, 20)
        self.set_parameter('GPS_TYPE', 0)  # kill the GPS
        self.drain_mav()
        tstart = self.get_sim_time()
        while True:
            if self.get_sim_time_cached() - tstart > 30 and self.mode_is('LAND'):
                self.progress("Bug not reproduced")
                break
            m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True, timeout=1)
            self.progress("Received (%s)" % str(m))
            if m is None:
                raise NotAchievedException("No GLOBAL_POSITION_INT?!")
            pos_delta = self.get_distance_int(old_pos, m)
            self.progress("Distance: %f" % pos_delta)
            if pos_delta < 5:
                raise NotAchievedException("Bug reproduced - returned to near origin")
        self.wait_disarmed()
        self.reboot_sitl()
    def test_SMART_RTL(self):
        '''Fly a few guided legs, then check SMART_RTL retraces them in
        reverse order back to the start and disarms.'''
        self.context_push()
        ex = None
        try:
            self.progress("arm the vehicle and takeoff in Guided")
            self.takeoff(20, mode='GUIDED')
            self.progress("fly around a bit (or whatever)")
            # (north, east, alt) legs in metres, flown in order:
            locs = [
                (50, 0, 20),
                (-50, 50, 20),
                (-50, 0, 20),
            ]
            for (lat, lng, alt) in locs:
                self.fly_guided_move_local(lat, lng, alt)

            self.change_mode('SMART_RTL')
            # the return path should visit the legs in reverse:
            for (lat, lng, alt) in reversed(locs):
                self.wait_distance_to_local_position(
                    (lat, lng, -alt),
                    0,
                    10,
                    timeout=60
                )
            self.wait_disarmed()

        except Exception as e:
            self.print_exception_caught(e)
            ex = e
            self.disarm_vehicle(force=True)

        self.context_pop()
        self.reboot_sitl()

        if ex is not None:
            raise ex
    def get_ground_effect_duration_from_current_onboard_log(self, bit, ignore_multi=False):
        '''Return a list of durations (in seconds) for which the given
        bit was set in the XKF4.SS (EKF solution status) field of the
        current onboard log.  Each contiguous run of the bit being set
        contributes one duration; a run still active at end-of-log is
        closed against the final message.

        NOTE(review): ignore_multi is accepted but currently unused --
        the original docstring claimed the method would "die" on
        multiple blocks unless it was set, but no such check exists;
        callers index into the returned list themselves.
        '''
        ret = []
        dfreader = self.dfreader_for_current_onboard_log()
        # timestamp (us) at which the current run of the bit started,
        # or None if the bit is currently clear:
        seen_expected_start_TimeUS = None
        first = None
        last = None
        while True:
            m = dfreader.recv_match(type="XKF4")
            if m is None:
                break
            last = m
            if first is None:
                first = m
#            self.progress("%s" % str(m))
            expected = m.SS & (1 << bit)
            if expected:
                if seen_expected_start_TimeUS is None:
                    seen_expected_start_TimeUS = m.TimeUS
                    continue
            else:
                if seen_expected_start_TimeUS is not None:
                    duration = (m.TimeUS - seen_expected_start_TimeUS)/1000000.0
                    ret.append(duration)
                    seen_expected_start_TimeUS = None
        if seen_expected_start_TimeUS is not None:
            # bit was still set at end-of-log; close the run there
            duration = (last.TimeUS - seen_expected_start_TimeUS)/1000000.0
            ret.append(duration)
        return ret
    def get_takeoffexpected_durations_from_current_onboard_log(self, ignore_multi=False):
        '''Durations for which the EKF's takeoff-expected flag
        (XKF4.SS bit 11) was set in the current onboard log.'''
        return self.get_ground_effect_duration_from_current_onboard_log(11, ignore_multi=ignore_multi)
    def get_touchdownexpected_durations_from_current_onboard_log(self, ignore_multi=False):
        '''Durations for which the EKF's touchdown-expected flag
        (XKF4.SS bit 12) was set in the current onboard log.'''
        return self.get_ground_effect_duration_from_current_onboard_log(12, ignore_multi=ignore_multi)
    def ThrowDoubleDrop(self):
        '''Test THROW mode (drop variant) twice in one session: first
        recovering to a hover and landing in ALT_HOLD, then again with
        THROW_NEXTMODE=AUTO running an uploaded square mission.'''
        # test boomerang mode:
        self.progress("Getting a lift to altitude")
        self.set_parameters({
            "SIM_SHOVE_Z": -11,
            "THROW_TYPE": 1,  # drop
            "MOT_SPOOL_TIME": 2,
        })

        self.change_mode('THROW')
        self.wait_ready_to_arm()
        self.arm_vehicle()
        try:
            self.set_parameter("SIM_SHOVE_TIME", 30000)
        except ValueError:
            # the shove resets this to zero
            pass

        self.wait_altitude(100, 1000, timeout=100, relative=True)
        self.context_collect('STATUSTEXT')
        # THROW mode's state machine announces each phase via statustext:
        self.wait_statustext("throw detected - spooling motors", check_context=True, timeout=10)
        self.wait_statustext("throttle is unlimited - uprighting", check_context=True)
        self.wait_statustext("uprighted - controlling height", check_context=True)
        self.wait_statustext("height achieved - controlling position", check_context=True)
        self.progress("Waiting for still")
        self.wait_speed_vector(Vector3(0, 0, 0))
        self.change_mode('ALT_HOLD')
        self.set_rc(3, 1000)
        self.wait_disarmed(timeout=90)
        self.zero_throttle()

        self.progress("second flight")
        self.upload_square_mission_items_around_location(self.poll_home_position())

        self.set_parameters({
            "THROW_NEXTMODE": 3,  # auto
        })

        self.change_mode('THROW')
        self.wait_ready_to_arm()
        self.arm_vehicle()
        try:
            self.set_parameter("SIM_SHOVE_TIME", 30000)
        except ValueError:
            # the shove resets this to zero
            pass

        self.wait_altitude(100, 1000, timeout=100, relative=True)
        self.wait_statustext("throw detected - spooling motors", check_context=True, timeout=10)
        self.wait_statustext("throttle is unlimited - uprighting", check_context=True)
        self.wait_statustext("uprighted - controlling height", check_context=True)
        self.wait_statustext("height achieved - controlling position", check_context=True)
        # THROW_NEXTMODE should hand over to the mission:
        self.wait_mode('AUTO')
        self.wait_disarmed(timeout=240)
def GroundEffectCompensation_takeOffExpected(self):
self.change_mode('ALT_HOLD')
self.set_parameter("LOG_FILE_DSRMROT", 1)
self.progress("Making sure we'll have a short log to look at")
self.wait_ready_to_arm()
self.arm_vehicle()
self.disarm_vehicle()
# arm the vehicle and let it disarm normally. This should
# yield a log where the EKF considers a takeoff imminent until
# disarm
self.start_subtest("Check ground effect compensation remains set in EKF while we're at idle on the ground")
self.arm_vehicle()
self.wait_disarmed()
durations = self.get_takeoffexpected_durations_from_current_onboard_log()
duration = durations[0]
want = 9
self.progress("takeoff-expected duration: %fs" % (duration,))
if duration < want: # assumes default 10-second DISARM_DELAY
raise NotAchievedException("Should have been expecting takeoff for longer than %fs (want>%f)" %
(duration, want))
self.start_subtest("takeoffExpected should be false very soon after we launch into the air")
self.takeoff(mode='ALT_HOLD', alt_min=5)
self.change_mode('LAND')
self.wait_disarmed()
durations = self.get_takeoffexpected_durations_from_current_onboard_log(ignore_multi=True)
self.progress("touchdown-durations: %s" % str(durations))
duration = durations[0]
self.progress("takeoff-expected-duration %f" % (duration,))
want_lt = 5
if duration >= want_lt:
raise NotAchievedException("Was expecting takeoff for longer than expected; got=%f want<=%f" %
(duration, want_lt))
    def MAV_CMD_CONDITION_YAW_absolute(self):
        '''Test MAV_CMD_CONDITION_YAW with absolute headings: hold the
        initial heading, yaw CW 60 degrees and back CCW at a commanded
        rate, with a message hook failing the test if the yaw rate ever
        greatly exceeds the commanded rate.'''
        self.start_subtest("absolute")
        self.takeoff(20, mode='GUIDED')

        m = self.mav.recv_match(type='VFR_HUD', blocking=True)
        initial_heading = m.heading

        self.progress("Ensuring initial heading is steady")
        target = initial_heading
        self.run_cmd(
            mavutil.mavlink.MAV_CMD_CONDITION_YAW,
            target,  # target angle
            10,  # degrees/second
            1,  # -1 is counter-clockwise, 1 clockwise
            0,  # 1 for relative, 0 for absolute
            0,  # p5
            0,  # p6
            0,  # p7
        )
        self.wait_heading(target, minimum_duration=2, timeout=50)

        degsecond = 2

        def rate_watcher(mav, m):
            if m.get_type() != 'ATTITUDE':
                return
            # allow up to 5x the commanded rate before failing:
            if abs(math.degrees(m.yawspeed)) > 5*degsecond:
                raise NotAchievedException("Moved too fast (%f>%f)" %
                                           (math.degrees(m.yawspeed), 5*degsecond))
        self.install_message_hook_context(rate_watcher)
        self.progress("Yaw CW 60 degrees")
        target = initial_heading + 60
        part_way_target = initial_heading + 10
        self.run_cmd(
            mavutil.mavlink.MAV_CMD_CONDITION_YAW,
            target,  # target angle
            degsecond,  # degrees/second
            1,  # -1 is counter-clockwise, 1 clockwise
            0,  # 1 for relative, 0 for absolute
            0,  # p5
            0,  # p6
            0,  # p7
        )
        # pass through an intermediate heading first to prove we're
        # turning gradually rather than snapping to the target:
        self.wait_heading(part_way_target)
        self.wait_heading(target, minimum_duration=2)

        self.progress("Yaw CCW 60 degrees")
        target = initial_heading
        part_way_target = initial_heading + 30
        self.run_cmd(
            mavutil.mavlink.MAV_CMD_CONDITION_YAW,
            target,  # target angle
            degsecond,  # degrees/second
            -1,  # -1 is counter-clockwise, 1 clockwise
            0,  # 1 for relative, 0 for absolute
            0,  # p5
            0,  # p6
            0,  # p7
        )
        self.wait_heading(part_way_target)
        self.wait_heading(target, minimum_duration=2)

        self.do_RTL()
    def MAV_CMD_CONDITION_YAW_relative(self):
        '''Placeholder for testing MAV_CMD_CONDITION_YAW with relative
        headings; not yet implemented.'''
        pass
def MAV_CMD_CONDITION_YAW(self):
self.MAV_CMD_CONDITION_YAW_absolute()
self.MAV_CMD_CONDITION_YAW_relative()
    def GroundEffectCompensation_touchDownExpected(self):
        '''Check the EKF touchdown-expected (ground-effect compensation)
        flag is set for roughly the expected time during a LAND descent.'''
        self.zero_throttle()
        self.change_mode('ALT_HOLD')
        self.set_parameter("LOG_FILE_DSRMROT", 1)
        self.progress("Making sure we'll have a short log to look at")
        self.wait_ready_to_arm()
        self.arm_vehicle()
        self.disarm_vehicle()

        self.start_subtest("Make sure touchdown-expected duration is about right")
        self.takeoff(20, mode='ALT_HOLD')
        self.change_mode('LAND')
        self.wait_disarmed()

        durations = self.get_touchdownexpected_durations_from_current_onboard_log(ignore_multi=True)
        self.progress("touchdown-durations: %s" % str(durations))
        # the last run of the bit corresponds to the final descent:
        duration = durations[-1]
        expected = 23  # this is the time in the final descent phase of LAND
        if abs(duration - expected) > 5:
            raise NotAchievedException("Was expecting roughly %fs of touchdown expected, got %f" % (expected, duration))
def upload_square_mission_items_around_location(self, loc):
alt = 20
loc.alt = alt
items = [
(mavutil.mavlink.MAV_CMD_NAV_TAKEOFF, 0, 0, alt)
]
for (ofs_n, ofs_e) in (20, 20), (20, -20), (-20, -20), (-20, 20), (20, 20):
items.append((mavutil.mavlink.MAV_CMD_NAV_WAYPOINT, ofs_n, ofs_e, alt))
items.append((mavutil.mavlink.MAV_CMD_NAV_RETURN_TO_LAUNCH, 0, 0, 0))
self.upload_simple_relhome_mission(items)
    def RefindGPS(self):
        '''Regression test: after losing GPS the vehicle must not navigate
        back towards the (stale) origin; it should fall back to LAND.'''
        # https://github.com/ArduPilot/ardupilot/issues/14236
        self.progress("arm the vehicle and takeoff in Guided")
        self.takeoff(20, mode='GUIDED')
        self.progress("fly 50m North (or whatever)")
        old_pos = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True)
        self.fly_guided_move_global_relative_alt(50, 0, 20)
        # remove the GPS entirely
        self.set_parameter('GPS_TYPE', 0)
        self.drain_mav()
        tstart = self.get_sim_time()
        while True:
            # success: vehicle gave up and entered LAND without coming home
            if self.get_sim_time_cached() - tstart > 30 and self.mode_is('LAND'):
                self.progress("Bug not reproduced")
                break
            m = self.mav.recv_match(type='GLOBAL_POSITION_INT', blocking=True, timeout=1)
            self.progress("Received (%s)" % str(m))
            if m is None:
                raise NotAchievedException("No GLOBAL_POSITION_INT?!")
            pos_delta = self.get_distance_int(old_pos, m)
            self.progress("Distance: %f" % pos_delta)
            # failure: vehicle navigated back towards where it lost GPS lock
            if pos_delta < 5:
                raise NotAchievedException("Bug reproduced - returned to near origin")
        self.set_parameter('GPS_TYPE', 1)
        self.do_RTL()
# a wrapper around all the 1A,1B,1C..etc tests for travis
def tests1(self):
ret = ([])
ret.extend(self.tests1a())
ret.extend(self.tests1b())
ret.extend(self.tests1c())
ret.extend(self.tests1d())
ret.extend(self.tests1e())
return ret
    def FETtecESC_flight(self):
        '''fly with servo outputs from FETtec ESC'''
        self.start_subtest("FETtec ESC flight")
        # strict=False: the mission file may contain items SITL cannot verify
        num_wp = self.load_mission("copter_mission.txt", strict=False)
        self.fly_loaded_mission(num_wp)
    def FETtecESC_esc_power_checks(self):
        '''Make sure state machine copes with ESCs rebooting'''
        self.start_subtest("FETtec ESC reboot")
        self.wait_ready_to_arm()
        self.context_collect('STATUSTEXT')
        self.progress("Turning off an ESC off ")
        # SIM_FTOWESC_POW is a bitmask of powered (simulated) ESCs
        mask = int(self.get_parameter("SIM_FTOWESC_POW"))
        for mot_id_to_kill in 1, 2:
            # power-cycle this ESC twice; each time the prearm check must
            # first fail (no telemetry) and then recover
            self.progress("Turning ESC=%u off" % mot_id_to_kill)
            self.set_parameter("SIM_FTOWESC_POW", mask & ~(1 << mot_id_to_kill))
            self.assert_prearm_failure("are not sending tel")
            self.progress("Turning it back on")
            self.set_parameter("SIM_FTOWESC_POW", mask)
            self.wait_ready_to_arm()
            self.progress("Turning ESC=%u off (again)" % mot_id_to_kill)
            self.set_parameter("SIM_FTOWESC_POW", mask & ~(1 << mot_id_to_kill))
            self.assert_prearm_failure("are not sending tel")
            self.progress("Turning it back on")
            self.set_parameter("SIM_FTOWESC_POW", mask)
            self.wait_ready_to_arm()
        # finally, kill and restore every ESC at once
        self.progress("Turning all ESCs off")
        self.set_parameter("SIM_FTOWESC_POW", 0)
        self.assert_prearm_failure("are not sending tel")
        self.progress("Turning them back on")
        self.set_parameter("SIM_FTOWESC_POW", mask)
        self.wait_ready_to_arm()
    def fettec_assert_bad_mask(self, mask):
        '''assert the mask is bad for fettec driver'''
        self.start_subsubtest("Checking mask (%s) is bad" % (mask,))
        self.context_push()
        self.set_parameter("SERVO_FTW_MASK", mask)
        self.reboot_sitl()
        tstart = self.get_sim_time()
        while True:
            if self.get_sim_time_cached() - tstart > 20:
                raise NotAchievedException("Expected mask to be only problem within 20 seconds")
            # poll until "Invalid motor mask" becomes the reported prearm
            # failure; other transient failures may appear first after reboot
            try:
                self.assert_prearm_failure("Invalid motor mask")
                break
            except NotAchievedException:
                self.delay_sim_time(1)
        self.context_pop()
        self.reboot_sitl()
    def fettec_assert_good_mask(self, mask):
        '''assert the mask is good for fettec driver'''
        # (docstring previously said "bad" - copy/paste from
        # fettec_assert_bad_mask; this helper checks the vehicle becomes
        # ready to arm, i.e. the mask is accepted)
        self.start_subsubtest("Checking mask (%s) is good" % (mask,))
        self.context_push()
        self.set_parameter("SERVO_FTW_MASK", mask)
        self.reboot_sitl()
        self.wait_ready_to_arm()
        self.context_pop()
        self.reboot_sitl()
    def FETtecESC_safety_switch(self):
        '''Check the safety switch stops and restarts FETtec motor output,
        observed via ESC telemetry RPM.'''
        # first motor configured in the FETtec mask (mask bits are 0-based,
        # motor numbering is 1-based)
        mot = self.find_first_set_bit(int(self.get_parameter("SERVO_FTW_MASK"))) + 1
        self.wait_esc_telem_rpm(mot, 0, 0)
        self.wait_ready_to_arm()
        self.context_push()
        self.set_parameter("DISARM_DELAY", 0)
        self.arm_vehicle()
        # we have to wait for a while for the arming tone to go out
        # before the motors will spin:
        self.wait_esc_telem_rpm(
            esc=mot,
            rpm_min=17640,
            rpm_max=17640,
            minimum_duration=2,
            timeout=5,
        )
        # engaging the safety switch must stop the motor...
        self.set_safetyswitch_on()
        self.wait_esc_telem_rpm(mot, 0, 0)
        # ...and releasing it must bring it back to speed
        self.set_safetyswitch_off()
        self.wait_esc_telem_rpm(
            esc=mot,
            rpm_min=17640,
            rpm_max=17640,
            minimum_duration=2,
            timeout=5,
        )
        self.context_pop()
        self.wait_disarmed()
def FETtecESC_btw_mask_checks(self):
'''ensure prearm checks work as expected'''
for bad_mask in [0b1000000000000, 0b10100000000000]:
self.fettec_assert_bad_mask(bad_mask)
for good_mask in [0b00001, 0b00101, 0b110000000000]:
self.fettec_assert_good_mask(good_mask)
    def FETtecESC(self):
        '''Top-level FETtec OneWire ESC test: configure the simulated ESC,
        then run the safety-switch, power-cycle, mask and flight subtests.'''
        self.set_parameters({
            "SERIAL5_PROTOCOL": 38,
            # motors on outputs 4,6,7,8 (mask bits 3,5,6,7)
            "SERVO_FTW_MASK": 0b11101000,
            "SIM_FTOWESC_ENA": 1,
            "SERVO1_FUNCTION": 0,
            "SERVO2_FUNCTION": 0,
            "SERVO3_FUNCTION": 0,
            "SERVO4_FUNCTION": 33,
            "SERVO5_FUNCTION": 0,
            "SERVO6_FUNCTION": 34,
            "SERVO7_FUNCTION": 35,
            "SERVO8_FUNCTION": 36,
        })
        # attach the simulated FETtec ESC to the serial port configured above
        self.customise_SITL_commandline(["--uartF=sim:fetteconewireesc"])
        self.FETtecESC_safety_switch()
        self.FETtecESC_esc_power_checks()
        self.FETtecESC_btw_mask_checks()
        self.FETtecESC_flight()
    def tests1a(self):
        '''return group 1a of tests; each entry is (name, description, method)'''
        ret = super(AutoTestCopter, self).tests()  # about 5 mins and ~20 initial tests from autotest/common.py
        ret.extend([
            ("NavDelayTakeoffAbsTime",
             "Fly Nav Delay (takeoff)",
             self.fly_nav_takeoff_delay_abstime),  # 19s
            ("NavDelayAbsTime",
             "Fly Nav Delay (AbsTime)",
             self.fly_nav_delay_abstime),  # 20s
            ("NavDelay",
             "Fly Nav Delay",
             self.fly_nav_delay),  # 19s
            ("GuidedSubModeChange",
             "Test submode change",
             self.fly_guided_change_submode),
            ("MAV_CMD_CONDITION_YAW",
             "Test response to MAV_CMD_CONDITION_YAW",
             self.MAV_CMD_CONDITION_YAW),
            ("LoiterToAlt",
             "Loiter-To-Alt",
             self.fly_loiter_to_alt),  # 25s
            ("PayLoadPlaceMission",
             "Payload Place Mission",
             self.fly_payload_place_mission),  # 44s
            ("PrecisionLoiterCompanion",
             "Precision Loiter (Companion)",
             self.fly_precision_companion),  # 29s
            ("PrecisionLandingSITL",
             "Precision Landing drivers (SITL)",
             self.fly_precision_landing_drivers),  # 29s
            ("SetModesViaModeSwitch",
             "Set modes via modeswitch",
             self.test_setting_modes_via_modeswitch),
            ("SetModesViaAuxSwitch",
             "Set modes via auxswitch",
             self.test_setting_modes_via_auxswitch),
            ("AuxSwitchOptions",
             "Test random aux mode options",
             self.test_aux_switch_options),
            ("AuxFunctionsInMission",
             "Test use of auxilliary functions in missions",
             self.test_aux_functions_in_mission),
            ("AutoTune",
             "Fly AUTOTUNE mode",
             self.fly_autotune),  # 73s
        ])
        return ret
    def tests1b(self):
        '''return group 1b of tests; each entry is (name, description, method)'''
        ret = ([
            ("ThrowMode", "Fly Throw Mode", self.fly_throw_mode),
            ("BrakeMode", "Fly Brake Mode", self.fly_brake_mode),
            ("RecordThenPlayMission",
             "Use switches to toggle in mission, then fly it",
             self.fly_square),  # 27s
            ("ThrottleFailsafe",
             "Test Throttle Failsafe",
             self.fly_throttle_failsafe),  # 173s
            ("GCSFailsafe",
             "Test GCS Failsafe",
             self.fly_gcs_failsafe),  # 239s
            # this group has the smallest runtime right now at around
            #  5mins, so add more tests here, till its around
            #  9-10mins, then make a new group
        ])
        return ret
    def tests1c(self):
        '''return group 1c of tests; each entry is (name, description, method)'''
        ret = ([
            ("BatteryFailsafe",
             "Fly Battery Failsafe",
             self.fly_battery_failsafe),  # 164s
            ("VibrationFailsafe",
             "Test Vibration Failsafe",
             self.test_vibration_failsafe),
            ("StabilityPatch",
             "Fly stability patch",
             lambda: self.fly_stability_patch(30)),  # 17s
            ("OBSTACLE_DISTANCE_3D",
             "Test proximity avoidance slide behaviour in 3D",
             self.OBSTACLE_DISTANCE_3D),  # ??s
            ("AC_Avoidance_Proximity",
             "Test proximity avoidance slide behaviour",
             self.fly_proximity_avoidance_test),  # 41s
            ("AC_Avoidance_Fence",
             "Test fence avoidance slide behaviour",
             self.fly_fence_avoidance_test),
            ("AC_Avoidance_Beacon",
             "Test beacon avoidance slide behaviour",
             self.fly_beacon_avoidance_test),  # 28s
            ("BaroWindCorrection",
             "Test wind estimation and baro position error compensation",
             self.fly_wind_baro_compensation),
            ("SetpointGlobalPos",
             "Test setpoint global position",
             self.test_set_position_global_int),
            ("ThrowDoubleDrop",
             "Test a more complicated drop-mode scenario",
             self.ThrowDoubleDrop),
            ("SetpointGlobalVel",
             "Test setpoint global velocity",
             self.test_set_velocity_global_int),
            ("SplineTerrain",
             "Test Splines and Terrain",
             self.test_terrain_spline_mission),
        ])
        return ret
    def tests1d(self):
        '''return group 1d of tests; each entry is (name, description, method)'''
        ret = ([
            ("HorizontalFence",
             "Test horizontal fence",
             self.fly_fence_test),  # 20s
            ("HorizontalAvoidFence",
             "Test horizontal Avoidance fence",
             self.fly_fence_avoid_test),
            ("MaxAltFence",
             "Test Max Alt Fence",
             self.fly_alt_max_fence_test),  # 26s
            ("MinAltFence",
             "Test Min Alt Fence",
             self.fly_alt_min_fence_test),  # 26s
            ("FenceFloorEnabledLanding",
             "Test Landing with Fence floor enabled",
             self.fly_fence_floor_enabled_landing),
            ("AutoTuneSwitch",
             "Fly AUTOTUNE on a switch",
             self.fly_autotune_switch),  # 105s
            ("GPSGlitchLoiter",
             "GPS Glitch Loiter Test",
             self.fly_gps_glitch_loiter_test),  # 30s
            ("GPSGlitchLoiter2",
             "GPS Glitch Loiter Test2",
             self.fly_gps_glitch_loiter_test2),  # 30s
            ("GPSGlitchAuto",
             "GPS Glitch Auto Test",
             self.fly_gps_glitch_auto_test),
            ("ModeAltHold",
             "Test AltHold Mode",
             self.test_mode_ALT_HOLD),
            ("ModeLoiter",
             "Test Loiter Mode",
             self.loiter),
            ("SimpleMode",
             "Fly in SIMPLE mode",
             self.fly_simple),
            ("SuperSimpleCircle",
             "Fly a circle in SUPER SIMPLE mode",
             self.fly_super_simple),  # 38s
            ("ModeCircle",
             "Fly CIRCLE mode",
             self.fly_circle),  # 27s
            ("MagFail",
             "Test magnetometer failure",
             self.test_mag_fail),
            ("OpticalFlow",
             "Test Optical Flow",
             self.optical_flow),
            ("OpticalFlowLimits",
             "Fly Optical Flow limits",
             self.fly_optical_flow_limits),  # 27s
            ("MotorFail",
             "Fly motor failure test",
             self.fly_motor_fail),
            ("Flip",
             "Fly Flip Mode",
             self.fly_flip),
            ("CopterMission",
             "Fly copter mission",
             self.fly_auto_test),  # 37s
            ("SplineLastWaypoint",
             "Test Spline as last waypoint",
             self.test_spline_last_waypoint),
            ("Gripper",
             "Test gripper",
             self.test_gripper),  # 28s
            ("TestGripperMission",
             "Test Gripper mission items",
             self.test_gripper_mission),
            ("VisionPosition",
             "Fly Vision Position",
             self.fly_vision_position),  # 24s
            ("BodyFrameOdom",
             "Fly Body Frame Odometry Code",
             self.fly_body_frame_odom),  # 24s
            ("GPSViconSwitching",
             "Fly GPS and Vicon Switching",
             self.fly_gps_vicon_switching),
        ])
        return ret
    def tests1e(self):
        '''return group 1e of tests; each entry is (name, description, method)'''
        ret = ([
            ("BeaconPosition",
             "Fly Beacon Position",
             self.fly_beacon_position),  # 56s
            ("RTLSpeed",
             "Fly RTL Speed",
             self.fly_rtl_speed),
            ("Mount",
             "Test Camera/Antenna Mount",
             self.test_mount),  # 74s
            ("MountYawVehicleForMountROI",
             "Test Camera/Antenna Mount vehicle yawing for ROI",
             self.MountYawVehicleForMountROI),
            ("Button",
             "Test Buttons",
             self.test_button),
            ("ShipTakeoff",
             "Fly Simulated Ship Takeoff",
             self.fly_ship_takeoff),
            ("RangeFinder",
             "Test RangeFinder Basic Functionality",
             self.test_rangefinder),  # 23s
            ("BaroDrivers",
             "Test Baro Drivers",
             self.BaroDrivers),
            ("SurfaceTracking",
             "Test Surface Tracking",
             self.test_surface_tracking),  # 45s
            ("Parachute",
             "Test Parachute Functionality",
             self.test_parachute),
            ("ParameterChecks",
             "Test Arming Parameter Checks",
             self.test_parameter_checks),
            ("ManualThrottleModeChange",
             "Check manual throttle mode changes denied on high throttle",
             self.fly_manual_throttle_mode_change),
            ("MANUAL_CONTROL",
             "Test mavlink MANUAL_CONTROL",
             self.test_manual_control),
            ("ZigZag",
             "Fly ZigZag Mode",
             self.fly_zigzag_mode),  # 58s
            ("PosHoldTakeOff",
             "Fly POSHOLD takeoff",
             self.fly_poshold_takeoff),
            ("FOLLOW",
             "Fly follow mode",
             self.fly_follow_mode),  # 80s
            ("RangeFinderDrivers",
             "Test rangefinder drivers",
             self.fly_rangefinder_drivers),  # 62s
            ("MaxBotixI2CXL",
             "Test maxbotix rangefinder drivers",
             self.fly_rangefinder_driver_maxbotix),  # 62s
            ("MAVProximity",
             "Test MAVLink proximity driver",
             self.fly_proximity_mavlink_distance_sensor,
             ),
            ("ParameterValidation",
             "Test parameters are checked for validity",
             self.test_parameter_validation),
            ("AltTypes",
             "Test Different Altitude Types",
             self.test_altitude_types),
            ("RichenPower",
             "Test RichenPower generator",
             self.test_richenpower),
            ("IE24",
             "Test IntelligentEnergy 2.4kWh generator",
             self.test_ie24),
            ("LogUpload",
             "Log upload",
             self.log_upload),
        ])
        return ret
# a wrapper around all the 2A,2B,2C..etc tests for travis
def tests2(self):
ret = ([])
ret.extend(self.tests2a())
ret.extend(self.tests2b())
return ret
    def tests2a(self):
        '''return group 2a of tests; each entry is (name, description, method)'''
        ret = ([
            # something about SITLCompassCalibration appears to fail
            # this one, so we put it first:
            ("FixedYawCalibration",
             "Test Fixed Yaw Calibration",  # about 20 secs
             self.test_fixed_yaw_calibration),
            # we run this single 8min-and-40s test on its own, apart from
            #   requiring FixedYawCalibration right before it because without it, it fails to calibrate
            ("SITLCompassCalibration",  # this autotest appears to interfere with FixedYawCalibration, no idea why.
             "Test SITL onboard compass calibration",
             self.test_mag_calibration),
        ])
        return ret
def tests2b(self): # this block currently around 9.5mins here
'''return list of all tests'''
ret = ([
Test("MotorVibration",
"Fly motor vibration test",
self.fly_motor_vibration),
Test("DynamicNotches",
"Fly Dynamic Notches",
self.fly_dynamic_notches,
attempts=8),
Test("PositionWhenGPSIsZero",
"Ensure position doesn't zero when GPS lost",
self.test_copter_gps_zero),
Test("DynamicRpmNotches",
"Fly Dynamic Notches driven by ESC Telemetry",
self.fly_esc_telemetry_notches,
attempts=8),
Test("RefindGPS",
"Refind the GPS and attempt to RTL rather than continue to land",
self.RefindGPS),
Test("GyroFFT",
"Fly Gyro FFT",
self.fly_gyro_fft,
attempts=8),
Test("GyroFFTHarmonic",
"Fly Gyro FFT Harmonic Matching",
self.fly_gyro_fft_harmonic,
attempts=8),
Test("CompassReordering",
"Test Compass reordering when priorities are changed",
self.test_mag_reordering), # 40sec?
Test("CRSF",
"Test RC CRSF",
self.test_crsf), # 20secs ish
Test("MotorTest",
"Run Motor Tests",
self.test_motortest), # 20secs ish
Test("AltEstimation",
"Test that Alt Estimation is mandatory for ALT_HOLD",
self.test_alt_estimate_prearm), # 20secs ish
Test("EKFSource",
"Check EKF Source Prearms work",
self.test_ekf_source),
Test("GSF",
"Check GSF",
self.test_gsf),
Test("SMART_RTL",
"Check SMART_RTL",
self.test_SMART_RTL),
Test("FlyEachFrame",
"Fly each supported internal frame",
self.fly_each_frame),
Test("GPSBlending",
"Test GPS Blending",
self.test_gps_blending),
Test("DataFlash",
"Test DataFlash Block backend",
self.test_dataflash_sitl),
Test("DataFlashErase",
"Test DataFlash Block backend erase",
self.test_dataflash_erase),
Test("Callisto",
"Test Callisto",
self.test_callisto),
Test("Replay",
"Test Replay",
self.test_replay),
Test("FETtecESC",
"Test FETtecESC",
self.FETtecESC),
Test("GroundEffectCompensation_touchDownExpected",
"Test EKF's handling of touchdown-expected",
self.GroundEffectCompensation_touchDownExpected),
Test("GroundEffectCompensation_takeOffExpected",
"Test EKF's handling of takeoff-expected",
self.GroundEffectCompensation_takeOffExpected),
Test("WPNAV_SPEED",
"Change speed during misison",
self.WPNAV_SPEED),
Test("WPNAV_SPEED_UP",
"Change speed (up) during misison",
self.WPNAV_SPEED_UP),
Test("WPNAV_SPEED_DN",
"Change speed (down) during misison",
self.WPNAV_SPEED_DN),
("DefaultIntervalsFromFiles",
"Test setting default mavlink message intervals from files",
self.DefaultIntervalsFromFiles),
Test("GPSTypes",
"Test simulated GPS types",
self.GPSTypes),
Test("MultipleGPS",
"Test multi-GPS behaviour",
self.MultipleGPS),
Test("LogUpload",
"Log upload",
self.log_upload),
])
return ret
def testcan(self):
ret = ([
("CANGPSCopterMission",
"Fly copter mission",
self.fly_auto_test_using_can_gps),
])
return ret
def tests(self):
ret = []
ret.extend(self.tests1())
ret.extend(self.tests2())
return ret
    def disabled_tests(self):
        '''Map of test name -> reason explaining why it is currently skipped.'''
        return {
            "Parachute": "See https://github.com/ArduPilot/ardupilot/issues/4702",
            "HorizontalAvoidFence": "See https://github.com/ArduPilot/ardupilot/issues/11525",
            "AltEstimation": "See https://github.com/ArduPilot/ardupilot/issues/15191",
        }
class AutoTestCopterTests1(AutoTestCopter):
    '''Copter autotest restricted to test group 1.'''
    def tests(self):
        return self.tests1()
class AutoTestCopterTests1a(AutoTestCopter):
    '''Copter autotest restricted to test group 1a.'''
    def tests(self):
        return self.tests1a()
class AutoTestCopterTests1b(AutoTestCopter):
    '''Copter autotest restricted to test group 1b.'''
    def tests(self):
        return self.tests1b()
class AutoTestCopterTests1c(AutoTestCopter):
    '''Copter autotest restricted to test group 1c.'''
    def tests(self):
        return self.tests1c()
class AutoTestCopterTests1d(AutoTestCopter):
    '''Copter autotest restricted to test group 1d.'''
    def tests(self):
        return self.tests1d()
class AutoTestCopterTests1e(AutoTestCopter):
    '''Copter autotest restricted to test group 1e.'''
    def tests(self):
        return self.tests1e()
class AutoTestCopterTests2(AutoTestCopter):
    '''Copter autotest restricted to test group 2.'''
    def tests(self):
        return self.tests2()
class AutoTestCopterTests2a(AutoTestCopter):
    '''Copter autotest restricted to test group 2a.'''
    def tests(self):
        return self.tests2a()
class AutoTestCopterTests2b(AutoTestCopter):
    '''Copter autotest restricted to test group 2b.'''
    def tests(self):
        return self.tests2b()
class AutoTestCAN(AutoTestCopter):
    '''Copter autotest restricted to CAN-peripheral tests.'''
    def tests(self):
        return self.testcan()
|
meee1/ardupilot
|
Tools/autotest/arducopter.py
|
Python
|
gpl-3.0
| 313,742
|
[
"Gaussian"
] |
486407450b60a2ad9de085ea8c0956098d52041d2a14c98b62a4c8165466f387
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
# Short aliases used throughout the generated respond() body, plus build
# metadata recorded by the Cheetah compiler.  This file is auto-generated
# from epgmulti.tmpl - do not hand-edit.
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1447321436.228214
__CHEETAH_genTimestamp__ = 'Thu Nov 12 18:43:56 2015'
__CHEETAH_src__ = '/home/knuth/openpli-oe-core/build/tmp/work/fusionhd-oe-linux/enigma2-plugin-extensions-openwebif/1+gitAUTOINC+5837c87afc-r0/git/plugin/controllers/views/web/epgmulti.tmpl'
__CHEETAH_srcLastModified__ = 'Thu Nov 12 18:43:41 2015'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
# refuse to run if the installed Cheetah runtime is older than the compiler
# that produced this module
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
    raise AssertionError(
      'This template was compiled with Cheetah version'
      ' %s. Templates compiled before version %s must be recompiled.'%(
         __CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class epgmulti(Template):
    '''Cheetah-compiled template rendering an <e2eventlist> XML document
    from the "events" entry of the search list.

    Auto-generated from epgmulti.tmpl - do not hand-edit; regenerate the
    template instead.
    '''
    ##################################################
    ## CHEETAH GENERATED METHODS
    def __init__(self, *args, **KWs):
        super(epgmulti, self).__init__(*args, **KWs)
        if not self._CHEETAH__instanceInitialized:
            cheetahKWArgs = {}
            allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
            for k,v in KWs.items():
                if k in allowedKWs: cheetahKWArgs[k] = v
            self._initCheetahInstance(**cheetahKWArgs)
    def respond(self, trans=None):
        ## CHEETAH: main method generated for this template
        if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
            trans = self.transaction # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else: _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter
        ########################################
        ## START - generated method body
        _orig_filter_29523168 = _filter
        filterName = u'WebSafe'
        # NOTE(review): dict.has_key() is Python-2-only; this generated file
        # cannot run under Python 3 without regeneration.
        if self._CHEETAH__filters.has_key("WebSafe"):
            _filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
        else:
            _filter = self._CHEETAH__currentFilter = \
                self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
        write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2eventlist>
''')
        # one <e2event> element per entry in the "events" search-list value
        for event in VFFSL(SL,"events",True): # generated from line 4, col 2
            write(u'''\t<e2event>
\t\t<e2eventid>''')
            _v = VFFSL(SL,"str",False)(VFFSL(SL,"event.id",True)) # u'$str($event.id)' on line 6, col 14
            if _v is not None: write(_filter(_v, rawExpr=u'$str($event.id)')) # from line 6, col 14.
            write(u'''</e2eventid>
\t\t<e2eventstart>''')
            _v = VFFSL(SL,"str",False)(VFFSL(SL,"event.begin_timestamp",True)) # u'$str($event.begin_timestamp)' on line 7, col 17
            if _v is not None: write(_filter(_v, rawExpr=u'$str($event.begin_timestamp)')) # from line 7, col 17.
            write(u'''</e2eventstart>
\t\t<e2eventduration>''')
            _v = VFFSL(SL,"str",False)(VFFSL(SL,"event.duration_sec",True)) # u'$str($event.duration_sec)' on line 8, col 20
            if _v is not None: write(_filter(_v, rawExpr=u'$str($event.duration_sec)')) # from line 8, col 20.
            write(u'''</e2eventduration>
\t\t<e2eventcurrenttime>''')
            _v = VFFSL(SL,"str",False)(VFFSL(SL,"event.now_timestamp",True)) # u'$str($event.now_timestamp)' on line 9, col 23
            if _v is not None: write(_filter(_v, rawExpr=u'$str($event.now_timestamp)')) # from line 9, col 23.
            write(u'''</e2eventcurrenttime>
\t\t<e2eventtitle>''')
            _v = VFFSL(SL,"str",False)(VFFSL(SL,"event.title",True)) # u'$str($event.title)' on line 10, col 17
            if _v is not None: write(_filter(_v, rawExpr=u'$str($event.title)')) # from line 10, col 17.
            write(u'''</e2eventtitle>
\t\t<e2eventdescription>''')
            _v = VFFSL(SL,"str",False)(VFFSL(SL,"event.shortdesc",True)) # u'$str($event.shortdesc)' on line 11, col 23
            if _v is not None: write(_filter(_v, rawExpr=u'$str($event.shortdesc)')) # from line 11, col 23.
            write(u'''</e2eventdescription>
\t\t<e2eventdescriptionextended>''')
            _v = VFFSL(SL,"str",False)(VFFSL(SL,"event.longdesc",True)) # u'$str($event.longdesc)' on line 12, col 31
            if _v is not None: write(_filter(_v, rawExpr=u'$str($event.longdesc)')) # from line 12, col 31.
            write(u'''</e2eventdescriptionextended>
\t\t<e2eventservicereference>''')
            _v = VFFSL(SL,"event.sref",True) # u'$event.sref' on line 13, col 28
            if _v is not None: write(_filter(_v, rawExpr=u'$event.sref')) # from line 13, col 28.
            write(u'''</e2eventservicereference>
\t\t<e2eventservicename>''')
            _v = VFFSL(SL,"event.sname",True) # u'$event.sname' on line 14, col 23
            if _v is not None: write(_filter(_v, rawExpr=u'$event.sname')) # from line 14, col 23.
            write(u'''</e2eventservicename>
\t</e2event>
''')
        write(u'''</e2eventlist>
''')
        _filter = self._CHEETAH__currentFilter = _orig_filter_29523168
        ########################################
        ## END - generated method body
        return _dummyTrans and trans.response().getvalue() or ""
    ##################################################
    ## CHEETAH GENERATED ATTRIBUTES
    _CHEETAH__instanceInitialized = False
    _CHEETAH_version = __CHEETAH_version__
    _CHEETAH_versionTuple = __CHEETAH_versionTuple__
    _CHEETAH_genTime = __CHEETAH_genTime__
    _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
    _CHEETAH_src = __CHEETAH_src__
    _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
    _mainCheetahMethod_for_epgmulti= 'respond'
## END CLASS DEFINITION
# Install the Cheetah runtime plumbing on the class if the installed
# Template base class has not already provided it.
if not hasattr(epgmulti, '_initCheetahAttributes'):
    templateAPIClass = getattr(epgmulti, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(epgmulti)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    CmdLineIface(templateObj=epgmulti()).run()
|
pli3/e2-openwbif
|
plugin/controllers/views/web/epgmulti.py
|
Python
|
gpl-2.0
| 7,477
|
[
"VisIt"
] |
960fa0ff7fd417a04c2d8211d88b0428b60f0bf6783659b1569293e38d948bdb
|
from .lpi_filter import inverse, wiener, LPIFilter2D
from ._gaussian import gaussian
from .edges import (sobel, hsobel, vsobel, sobel_h, sobel_v,
scharr, hscharr, vscharr, scharr_h, scharr_v,
prewitt, hprewitt, vprewitt, prewitt_h, prewitt_v,
roberts, roberts_positive_diagonal,
roberts_negative_diagonal, roberts_pos_diag,
roberts_neg_diag, laplace)
from ._rank_order import rank_order
from ._gabor import gabor_kernel, gabor
from .thresholding import (threshold_adaptive, threshold_otsu, threshold_yen,
threshold_isodata, threshold_li)
from . import rank
from .rank import median
from .._shared.utils import deprecated, copy_func
from .. import restoration
# Backwards-compatible aliases: the old *_filter names still work but emit a
# DeprecationWarning pointing callers at the new, shorter names.
gaussian_filter = copy_func(gaussian, name='gaussian_filter')
gaussian_filter = deprecated('skimage.filters.gaussian')(gaussian_filter)
gabor_filter = copy_func(gabor, name='gabor_filter')
gabor_filter = deprecated('skimage.filters.gabor')(gabor_filter)
# Backward compatibility v<0.11
@deprecated('skimage.feature.canny')
def canny(*args, **kwargs):
    """Deprecated alias for :func:`skimage.feature.canny`."""
    # Hack to avoid circular import: skimage.feature imports from
    # skimage.filters, so the real implementation is imported lazily here.
    from ..feature._canny import canny as canny_
    return canny_(*args, **kwargs)
__all__ = ['inverse',
'wiener',
'LPIFilter2D',
'gaussian',
'median',
'canny',
'sobel',
'hsobel',
'vsobel',
'sobel_h',
'sobel_v',
'scharr',
'hscharr',
'vscharr',
'scharr_h',
'scharr_v',
'prewitt',
'hprewitt',
'vprewitt',
'prewitt_h',
'prewitt_v',
'roberts',
'roberts_positive_diagonal',
'roberts_negative_diagonal',
'roberts_pos_diag',
'roberts_neg_diag',
'laplace',
'denoise_tv_chambolle',
'denoise_bilateral',
'denoise_tv_bregman',
'rank_order',
'gabor_kernel',
'gabor',
'threshold_adaptive',
'threshold_otsu',
'threshold_yen',
'threshold_isodata',
'threshold_li',
'rank']
|
bgris/ODL_bgris
|
lib/python3.5/site-packages/skimage/filters/__init__.py
|
Python
|
gpl-3.0
| 2,256
|
[
"Gaussian"
] |
84d4fbdc669e382100e4a22cbb7ce5bee7a4ff9614ff4cd1c080fe2c88d78e04
|
import unittest, types
from mock import Mock
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.TransformationSystem.Client.TaskManager import TaskBase, WorkflowTasks, RequestTasks
from DIRAC.TransformationSystem.Client.TransformationClient import TransformationClient
from DIRAC.TransformationSystem.Client.Transformation import Transformation
def getSitesForSE( ses ):
  """Test stub standing in for DIRAC's getSitesForSE.

  The storage element 'pippo' is hosted at Site2 and Site3; anything else
  resolves to Site3 only.  Always returns an S_OK-style dictionary.
  """
  sites = ['Site2', 'Site3'] if ses == 'pippo' else ['Site3']
  return {'OK': True, 'Value': sites}
#############################################################################
class ClientsTestCase( unittest.TestCase ):
  """ Base class for the clients test cases
  """
  def setUp( self ):
    # mocked TransformationClient: setTaskStatusAndWmsID always succeeds
    self.mockTransClient = Mock()
    self.mockTransClient.setTaskStatusAndWmsID.return_value = {'OK':True}
    self.WMSClientMock = Mock()
    self.jobMonitoringClient = Mock()
    self.mockReqClient = Mock()
    # jobMock is the job *class* mock; calling it yields jobMock2, whose
    # workflow reports 'MySite' via findParameter().getValue()
    self.jobMock = Mock()
    self.jobMock2 = Mock()
    mockWF = Mock()
    mockPar = Mock()
    mockWF.findParameter.return_value = mockPar
    mockPar.getValue.return_value = 'MySite'
    self.jobMock2.workflow = mockWF
    self.jobMock2.setDestination.return_value = {'OK':True}
    self.jobMock.workflow.return_value = ''
    self.jobMock.return_value = self.jobMock2
    # objects under test, wired up with the mocks above
    self.taskBase = TaskBase( transClient = self.mockTransClient )
    self.wfTasks = WorkflowTasks( transClient = self.mockTransClient,
                                  submissionClient = self.WMSClientMock,
                                  jobMonitoringClient = self.jobMonitoringClient,
                                  outputDataModule = "mock",
                                  jobClass = self.jobMock )
    self.requestTasks = RequestTasks( transClient = self.mockTransClient,
                                      requestClient = self.mockReqClient
                                      )
    self.tc = TransformationClient()
    self.transformation = Transformation()
    # show full diffs on assertion failures
    self.maxDiff = None
  def tearDown( self ):
    pass
#############################################################################
class TaskBaseSuccess( ClientsTestCase ):
  """Success-path tests for TaskBase."""
  def test_updateDBAfterTaskSubmission( self ):
    # an empty task dictionary is a no-op and must succeed
    res = self.taskBase.updateDBAfterTaskSubmission( {} )
    self.assertEqual( res['OK'], True )
#############################################################################
class WorkflowTasksSuccess( ClientsTestCase ):
  """Success-path tests for WorkflowTasks."""
  def test_prepareTranformationTasks( self ):
    # tasks with/without an explicit Site; the mocked job reports 'MySite',
    # so every prepared task ends up with Site == 'MySite'
    taskDict = {1:{'TransformationID':1, 'a1':'aa1', 'b1':'bb1', 'Site':'MySite'},
                2:{'TransformationID':1, 'a2':'aa2', 'b2':'bb2', 'InputData':['a1', 'a2']},
                3:{'TransformationID':2, 'a3':'aa3', 'b3':'bb3'},
                }
    res = self.wfTasks.prepareTransformationTasks( '', taskDict, 'test_user', 'test_group', 'test_DN' )
    self.assertEqual( res, {'OK': True,
                            'Value': {1: {'a1': 'aa1', 'TaskObject': '', 'TransformationID': 1,
                                          'b1': 'bb1', 'Site': 'MySite'},
                                      2: {'TaskObject': '', 'a2': 'aa2', 'TransformationID': 1,
                                          'InputData': ['a1', 'a2'], 'b2': 'bb2', 'Site': 'MySite'},
                                      3: {'TaskObject': '', 'a3': 'aa3', 'TransformationID': 2,
                                          'b3': 'bb3', 'Site': 'MySite'}
                                      }
                            }
                          )
  def test__handleDestination( self ):
    # no constraints -> ANY
    res = self.wfTasks._handleDestination( {'Site':'', 'TargetSE':''} )
    self.assertEqual( res, ['ANY'] )
    res = self.wfTasks._handleDestination( {'Site':'ANY', 'TargetSE':''} )
    self.assertEqual( res, ['ANY'] )
    res = self.wfTasks._handleDestination( {'TargetSE':'Unknown'} )
    self.assertEqual( res, ['ANY'] )
    # site list only -> returned verbatim
    res = self.wfTasks._handleDestination( {'Site':'Site1;Site2', 'TargetSE':''} )
    self.assertEqual( res, ['Site1', 'Site2'] )
    # site list AND TargetSE -> intersection of sites and SE-hosting sites
    # (getSitesForSE stub: 'pippo' -> Site2+Site3, otherwise Site3)
    res = self.wfTasks._handleDestination( {'Site':'Site1;Site2', 'TargetSE':'pippo'}, getSitesForSE )
    self.assertEqual( res, ['Site2'] )
    res = self.wfTasks._handleDestination( {'Site':'Site1;Site2', 'TargetSE':'pippo, pluto'}, getSitesForSE )
    self.assertEqual( res, ['Site2'] )
    res = self.wfTasks._handleDestination( {'Site':'Site1;Site2;Site3', 'TargetSE':'pippo, pluto'}, getSitesForSE )
    self.assertEqual( res, ['Site2', 'Site3'] )
    res = self.wfTasks._handleDestination( {'Site':'Site2', 'TargetSE':'pippo, pluto'}, getSitesForSE )
    self.assertEqual( res, ['Site2'] )
    res = self.wfTasks._handleDestination( {'Site':'ANY', 'TargetSE':'pippo, pluto'}, getSitesForSE )
    self.assertEqual( res, ['Site2', 'Site3'] )
    # empty intersection -> no eligible destination
    res = self.wfTasks._handleDestination( {'Site':'Site1', 'TargetSE':'pluto'}, getSitesForSE )
    self.assertEqual( res, [] )
#############################################################################
class RequestTasksSuccess( ClientsTestCase ):
  """Success-path tests for RequestTasks.

  Uses assertTrue/assertIsInstance instead of the deprecated unittest
  ``assert_`` alias.
  """
  def test_prepareTranformationTasks( self ):
    # InputData may be a list or a ';'-separated string; both forms must
    # produce a ReplicateAndRegister request over the same two LFNs
    taskDict = {1:{'TransformationID':1, 'TargetSE':'SE1', 'b1':'bb1', 'Site':'MySite',
                   'InputData':['/this/is/a1.lfn', '/this/is/a2.lfn']},
                2:{'TransformationID':1, 'TargetSE':'SE2', 'b2':'bb2', 'InputData':"/this/is/a1.lfn;/this/is/a2.lfn"},
                3:{'TransformationID':2, 'TargetSE':'SE3', 'b3':'bb3', 'InputData':''}
                }
    res = self.requestTasks.prepareTransformationTasks( '', taskDict, 'owner', 'ownerGroup', '/bih/boh/DN' )
    self.assertTrue( res['OK'] )
    for task in res['Value'].values():
      self.assertIsInstance( task['TaskObject'], Request )
      self.assertEqual( task['TaskObject'][0].Type, 'ReplicateAndRegister' )
      self.assertEqual( task['TaskObject'][0][0].LFN, '/this/is/a1.lfn' )
      self.assertEqual( task['TaskObject'][0][1].LFN, '/this/is/a2.lfn' )
#############################################################################
class TransformationClientSuccess( ClientsTestCase ):
def test__applyTransformationFilesStateMachine( self ):
tsFiles = {}
dictOfNewLFNsStatus = {}
res = self.tc._applyTransformationFilesStateMachine( tsFiles, dictOfNewLFNsStatus, False )
self.assertEqual( res, {} )
tsFiles = {}
dictOfNewLFNsStatus = {'foo':['status', 2L, 1234]}
res = self.tc._applyTransformationFilesStateMachine( tsFiles, dictOfNewLFNsStatus, False )
self.assertEqual( res, {} )
tsFiles = {'foo':['status', 2L, 1234]}
dictOfNewLFNsStatus = {'foo':'status'}
res = self.tc._applyTransformationFilesStateMachine( tsFiles, dictOfNewLFNsStatus, False )
self.assertEqual( res, {} )
tsFiles = {'foo':['status', 2L, 1234]}
dictOfNewLFNsStatus = {'foo':'statusA'}
res = self.tc._applyTransformationFilesStateMachine( tsFiles, dictOfNewLFNsStatus, False )
self.assertEqual( res, {'foo':'statusA'} )
tsFiles = {'foo':['status', 2L, 1234], 'bar':['status', 2L, 5678]}
dictOfNewLFNsStatus = {'foo':'status'}
res = self.tc._applyTransformationFilesStateMachine( tsFiles, dictOfNewLFNsStatus, False )
self.assertEqual( res, {} )
tsFiles = {'foo':['status', 2L, 1234], 'bar':['status', 2L, 5678]}
dictOfNewLFNsStatus = {'foo':'statusA'}
res = self.tc._applyTransformationFilesStateMachine( tsFiles, dictOfNewLFNsStatus, False )
self.assertEqual( res, {'foo':'statusA'} )
tsFiles = {'foo':['status', 2L, 1234], 'bar': ['status', 2L, 5678]}
dictOfNewLFNsStatus = {'foo':'A', 'bar':'B'}
res = self.tc._applyTransformationFilesStateMachine( tsFiles, dictOfNewLFNsStatus, False )
self.assertEqual( res, {'foo':'A', 'bar':'B'} )
tsFiles = {'foo':['status', 2L, 1234]}
dictOfNewLFNsStatus = {'foo':'A', 'bar':'B'}
res = self.tc._applyTransformationFilesStateMachine( tsFiles, dictOfNewLFNsStatus, False )
self.assertEqual( res, {'foo':'A'} )
tsFiles = {'foo': ['Assigned', 2L, 1234]}
dictOfNewLFNsStatus = {'foo':'A', 'bar':'B'}
res = self.tc._applyTransformationFilesStateMachine( tsFiles, dictOfNewLFNsStatus, False )
self.assertEqual( res, {'foo':'A'} )
tsFiles = {'foo':['Assigned', 2L, 1234], 'bar':['Assigned', 2L, 5678]}
dictOfNewLFNsStatus = {'foo':'Assigned', 'bar':'Processed'}
res = self.tc._applyTransformationFilesStateMachine( tsFiles, dictOfNewLFNsStatus, False )
self.assertEqual( res, {'foo':'Assigned', 'bar':'Processed'} )
tsFiles = {'foo':['Processed', 2L, 1234], 'bar':['Unused', 2L, 5678]}
dictOfNewLFNsStatus = {'foo':'Assigned', 'bar':'Processed'}
res = self.tc._applyTransformationFilesStateMachine( tsFiles, dictOfNewLFNsStatus, False )
self.assertEqual( res, {'foo':'Processed', 'bar':'Processed'} )
tsFiles = {'foo':['Processed', 2L, 1234], 'bar':['Unused', 2L, 5678]}
dictOfNewLFNsStatus = {'foo':'Assigned', 'bar':'Processed'}
res = self.tc._applyTransformationFilesStateMachine( tsFiles, dictOfNewLFNsStatus, True )
self.assertEqual( res, {'foo':'Assigned', 'bar':'Processed'} )
tsFiles = {'foo':['MaxReset', 12L, 1234], 'bar':['Processed', 22L, 5678]}
dictOfNewLFNsStatus = {'foo':'Unused', 'bar':'Unused'}
res = self.tc._applyTransformationFilesStateMachine( tsFiles, dictOfNewLFNsStatus, False )
self.assertEqual( res, {'foo':'MaxReset', 'bar':'Processed'} )
tsFiles = {'foo':['MaxReset', 12L, 1234], 'bar':['Processed', 22L, 5678]}
dictOfNewLFNsStatus = {'foo':'Unused', 'bar':'Unused'}
res = self.tc._applyTransformationFilesStateMachine( tsFiles, dictOfNewLFNsStatus, True )
self.assertEqual( res, {'foo':'Unused', 'bar':'Unused'} )
tsFiles = {'foo':['Assigned', 20L, 1234], 'bar':['Processed', 2L, 5678]}
dictOfNewLFNsStatus = {'foo':'Unused', 'bar':'Unused'}
res = self.tc._applyTransformationFilesStateMachine( tsFiles, dictOfNewLFNsStatus, False )
self.assertEqual( res, {'foo':'MaxReset', 'bar':'Processed'} )
tsFiles = {'foo':['Assigned', 20L, 1234], 'bar':['Processed', 2L, 5678]}
dictOfNewLFNsStatus = {'foo':'Unused', 'bar':'Unused'}
res = self.tc._applyTransformationFilesStateMachine( tsFiles, dictOfNewLFNsStatus, True )
self.assertEqual( res, {'foo':'Unused', 'bar':'Unused'} )
#############################################################################
class TransformationSuccess( ClientsTestCase ):
  """Tests for the Transformation client object (setters, getters, reset)."""

  def test_setGet( self ):
    """ Every basic setter must report success. """
    res = self.transformation.setTransformationName( 'TestTName' )
    self.assert_( res['OK'] )
    description = 'Test transformation description'
    res = self.transformation.setDescription( description )
    # Bug fix: this result was previously assigned but never checked,
    # unlike every sibling setter in this test.
    self.assert_( res['OK'] )
    longDescription = 'Test transformation long description'
    res = self.transformation.setLongDescription( longDescription )
    self.assert_( res['OK'] )
    res = self.transformation.setType( 'MCSimulation' )
    self.assert_( res['OK'] )
    res = self.transformation.setPlugin( 'aPlugin' )
    self.assertTrue( res['OK'] )

  def test_SetGetReset( self ):
    """ Testing of the set, get and reset methods.

          set*()
          get*()
          setTargetSE()
          setSourceSE()
          getTargetSE()
          getSourceSE()
          reset()
        Ensures that after a reset all parameters are returned to their defaults
    """
    res = self.transformation.getParameters()
    self.assert_( res['OK'] )
    defaultParams = res['Value'].copy()
    for parameterName, defaultValue in res['Value'].items():
      # Pick a test value of the same broad kind as the default.
      if type( defaultValue ) in types.StringTypes:
        testValue = 'TestValue'
      else:
        testValue = 99999
      # # set*
      setterName = 'set%s' % parameterName
      self.assert_( hasattr( self.transformation, setterName ) )
      setter = getattr( self.transformation, setterName )
      self.assert_( callable( setter ) )
      res = setter( testValue )
      self.assert_( res['OK'] )
      # # get*
      getterName = "get%s" % parameterName
      self.assert_( hasattr( self.transformation, getterName ) )
      getter = getattr( self.transformation, getterName )
      self.assert_( callable( getter ) )
      res = getter()
      self.assert_( res['OK'] )
      # Bug fix: assert_( x, msg ) only checked the truth of x, with
      # testValue silently used as the failure message; the intent was
      # clearly an equality check of the value just set.
      self.assertEqual( res['Value'], testValue )

    # reset() must restore every parameter to its default ...
    res = self.transformation.reset()
    self.assert_( res['OK'] )
    res = self.transformation.getParameters()
    self.assert_( res['OK'] )
    for parameterName, resetValue in res['Value'].items():
      self.assertEqual( resetValue, defaultParams[parameterName] )
    # ... and remove the SE accessors entirely.
    self.assertRaises( AttributeError, self.transformation.getTargetSE )
    self.assertRaises( AttributeError, self.transformation.getSourceSE )
if __name__ == '__main__':
  # Assemble the full suite in a fixed order and run it verbosely.
  loader = unittest.defaultTestLoader
  suite = loader.loadTestsFromTestCase( ClientsTestCase )
  for testClass in ( TaskBaseSuccess, WorkflowTasksSuccess, RequestTasksSuccess,
                     TransformationClientSuccess, TransformationSuccess ):
    suite.addTest( loader.loadTestsFromTestCase( testClass ) )
  testResult = unittest.TextTestRunner( verbosity = 2 ).run( suite )
|
Sbalbp/DIRAC
|
TransformationSystem/Client/test/test_Client.py
|
Python
|
gpl-3.0
| 13,260
|
[
"DIRAC"
] |
12f8d90c0378c576c7c44527785b25bfe01f881bad1f1fd26e0eec5a6a3c1dbe
|
import enum
import inspect
import pydoc
import unittest
from collections import OrderedDict
from enum import Enum, IntEnum, EnumMeta, unique
from io import StringIO
from pickle import dumps, loads, PicklingError, HIGHEST_PROTOCOL
# for pickle tests
#
# Pickle round-tripping requires the classes to be importable from module
# level, so these fixtures are defined here rather than inside the tests.
# Each definition is wrapped in try/except: if creation fails at import
# time, the exception object itself is stored under the fixture's name, and
# the tests that use it re-raise it (see e.g. the
# `if isinstance(Stooges, Exception): raise Stooges` pattern below).
try:
    class Stooges(Enum):
        LARRY = 1
        CURLY = 2
        MOE = 3
except Exception as exc:
    Stooges = exc
try:
    class IntStooges(int, Enum):
        LARRY = 1
        CURLY = 2
        MOE = 3
except Exception as exc:
    IntStooges = exc
try:
    class FloatStooges(float, Enum):
        LARRY = 1.39
        CURLY = 2.72
        MOE = 3.142596
except Exception as exc:
    FloatStooges = exc
# for pickle test and subclass tests
try:
    class StrEnum(str, Enum):
        'accepts only string values'
    class Name(StrEnum):
        BDFL = 'Guido van Rossum'
        FLUFL = 'Barry Warsaw'
except Exception as exc:
    Name = exc
# Functional-API fixtures: with an explicit module, without one, and with an
# explicit qualname.
try:
    Question = Enum('Question', 'who what when where why', module=__name__)
except Exception as exc:
    Question = exc
try:
    Answer = Enum('Answer', 'him this then there because')
except Exception as exc:
    Answer = exc
try:
    Theory = Enum('Theory', 'rule law supposition', qualname='spanish_inquisition')
except Exception as exc:
    Theory = exc
# for doctests
try:
    class Fruit(Enum):
        tomato = 1
        banana = 2
        cherry = 3
except Exception:
    pass
def test_pickle_dump_load(assertion, source, target=None,
*, protocol=(0, HIGHEST_PROTOCOL)):
start, stop = protocol
if target is None:
target = source
for protocol in range(start, stop+1):
assertion(loads(dumps(source, protocol=protocol)), target)
def test_pickle_exception(assertion, exception, obj,
*, protocol=(0, HIGHEST_PROTOCOL)):
start, stop = protocol
for protocol in range(start, stop+1):
with assertion(exception):
dumps(obj, protocol=protocol)
class TestHelpers(unittest.TestCase):
    """Unit tests for enum's private name-classification helpers:
    _is_descriptor, _is_sunder, _is_dunder.
    """

    def test_is_descriptor(self):
        class Dummy:
            pass
        for protocol_attr in ('__get__', '__set__', '__delete__'):
            candidate = Dummy()
            # A bare instance is not a descriptor ...
            self.assertFalse(enum._is_descriptor(candidate))
            # ... but any single descriptor-protocol attribute makes it one.
            setattr(candidate, protocol_attr, 1)
            self.assertTrue(enum._is_descriptor(candidate))

    def test_is_sunder(self):
        sunder_names = ('_a_', '_aa_')
        non_sunder_names = ('a', 'a_', '_a', '__a', 'a__', '__a__', '_a__',
                            '__a_', '_', '__', '___', '____', '_____')
        for name in sunder_names:
            self.assertTrue(enum._is_sunder(name))
        for name in non_sunder_names:
            self.assertFalse(enum._is_sunder(name))

    def test_is_dunder(self):
        dunder_names = ('__a__', '__aa__')
        non_dunder_names = ('a', 'a_', '_a', '__a', 'a__', '_a_', '_a__',
                            '__a_', '_', '__', '___', '____', '_____')
        for name in dunder_names:
            self.assertTrue(enum._is_dunder(name))
        for name in non_dunder_names:
            self.assertFalse(enum._is_dunder(name))
class TestEnum(unittest.TestCase):
def setUp(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
self.Season = Season
class Konstants(float, Enum):
E = 2.7182818
PI = 3.1415926
TAU = 2 * PI
self.Konstants = Konstants
class Grades(IntEnum):
A = 5
B = 4
C = 3
D = 2
F = 0
self.Grades = Grades
class Directional(str, Enum):
EAST = 'east'
WEST = 'west'
NORTH = 'north'
SOUTH = 'south'
self.Directional = Directional
from datetime import date
class Holiday(date, Enum):
NEW_YEAR = 2013, 1, 1
IDES_OF_MARCH = 2013, 3, 15
self.Holiday = Holiday
def test_dir_on_class(self):
Season = self.Season
self.assertEqual(
set(dir(Season)),
set(['__class__', '__doc__', '__members__', '__module__',
'SPRING', 'SUMMER', 'AUTUMN', 'WINTER']),
)
def test_dir_on_item(self):
Season = self.Season
self.assertEqual(
set(dir(Season.WINTER)),
set(['__class__', '__doc__', '__module__', 'name', 'value']),
)
def test_dir_with_added_behavior(self):
class Test(Enum):
this = 'that'
these = 'those'
def wowser(self):
return ("Wowser! I'm %s!" % self.name)
self.assertEqual(
set(dir(Test)),
set(['__class__', '__doc__', '__members__', '__module__', 'this', 'these']),
)
self.assertEqual(
set(dir(Test.this)),
set(['__class__', '__doc__', '__module__', 'name', 'value', 'wowser']),
)
def test_enum_in_enum_out(self):
Season = self.Season
self.assertIs(Season(Season.WINTER), Season.WINTER)
def test_enum_value(self):
Season = self.Season
self.assertEqual(Season.SPRING.value, 1)
def test_intenum_value(self):
self.assertEqual(IntStooges.CURLY.value, 2)
def test_enum(self):
Season = self.Season
lst = list(Season)
self.assertEqual(len(lst), len(Season))
self.assertEqual(len(Season), 4, Season)
self.assertEqual(
[Season.SPRING, Season.SUMMER, Season.AUTUMN, Season.WINTER], lst)
for i, season in enumerate('SPRING SUMMER AUTUMN WINTER'.split(), 1):
e = Season(i)
self.assertEqual(e, getattr(Season, season))
self.assertEqual(e.value, i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, season)
self.assertIn(e, Season)
self.assertIs(type(e), Season)
self.assertIsInstance(e, Season)
self.assertEqual(str(e), 'Season.' + season)
self.assertEqual(
repr(e),
'<Season.{0}: {1}>'.format(season, i),
)
def test_value_name(self):
Season = self.Season
self.assertEqual(Season.SPRING.name, 'SPRING')
self.assertEqual(Season.SPRING.value, 1)
with self.assertRaises(AttributeError):
Season.SPRING.name = 'invierno'
with self.assertRaises(AttributeError):
Season.SPRING.value = 2
def test_changing_member(self):
Season = self.Season
with self.assertRaises(AttributeError):
Season.WINTER = 'really cold'
def test_attribute_deletion(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
def spam(cls):
pass
self.assertTrue(hasattr(Season, 'spam'))
del Season.spam
self.assertFalse(hasattr(Season, 'spam'))
with self.assertRaises(AttributeError):
del Season.SPRING
with self.assertRaises(AttributeError):
del Season.DRY
with self.assertRaises(AttributeError):
del Season.SPRING.name
def test_invalid_names(self):
with self.assertRaises(ValueError):
class Wrong(Enum):
mro = 9
with self.assertRaises(ValueError):
class Wrong(Enum):
_create_= 11
with self.assertRaises(ValueError):
class Wrong(Enum):
_get_mixins_ = 9
with self.assertRaises(ValueError):
class Wrong(Enum):
_find_new_ = 1
with self.assertRaises(ValueError):
class Wrong(Enum):
_any_name_ = 9
def test_contains(self):
Season = self.Season
self.assertIn(Season.AUTUMN, Season)
self.assertNotIn(3, Season)
val = Season(3)
self.assertIn(val, Season)
class OtherEnum(Enum):
one = 1; two = 2
self.assertNotIn(OtherEnum.two, Season)
def test_comparisons(self):
Season = self.Season
with self.assertRaises(TypeError):
Season.SPRING < Season.WINTER
with self.assertRaises(TypeError):
Season.SPRING > 4
self.assertNotEqual(Season.SPRING, 1)
class Part(Enum):
SPRING = 1
CLIP = 2
BARREL = 3
self.assertNotEqual(Season.SPRING, Part.SPRING)
with self.assertRaises(TypeError):
Season.SPRING < Part.CLIP
def test_enum_duplicates(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = FALL = 3
WINTER = 4
ANOTHER_SPRING = 1
lst = list(Season)
self.assertEqual(
lst,
[Season.SPRING, Season.SUMMER,
Season.AUTUMN, Season.WINTER,
])
self.assertIs(Season.FALL, Season.AUTUMN)
self.assertEqual(Season.FALL.value, 3)
self.assertEqual(Season.AUTUMN.value, 3)
self.assertIs(Season(3), Season.AUTUMN)
self.assertIs(Season(1), Season.SPRING)
self.assertEqual(Season.FALL.name, 'AUTUMN')
self.assertEqual(
[k for k,v in Season.__members__.items() if v.name != k],
['FALL', 'ANOTHER_SPRING'],
)
def test_duplicate_name(self):
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
red = 4
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
def red(self):
return 'red'
with self.assertRaises(TypeError):
class Color(Enum):
@property
def red(self):
return 'redder'
red = 1
green = 2
blue = 3
def test_enum_with_value_name(self):
class Huh(Enum):
name = 1
value = 2
self.assertEqual(
list(Huh),
[Huh.name, Huh.value],
)
self.assertIs(type(Huh.name), Huh)
self.assertEqual(Huh.name.name, 'name')
self.assertEqual(Huh.name.value, 1)
def test_format_enum(self):
Season = self.Season
self.assertEqual('{}'.format(Season.SPRING),
'{}'.format(str(Season.SPRING)))
self.assertEqual( '{:}'.format(Season.SPRING),
'{:}'.format(str(Season.SPRING)))
self.assertEqual('{:20}'.format(Season.SPRING),
'{:20}'.format(str(Season.SPRING)))
self.assertEqual('{:^20}'.format(Season.SPRING),
'{:^20}'.format(str(Season.SPRING)))
self.assertEqual('{:>20}'.format(Season.SPRING),
'{:>20}'.format(str(Season.SPRING)))
self.assertEqual('{:<20}'.format(Season.SPRING),
'{:<20}'.format(str(Season.SPRING)))
def test_format_enum_custom(self):
class TestFloat(float, Enum):
one = 1.0
two = 2.0
def __format__(self, spec):
return 'TestFloat success!'
self.assertEqual('{}'.format(TestFloat.one), 'TestFloat success!')
def assertFormatIsValue(self, spec, member):
self.assertEqual(spec.format(member), spec.format(member.value))
def test_format_enum_date(self):
Holiday = self.Holiday
self.assertFormatIsValue('{}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:^20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:>20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:<20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:%Y %m}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:%Y %m %M:00}', Holiday.IDES_OF_MARCH)
def test_format_enum_float(self):
Konstants = self.Konstants
self.assertFormatIsValue('{}', Konstants.TAU)
self.assertFormatIsValue('{:}', Konstants.TAU)
self.assertFormatIsValue('{:20}', Konstants.TAU)
self.assertFormatIsValue('{:^20}', Konstants.TAU)
self.assertFormatIsValue('{:>20}', Konstants.TAU)
self.assertFormatIsValue('{:<20}', Konstants.TAU)
self.assertFormatIsValue('{:n}', Konstants.TAU)
self.assertFormatIsValue('{:5.2}', Konstants.TAU)
self.assertFormatIsValue('{:f}', Konstants.TAU)
def test_format_enum_int(self):
Grades = self.Grades
self.assertFormatIsValue('{}', Grades.C)
self.assertFormatIsValue('{:}', Grades.C)
self.assertFormatIsValue('{:20}', Grades.C)
self.assertFormatIsValue('{:^20}', Grades.C)
self.assertFormatIsValue('{:>20}', Grades.C)
self.assertFormatIsValue('{:<20}', Grades.C)
self.assertFormatIsValue('{:+}', Grades.C)
self.assertFormatIsValue('{:08X}', Grades.C)
self.assertFormatIsValue('{:b}', Grades.C)
def test_format_enum_str(self):
Directional = self.Directional
self.assertFormatIsValue('{}', Directional.WEST)
self.assertFormatIsValue('{:}', Directional.WEST)
self.assertFormatIsValue('{:20}', Directional.WEST)
self.assertFormatIsValue('{:^20}', Directional.WEST)
self.assertFormatIsValue('{:>20}', Directional.WEST)
self.assertFormatIsValue('{:<20}', Directional.WEST)
def test_hash(self):
Season = self.Season
dates = {}
dates[Season.WINTER] = '1225'
dates[Season.SPRING] = '0315'
dates[Season.SUMMER] = '0704'
dates[Season.AUTUMN] = '1031'
self.assertEqual(dates[Season.AUTUMN], '1031')
def test_intenum_from_scratch(self):
class phy(int, Enum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_intenum_inherited(self):
class IntEnum(int, Enum):
pass
class phy(IntEnum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_from_scratch(self):
class phy(float, Enum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_inherited(self):
class FloatEnum(float, Enum):
pass
class phy(FloatEnum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_strenum_from_scratch(self):
class phy(str, Enum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
def test_strenum_inherited(self):
class StrEnum(str, Enum):
pass
class phy(StrEnum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
def test_intenum(self):
class WeekDay(IntEnum):
SUNDAY = 1
MONDAY = 2
TUESDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertEqual(['a', 'b', 'c'][WeekDay.MONDAY], 'c')
self.assertEqual([i for i in range(WeekDay.TUESDAY)], [0, 1, 2])
lst = list(WeekDay)
self.assertEqual(len(lst), len(WeekDay))
self.assertEqual(len(WeekDay), 7)
target = 'SUNDAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY'
target = target.split()
for i, weekday in enumerate(target, 1):
e = WeekDay(i)
self.assertEqual(e, i)
self.assertEqual(int(e), i)
self.assertEqual(e.name, weekday)
self.assertIn(e, WeekDay)
self.assertEqual(lst.index(e)+1, i)
self.assertTrue(0 < e < 8)
self.assertIs(type(e), WeekDay)
self.assertIsInstance(e, int)
self.assertIsInstance(e, Enum)
def test_intenum_duplicates(self):
class WeekDay(IntEnum):
SUNDAY = 1
MONDAY = 2
TUESDAY = TEUSDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertIs(WeekDay.TEUSDAY, WeekDay.TUESDAY)
self.assertEqual(WeekDay(3).name, 'TUESDAY')
self.assertEqual([k for k,v in WeekDay.__members__.items()
if v.name != k], ['TEUSDAY', ])
def test_pickle_enum(self):
if isinstance(Stooges, Exception):
raise Stooges
test_pickle_dump_load(self.assertIs, Stooges.CURLY)
test_pickle_dump_load(self.assertIs, Stooges)
def test_pickle_int(self):
if isinstance(IntStooges, Exception):
raise IntStooges
test_pickle_dump_load(self.assertIs, IntStooges.CURLY)
test_pickle_dump_load(self.assertIs, IntStooges)
def test_pickle_float(self):
if isinstance(FloatStooges, Exception):
raise FloatStooges
test_pickle_dump_load(self.assertIs, FloatStooges.CURLY)
test_pickle_dump_load(self.assertIs, FloatStooges)
def test_pickle_enum_function(self):
if isinstance(Answer, Exception):
raise Answer
test_pickle_dump_load(self.assertIs, Answer.him)
test_pickle_dump_load(self.assertIs, Answer)
def test_pickle_enum_function_with_module(self):
if isinstance(Question, Exception):
raise Question
test_pickle_dump_load(self.assertIs, Question.who)
test_pickle_dump_load(self.assertIs, Question)
def test_enum_function_with_qualname(self):
if isinstance(Theory, Exception):
raise Theory
self.assertEqual(Theory.__qualname__, 'spanish_inquisition')
def test_class_nested_enum_and_pickle_protocol_four(self):
# would normally just have this directly in the class namespace
class NestedEnum(Enum):
twigs = 'common'
shiny = 'rare'
self.__class__.NestedEnum = NestedEnum
self.NestedEnum.__qualname__ = '%s.NestedEnum' % self.__class__.__name__
test_pickle_exception(
self.assertRaises, PicklingError, self.NestedEnum.twigs,
protocol=(0, 3))
test_pickle_dump_load(self.assertIs, self.NestedEnum.twigs,
protocol=(4, HIGHEST_PROTOCOL))
def test_exploding_pickle(self):
BadPickle = Enum(
'BadPickle', 'dill sweet bread-n-butter', module=__name__)
globals()['BadPickle'] = BadPickle
# now break BadPickle to test exception raising
enum._make_class_unpicklable(BadPickle)
test_pickle_exception(self.assertRaises, TypeError, BadPickle.dill)
test_pickle_exception(self.assertRaises, PicklingError, BadPickle)
def test_string_enum(self):
class SkillLevel(str, Enum):
master = 'what is the sound of one hand clapping?'
journeyman = 'why did the chicken cross the road?'
apprentice = 'knock, knock!'
self.assertEqual(SkillLevel.apprentice, 'knock, knock!')
def test_getattr_getitem(self):
class Period(Enum):
morning = 1
noon = 2
evening = 3
night = 4
self.assertIs(Period(2), Period.noon)
self.assertIs(getattr(Period, 'night'), Period.night)
self.assertIs(Period['morning'], Period.morning)
def test_getattr_dunder(self):
Season = self.Season
self.assertTrue(getattr(Season, '__eq__'))
def test_iteration_order(self):
class Season(Enum):
SUMMER = 2
WINTER = 4
AUTUMN = 3
SPRING = 1
self.assertEqual(
list(Season),
[Season.SUMMER, Season.WINTER, Season.AUTUMN, Season.SPRING],
)
def test_reversed_iteration_order(self):
self.assertEqual(
list(reversed(self.Season)),
[self.Season.WINTER, self.Season.AUTUMN, self.Season.SUMMER,
self.Season.SPRING]
)
def test_programatic_function_string(self):
SummerMonth = Enum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programatic_function_string_list(self):
SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'])
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programatic_function_iterable(self):
SummerMonth = Enum(
'SummerMonth',
(('june', 1), ('july', 2), ('august', 3))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programatic_function_from_dict(self):
SummerMonth = Enum(
'SummerMonth',
OrderedDict((('june', 1), ('july', 2), ('august', 3)))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programatic_function_type(self):
SummerMonth = Enum('SummerMonth', 'june july august', type=int)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programatic_function_type_from_subclass(self):
SummerMonth = IntEnum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_subclassing(self):
if isinstance(Name, Exception):
raise Name
self.assertEqual(Name.BDFL, 'Guido van Rossum')
self.assertTrue(Name.BDFL, Name('Guido van Rossum'))
self.assertIs(Name.BDFL, getattr(Name, 'BDFL'))
test_pickle_dump_load(self.assertIs, Name.BDFL)
def test_extending(self):
class Color(Enum):
red = 1
green = 2
blue = 3
with self.assertRaises(TypeError):
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
def test_exclude_methods(self):
class whatever(Enum):
this = 'that'
these = 'those'
def really(self):
return 'no, not %s' % self.value
self.assertIsNot(type(whatever.really), whatever)
self.assertEqual(whatever.this.really(), 'no, not that')
def test_wrong_inheritance_order(self):
with self.assertRaises(TypeError):
class Wrong(Enum, str):
NotHere = 'error before this point'
def test_intenum_transitivity(self):
class number(IntEnum):
one = 1
two = 2
three = 3
class numero(IntEnum):
uno = 1
dos = 2
tres = 3
self.assertEqual(number.one, numero.uno)
self.assertEqual(number.two, numero.dos)
self.assertEqual(number.three, numero.tres)
def test_wrong_enum_in_call(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_wrong_enum_in_mixed_call(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_mixed_enum_in_call_1(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertIs(Monochrome(Gender.female), Monochrome.white)
def test_mixed_enum_in_call_2(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertIs(Monochrome(Gender.male), Monochrome.black)
def test_flufl_enum(self):
class Fluflnum(Enum):
def __int__(self):
return int(self.value)
class MailManOptions(Fluflnum):
option1 = 1
option2 = 2
option3 = 3
self.assertEqual(int(MailManOptions.option1), 1)
def test_introspection(self):
class Number(IntEnum):
one = 100
two = 200
self.assertIs(Number.one._member_type_, int)
self.assertIs(Number._member_type_, int)
class String(str, Enum):
yarn = 'soft'
rope = 'rough'
wire = 'hard'
self.assertIs(String.yarn._member_type_, str)
self.assertIs(String._member_type_, str)
class Plain(Enum):
vanilla = 'white'
one = 1
self.assertIs(Plain.vanilla._member_type_, object)
self.assertIs(Plain._member_type_, object)
def test_no_such_enum_member(self):
class Color(Enum):
red = 1
green = 2
blue = 3
with self.assertRaises(ValueError):
Color(4)
with self.assertRaises(KeyError):
Color['chartreuse']
def test_new_repr(self):
class Color(Enum):
red = 1
green = 2
blue = 3
def __repr__(self):
return "don't you just love shades of %s?" % self.name
self.assertEqual(
repr(Color.blue),
"don't you just love shades of blue?",
)
def test_inherited_repr(self):
class MyEnum(Enum):
def __repr__(self):
return "My name is %s." % self.name
class MyIntEnum(int, MyEnum):
this = 1
that = 2
theother = 3
self.assertEqual(repr(MyIntEnum.that), "My name is that.")
def test_multiple_mixin_mro(self):
class auto_enum(type(Enum)):
def __new__(metacls, cls, bases, classdict):
temp = type(classdict)()
names = set(classdict._member_names)
i = 0
for k in classdict._member_names:
v = classdict[k]
if v is Ellipsis:
v = i
else:
i = v
i += 1
temp[k] = v
for k, v in classdict.items():
if k not in names:
temp[k] = v
return super(auto_enum, metacls).__new__(
metacls, cls, bases, temp)
class AutoNumberedEnum(Enum, metaclass=auto_enum):
pass
class AutoIntEnum(IntEnum, metaclass=auto_enum):
pass
class TestAutoNumber(AutoNumberedEnum):
a = ...
b = 3
c = ...
class TestAutoInt(AutoIntEnum):
a = ...
b = 3
c = ...
def test_subclasses_with_getnewargs(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs__(self):
return self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_getnewargs_ex(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs_ex__(self):
return self._args, {}
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5, protocol=(4, 4))
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y, protocol=(4, 4))
test_pickle_dump_load(self.assertIs, NEI)
    def test_subclasses_with_reduce(self):
        """Pickle round-trips for an int mix-in that defines __reduce__."""
        class NamedInt(int):
            __qualname__ = 'NamedInt'  # needed for pickle protocol 4
            def __new__(cls, *args):
                _args = args
                name, *args = args
                if len(args) == 0:
                    raise TypeError("name and value must be specified")
                self = int.__new__(cls, *args)
                self._intname = name
                self._args = _args
                return self
            def __reduce__(self):
                # rebuild by calling the class with the original constructor args
                return self.__class__, self._args
            @property
            def __name__(self):
                return self._intname
            def __repr__(self):
                # repr() is updated to include the name and type info
                return "{}({!r}, {})".format(type(self).__name__,
                                             self.__name__,
                                             int.__repr__(self))
            def __str__(self):
                # str() is unchanged, even if it relies on the repr() fallback
                base = int
                base_str = base.__str__
                if base_str.__objclass__ is object:
                    return base.__repr__(self)
                return base_str(self)
            # for simplicity, we only define one operator that
            # propagates expressions
            def __add__(self, other):
                temp = int(self) + int( other)
                if isinstance(self, NamedInt) and isinstance(other, NamedInt):
                    return NamedInt(
                        '({0} + {1})'.format(self.__name__, other.__name__),
                        temp )
                else:
                    return temp
        class NEI(NamedInt, Enum):
            __qualname__ = 'NEI'  # needed for pickle protocol 4
            x = ('the-x', 1)
            y = ('the-y', 2)
        self.assertIs(NEI.__new__, Enum.__new__)
        self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
        # pickle locates classes by qualified name, so both must be visible
        # at module level
        globals()['NamedInt'] = NamedInt
        globals()['NEI'] = NEI
        NI5 = NamedInt('test', 5)
        self.assertEqual(NI5, 5)
        test_pickle_dump_load(self.assertEqual, NI5, 5)
        self.assertEqual(NEI.y.value, 2)
        test_pickle_dump_load(self.assertIs, NEI.y)
        test_pickle_dump_load(self.assertIs, NEI)
    def test_subclasses_with_reduce_ex(self):
        """Pickle round-trips for an int mix-in that defines __reduce_ex__."""
        class NamedInt(int):
            __qualname__ = 'NamedInt'  # needed for pickle protocol 4
            def __new__(cls, *args):
                _args = args
                name, *args = args
                if len(args) == 0:
                    raise TypeError("name and value must be specified")
                self = int.__new__(cls, *args)
                self._intname = name
                self._args = _args
                return self
            def __reduce_ex__(self, proto):
                # rebuild by calling the class with the original constructor args,
                # regardless of the requested pickle protocol
                return self.__class__, self._args
            @property
            def __name__(self):
                return self._intname
            def __repr__(self):
                # repr() is updated to include the name and type info
                return "{}({!r}, {})".format(type(self).__name__,
                                             self.__name__,
                                             int.__repr__(self))
            def __str__(self):
                # str() is unchanged, even if it relies on the repr() fallback
                base = int
                base_str = base.__str__
                if base_str.__objclass__ is object:
                    return base.__repr__(self)
                return base_str(self)
            # for simplicity, we only define one operator that
            # propagates expressions
            def __add__(self, other):
                temp = int(self) + int( other)
                if isinstance(self, NamedInt) and isinstance(other, NamedInt):
                    return NamedInt(
                        '({0} + {1})'.format(self.__name__, other.__name__),
                        temp )
                else:
                    return temp
        class NEI(NamedInt, Enum):
            __qualname__ = 'NEI'  # needed for pickle protocol 4
            x = ('the-x', 1)
            y = ('the-y', 2)
        self.assertIs(NEI.__new__, Enum.__new__)
        self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
        # pickle locates classes by qualified name, so both must be visible
        # at module level
        globals()['NamedInt'] = NamedInt
        globals()['NEI'] = NEI
        NI5 = NamedInt('test', 5)
        self.assertEqual(NI5, 5)
        test_pickle_dump_load(self.assertEqual, NI5, 5)
        self.assertEqual(NEI.y.value, 2)
        test_pickle_dump_load(self.assertIs, NEI.y)
        test_pickle_dump_load(self.assertIs, NEI)
    def test_subclasses_without_direct_pickle_support(self):
        """Pickling members of an unpicklable mix-in fails cleanly.

        NamedInt provides no pickle helper (__getnewargs__/__reduce__/...),
        so both the members and the enum class itself must raise when pickled.
        """
        class NamedInt(int):
            __qualname__ = 'NamedInt'
            def __new__(cls, *args):
                _args = args
                name, *args = args
                if len(args) == 0:
                    raise TypeError("name and value must be specified")
                self = int.__new__(cls, *args)
                self._intname = name
                self._args = _args
                return self
            @property
            def __name__(self):
                return self._intname
            def __repr__(self):
                # repr() is updated to include the name and type info
                return "{}({!r}, {})".format(type(self).__name__,
                                             self.__name__,
                                             int.__repr__(self))
            def __str__(self):
                # str() is unchanged, even if it relies on the repr() fallback
                base = int
                base_str = base.__str__
                if base_str.__objclass__ is object:
                    return base.__repr__(self)
                return base_str(self)
            # for simplicity, we only define one operator that
            # propagates expressions
            def __add__(self, other):
                temp = int(self) + int( other)
                if isinstance(self, NamedInt) and isinstance(other, NamedInt):
                    return NamedInt(
                        '({0} + {1})'.format(self.__name__, other.__name__),
                        temp )
                else:
                    return temp
        class NEI(NamedInt, Enum):
            __qualname__ = 'NEI'
            x = ('the-x', 1)
            y = ('the-y', 2)
        self.assertIs(NEI.__new__, Enum.__new__)
        self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
        globals()['NamedInt'] = NamedInt
        globals()['NEI'] = NEI
        NI5 = NamedInt('test', 5)
        self.assertEqual(NI5, 5)
        self.assertEqual(NEI.y.value, 2)
        # pickling must fail for both a member and the class
        test_pickle_exception(self.assertRaises, TypeError, NEI.x)
        test_pickle_exception(self.assertRaises, PicklingError, NEI)
    def test_subclasses_without_direct_pickle_support_using_name(self):
        """An unpicklable mix-in can be worked around with by-name reduction.

        NEI overrides __reduce_ex__ to pickle members as getattr(NEI, name)
        rather than pickling the NamedInt state directly.
        """
        class NamedInt(int):
            __qualname__ = 'NamedInt'
            def __new__(cls, *args):
                _args = args
                name, *args = args
                if len(args) == 0:
                    raise TypeError("name and value must be specified")
                self = int.__new__(cls, *args)
                self._intname = name
                self._args = _args
                return self
            @property
            def __name__(self):
                return self._intname
            def __repr__(self):
                # repr() is updated to include the name and type info
                return "{}({!r}, {})".format(type(self).__name__,
                                             self.__name__,
                                             int.__repr__(self))
            def __str__(self):
                # str() is unchanged, even if it relies on the repr() fallback
                base = int
                base_str = base.__str__
                if base_str.__objclass__ is object:
                    return base.__repr__(self)
                return base_str(self)
            # for simplicity, we only define one operator that
            # propagates expressions
            def __add__(self, other):
                temp = int(self) + int( other)
                if isinstance(self, NamedInt) and isinstance(other, NamedInt):
                    return NamedInt(
                        '({0} + {1})'.format(self.__name__, other.__name__),
                        temp )
                else:
                    return temp
        class NEI(NamedInt, Enum):
            __qualname__ = 'NEI'
            x = ('the-x', 1)
            y = ('the-y', 2)
            def __reduce_ex__(self, proto):
                # recreate the member via attribute lookup on the class
                return getattr, (self.__class__, self._name_)
        self.assertIs(NEI.__new__, Enum.__new__)
        self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
        globals()['NamedInt'] = NamedInt
        globals()['NEI'] = NEI
        NI5 = NamedInt('test', 5)
        self.assertEqual(NI5, 5)
        self.assertEqual(NEI.y.value, 2)
        test_pickle_dump_load(self.assertIs, NEI.y)
        test_pickle_dump_load(self.assertIs, NEI)
    def test_tuple_subclass(self):
        """A tuple mix-in keeps tuple behavior and pickles by member identity."""
        class SomeTuple(tuple, Enum):
            __qualname__ = 'SomeTuple'  # needed for pickle protocol 4
            first = (1, 'for the money')
            second = (2, 'for the show')
            third = (3, 'for the music')
        self.assertIs(type(SomeTuple.first), SomeTuple)
        self.assertIsInstance(SomeTuple.second, tuple)
        self.assertEqual(SomeTuple.third, (3, 'for the music'))
        # make the class visible at module level so pickle can find it
        globals()['SomeTuple'] = SomeTuple
        test_pickle_dump_load(self.assertIs, SomeTuple.first)
def test_duplicate_values_give_unique_enum_items(self):
class AutoNumber(Enum):
first = ()
second = ()
third = ()
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
self.assertEqual(
list(AutoNumber),
[AutoNumber.first, AutoNumber.second, AutoNumber.third],
)
self.assertEqual(int(AutoNumber.second), 2)
self.assertEqual(AutoNumber.third.value, 3)
self.assertIs(AutoNumber(1), AutoNumber.first)
def test_inherited_new_from_enhanced_enum(self):
class AutoNumber(Enum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
class Color(AutoNumber):
red = ()
green = ()
blue = ()
self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
self.assertEqual(list(map(int, Color)), [1, 2, 3])
def test_inherited_new_from_mixed_enum(self):
class AutoNumber(IntEnum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = int.__new__(cls, value)
obj._value_ = value
return obj
class Color(AutoNumber):
red = ()
green = ()
blue = ()
self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
self.assertEqual(list(map(int, Color)), [1, 2, 3])
def test_equality(self):
class AlwaysEqual:
def __eq__(self, other):
return True
class OrdinaryEnum(Enum):
a = 1
self.assertEqual(AlwaysEqual(), OrdinaryEnum.a)
self.assertEqual(OrdinaryEnum.a, AlwaysEqual())
def test_ordered_mixin(self):
class OrderedEnum(Enum):
def __ge__(self, other):
if self.__class__ is other.__class__:
return self._value_ >= other._value_
return NotImplemented
def __gt__(self, other):
if self.__class__ is other.__class__:
return self._value_ > other._value_
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self._value_ <= other._value_
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return self._value_ < other._value_
return NotImplemented
class Grade(OrderedEnum):
A = 5
B = 4
C = 3
D = 2
F = 1
self.assertGreater(Grade.A, Grade.B)
self.assertLessEqual(Grade.F, Grade.C)
self.assertLess(Grade.D, Grade.A)
self.assertGreaterEqual(Grade.B, Grade.B)
self.assertEqual(Grade.B, Grade.B)
self.assertNotEqual(Grade.C, Grade.D)
    def test_extending2(self):
        """An Enum that already defines members cannot be subclassed."""
        class Shade(Enum):
            def shade(self):
                print(self.name)
        class Color(Shade):
            red = 1
            green = 2
            blue = 3
        # Color has members, so extending it must raise
        with self.assertRaises(TypeError):
            class MoreColor(Color):
                cyan = 4
                magenta = 5
                yellow = 6
    def test_extending3(self):
        """Member-less Enum classes may be subclassed repeatedly."""
        class Shade(Enum):
            def shade(self):
                return self.name
        class Color(Shade):
            def hex(self):
                return '%s hexlified!' % self.value
        class MoreColor(Color):
            cyan = 4
            magenta = 5
            yellow = 6
        self.assertEqual(MoreColor.magenta.hex(), '5 hexlified!')
def test_no_duplicates(self):
class UniqueEnum(Enum):
def __init__(self, *args):
cls = self.__class__
if any(self.value == e.value for e in cls):
a = self.name
e = cls(self.value).name
raise ValueError(
"aliases not allowed in UniqueEnum: %r --> %r"
% (a, e)
)
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
with self.assertRaises(ValueError):
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
grene = 2
    def test_init(self):
        """Tuple member values are unpacked into __init__ arguments."""
        class Planet(Enum):
            MERCURY = (3.303e+23, 2.4397e6)
            VENUS = (4.869e+24, 6.0518e6)
            EARTH = (5.976e+24, 6.37814e6)
            MARS = (6.421e+23, 3.3972e6)
            JUPITER = (1.9e+27, 7.1492e7)
            SATURN = (5.688e+26, 6.0268e7)
            URANUS = (8.686e+25, 2.5559e7)
            NEPTUNE = (1.024e+26, 2.4746e7)
            def __init__(self, mass, radius):
                self.mass = mass       # in kilograms
                self.radius = radius   # in meters
            @property
            def surface_gravity(self):
                # universal gravitational constant  (m3 kg-1 s-2)
                G = 6.67300E-11
                return G * self.mass / (self.radius * self.radius)
        self.assertEqual(round(Planet.EARTH.surface_gravity, 2), 9.80)
        # .value is still the whole tuple, not the unpacked attributes
        self.assertEqual(Planet.EARTH.value, (5.976e+24, 6.37814e6))
def test_nonhash_value(self):
class AutoNumberInAList(Enum):
def __new__(cls):
value = [len(cls.__members__) + 1]
obj = object.__new__(cls)
obj._value_ = value
return obj
class ColorInAList(AutoNumberInAList):
red = ()
green = ()
blue = ()
self.assertEqual(list(ColorInAList), [ColorInAList.red, ColorInAList.green, ColorInAList.blue])
for enum, value in zip(ColorInAList, range(3)):
value += 1
self.assertEqual(enum.value, [value])
self.assertIs(ColorInAList([value]), enum)
def test_conflicting_types_resolved_in_new(self):
class LabelledIntEnum(int, Enum):
def __new__(cls, *args):
value, label = args
obj = int.__new__(cls, value)
obj.label = label
obj._value_ = value
return obj
class LabelledList(LabelledIntEnum):
unprocessed = (1, "Unprocessed")
payment_complete = (2, "Payment Complete")
self.assertEqual(list(LabelledList), [LabelledList.unprocessed, LabelledList.payment_complete])
self.assertEqual(LabelledList.unprocessed, 1)
self.assertEqual(LabelledList(1), LabelledList.unprocessed)
class TestUnique(unittest.TestCase):
    """Tests for the enum.unique class decorator."""
    def test_unique_clean(self):
        """unique() accepts enumerations that contain no aliases."""
        @unique
        class Clean(Enum):
            one = 1
            two = 'dos'
            tres = 4.0
        @unique
        class Cleaner(IntEnum):
            single = 1
            double = 2
            triple = 3
    def test_unique_dirty(self):
        """unique() raises ValueError naming every alias it finds."""
        with self.assertRaisesRegex(ValueError, 'tres.*one'):
            @unique
            class Dirty(Enum):
                one = 1
                two = 'dos'
                tres = 1
        with self.assertRaisesRegex(
                ValueError,
                'double.*single.*turkey.*triple',
                ):
            @unique
            class Dirtier(IntEnum):
                single = 1
                double = 1
                triple = 3
                turkey = 3
expected_help_output = """
Help on class Color in module %s:
class Color(enum.Enum)
| Method resolution order:
| Color
| enum.Enum
| builtins.object
|\x20\x20
| Data and other attributes defined here:
|\x20\x20
| blue = <Color.blue: 3>
|\x20\x20
| green = <Color.green: 2>
|\x20\x20
| red = <Color.red: 1>
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.Enum:
|\x20\x20
| name
| The name of the Enum member.
|\x20\x20
| value
| The value of the Enum member.
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.EnumMeta:
|\x20\x20
| __members__
| Returns a mapping of member name->value.
|\x20\x20\x20\x20\x20\x20
| This mapping lists all enum members, including aliases. Note that this
| is a read-only view of the internal mapping.
""".strip()
class TestStdLib(unittest.TestCase):
    """Checks that Enum cooperates with pydoc and inspect."""
    class Color(Enum):
        red = 1
        green = 2
        blue = 3
    def test_pydoc(self):
        """help(Color) output must match expected_help_output verbatim."""
        # indirectly test __objclass__
        expected_text = expected_help_output % __name__
        output = StringIO()
        helper = pydoc.Helper(output=output)
        helper(self.Color)
        result = output.getvalue().strip()
        self.assertEqual(result, expected_text)
    def test_inspect_getmembers(self):
        """inspect.getmembers must see members, descriptors, and metadata."""
        values = dict((
                ('__class__', EnumMeta),
                ('__doc__', None),
                ('__members__', self.Color.__members__),
                ('__module__', __name__),
                ('blue', self.Color.blue),
                ('green', self.Color.green),
                ('name', Enum.__dict__['name']),
                ('red', self.Color.red),
                ('value', Enum.__dict__['value']),
                ))
        result = dict(inspect.getmembers(self.Color))
        self.assertEqual(values.keys(), result.keys())
        failed = False
        for k in values.keys():
            if result[k] != values[k]:
                print()
                print('\n%s\n key: %s\n result: %s\nexpected: %s\n%s\n' %
                        ('=' * 75, k, result[k], values[k], '=' * 75), sep='')
                failed = True
        if failed:
            self.fail("result does not equal expected, see print above")
    def test_inspect_classify_class_attrs(self):
        """classify_class_attrs must report the correct defining classes."""
        # indirectly test __objclass__
        from inspect import Attribute
        values = [
                Attribute(name='__class__', kind='data',
                          defining_class=object, object=EnumMeta),
                Attribute(name='__doc__', kind='data',
                          defining_class=self.Color, object=None),
                Attribute(name='__members__', kind='property',
                          defining_class=EnumMeta, object=EnumMeta.__members__),
                Attribute(name='__module__', kind='data',
                          defining_class=self.Color, object=__name__),
                Attribute(name='blue', kind='data',
                          defining_class=self.Color, object=self.Color.blue),
                Attribute(name='green', kind='data',
                          defining_class=self.Color, object=self.Color.green),
                Attribute(name='red', kind='data',
                          defining_class=self.Color, object=self.Color.red),
                Attribute(name='name', kind='data',
                          defining_class=Enum, object=Enum.__dict__['name']),
                Attribute(name='value', kind='data',
                          defining_class=Enum, object=Enum.__dict__['value']),
                ]
        values.sort(key=lambda item: item.name)
        result = list(inspect.classify_class_attrs(self.Color))
        result.sort(key=lambda item: item.name)
        failed = False
        for v, r in zip(values, result):
            if r != v:
                print('\n%s\n%s\n%s\n%s\n' % ('=' * 75, r, v, '=' * 75), sep='')
                failed = True
        if failed:
            self.fail("result does not equal expected, see print above")
# Run the full test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
Orav/kbengine
|
kbe/src/lib/python/Lib/test/test_enum.py
|
Python
|
lgpl-3.0
| 58,305
|
[
"MOE"
] |
4c93d2ba80fc0913c0255ee9582fb598217d8ec53a5d8e110d5f550a3af78407
|
#!/usr/bin/env python
"""[License: GNU General Public License v3 (GPLv3)]
This file is part of FuMa.
FuMa is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
FuMa is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Documentation as defined by:
<http://epydoc.sourceforge.net/manual-fields.html#fields-synonyms>
"""
#import gffutils
import HTSeq
import logging
class GeneAnnotation:
	"""Gene annotation acting as a virtual reference genome.

	It is only used to map genes onto, so that they can be indexed and
	queried by genomic position quickly (backed by an HTSeq
	GenomicArrayOfSets, which allows overlapping annotations).
	"""
	logger = logging.getLogger("FuMa::GeneAnnotation")

	def __init__(self, name):
		self.n = 0        # number of annotations added so far
		self.name = name
		self.gas = HTSeq.GenomicArrayOfSets("auto", stranded=False)

	def add_annotation(self, gene, chromosome, start, stop):
		"""Register *gene* as covering chromosome:start-stop."""
		self.gas[HTSeq.GenomicInterval(chromosome, start, stop)] += gene
		self.n += 1

	def get_annotations(self, chromosome, position):
		"""Yield every gene overlapping the given genomic position."""
		for annotation in self.gas[HTSeq.GenomicPosition(chromosome, position)]:
			yield annotation

	def _genes_in_chromosome(self, chromosome_name, chromosome_obj):
		"""Return the set of genes annotated anywhere on one chromosome."""
		interval = HTSeq.GenomicInterval(chromosome_name, 0, chromosome_obj['.'].iv.end)
		# set().union(*...) replaces reduce(lambda s1, s2: s1 | s2, ...):
		# the bare `reduce` builtin only exists on Python 2 and raised
		# NameError here on Python 3.
		return set().union(*[step[1] for step in self.gas[interval].steps()])

	def __str__(self):
		out = "[ Gene annotation: " + str(self.name) + " (genes: " + str(len(self)) + ")]"
		for chromosome_name, chromosome_obj in self.gas.chrom_vectors.items():
			out += "Chromosome: " + str(chromosome_name) + "\n"
			for gene in self._genes_in_chromosome(chromosome_name, chromosome_obj):
				out += " - " + str(gene) + "\n"
		return out

	def __len__(self):
		# number of added annotations, not the number of distinct genes
		return self.n

	def __iter__(self):
		for chromosome_name, chromosome_obj in self.gas.chrom_vectors.items():
			for gene in self._genes_in_chromosome(chromosome_name, chromosome_obj):
				yield gene
|
yhoogstrate/fuma
|
fuma/GeneAnnotation.py
|
Python
|
gpl-3.0
| 2,648
|
[
"HTSeq"
] |
96b43c138947d325b59e5cbb7276d6da4f3c5d8424e129859547a33a11ae358b
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from future.builtins import map, range, zip
from six import StringIO
from unittest import TestCase, main
from functools import partial
from skbio import (Sequence, DNA, RNA, Protein, SequenceCollection, Alignment)
from skbio.io import FASTAFormatError
from skbio.io.fasta import (
_fasta_sniffer, _fasta_to_generator, _fasta_to_biological_sequence,
_fasta_to_dna_sequence, _fasta_to_rna_sequence, _fasta_to_protein_sequence,
_fasta_to_sequence_collection, _fasta_to_alignment, _generator_to_fasta,
_biological_sequence_to_fasta, _dna_sequence_to_fasta,
_rna_sequence_to_fasta, _protein_sequence_to_fasta,
_sequence_collection_to_fasta, _alignment_to_fasta)
from skbio.util import get_data_path
class SnifferTests(TestCase):
    """The FASTA sniffer accepts valid FASTA files and rejects everything else."""
    def setUp(self):
        # files the sniffer should positively identify as FASTA
        self.positive_fps = list(map(get_data_path, [
            'fasta_5_blanks_start_of_file',
            'fasta_5_ws_lines_start_of_file',
            'fasta_blanks_end_of_file',
            'fasta_ws_lines_end_of_file',
            'fasta_blank_lines_between_records',
            'fasta_3_seqs_defaults',
            'fasta_max_width_1',
            'fasta_single_bio_seq_non_defaults',
            'fasta_single_prot_seq_non_defaults',
            'fasta_3_seqs_non_defaults',
            'fasta_max_width_5',
            'fasta_single_dna_seq_defaults',
            'fasta_single_rna_seq_defaults',
            'fasta_description_newline_replacement_empty_str',
            'fasta_multi_seq',
            'fasta_single_dna_seq_non_defaults',
            'fasta_single_rna_seq_non_defaults',
            'fasta_description_newline_replacement_multi_char',
            'fasta_prot_seqs_odd_labels',
            'fasta_single_seq',
            'fasta_id_whitespace_replacement_empty_str',
            'fasta_sequence_collection_different_type',
            'fasta_id_whitespace_replacement_multi_char',
            'fasta_single_bio_seq_defaults',
            'fasta_single_prot_seq_defaults',
            'fasta_10_seqs',
            'fasta_invalid_after_10_seqs',
            'fasta_mixed_qual_scores',
            'qual_invalid_qual_scores_float',
            'qual_invalid_qual_scores_string'
        ]))
        # files that must NOT sniff as FASTA: empty/whitespace, malformed
        # FASTA, and QUAL files
        self.negative_fps = list(map(get_data_path, [
            'empty',
            'whitespace_only',
            'fasta_invalid_missing_header',
            'fasta_invalid_blank_line_after_header',
            'fasta_invalid_blank_sequence',
            'fasta_invalid_blank_line_within_sequence',
            'fasta_invalid_whitespace_only_line_within_sequence',
            'fasta_invalid_whitespace_line_after_header',
            'fasta_invalid_missing_seq_data_first',
            'fasta_invalid_missing_seq_data_middle',
            'fasta_invalid_missing_seq_data_last',
            'fasta_invalid_legacy_format',
            'fasta_invalid_whitespace_only_sequence',
            'fasta_id_whitespace_replacement_none',
            'fasta_description_newline_replacement_none',
            'fasta_6_blanks_start_of_file',
            'fasta_6_ws_lines_start_of_file',
            'qual_2_seqs_defaults',
            'qual_3_seqs_defaults',
            'qual_3_seqs_defaults_desc_mismatch',
            'qual_3_seqs_defaults_extra',
            'qual_3_seqs_defaults_id_mismatch',
            'qual_3_seqs_defaults_length_mismatch',
            'qual_3_seqs_non_defaults',
            'qual_description_newline_replacement_empty_str',
            'qual_description_newline_replacement_multi_char',
            'qual_description_newline_replacement_none',
            'qual_id_whitespace_replacement_empty_str',
            'qual_id_whitespace_replacement_multi_char',
            'qual_id_whitespace_replacement_none',
            'qual_invalid_blank_line_within_seq',
            'qual_invalid_legacy_format',
            'qual_invalid_missing_header',
            'qual_invalid_missing_qual_scores_first',
            'qual_invalid_missing_qual_scores_last',
            'qual_invalid_missing_qual_scores_middle',
            'qual_invalid_whitespace_line_in_seq',
            'qual_invalid_blank_line_after_header',
            'qual_invalid_blank_sequence',
            'qual_invalid_whitespace_only_sequence',
            'qual_invalid_ws_line_after_header',
            'qual_max_width_1',
            'qual_max_width_5',
            'qual_multi_seq',
            'qual_multi_seq_roundtrip',
            'qual_prot_seqs_odd_labels',
            'qual_sequence_collection_different_type',
            'qual_single_bio_seq_non_defaults',
            'qual_single_dna_seq_non_defaults',
            'qual_single_prot_seq_non_defaults',
            'qual_single_rna_seq_non_defaults',
            'qual_single_seq',
            'qual_ws_lines_between_records',
            'qual_blank_lines_between_records',
            'qual_5_blanks_start_of_file',
            'qual_5_ws_lines_start_of_file',
            'qual_6_blanks_start_of_file',
            'qual_6_ws_lines_start_of_file',
            'qual_blanks_end_of_file',
            'qual_ws_lines_end_of_file'
        ]))
    def test_positives(self):
        """Every positive file sniffs as FASTA, with no format parameters."""
        for fp in self.positive_fps:
            self.assertEqual(_fasta_sniffer(fp), (True, {}))
    def test_negatives(self):
        """No negative file sniffs as FASTA."""
        for fp in self.negative_fps:
            self.assertEqual(_fasta_sniffer(fp), (False, {}))
class ReaderTests(TestCase):
    def setUp(self):
        """Build fixtures: (expected seqs, reader kwargs, fasta fps, qual fps)
        tuples for valid input, and (fp, kwargs, error, regex) tuples for
        invalid input."""
        # each structure stores the sequence generator results (expanded into a
        # list) that we expect to obtain from reading, matched with kwargs to
        # pass to the reader, and fasta and qual filepaths that should
        # deserialize into the expected generator results
        # empty file shouldn't yield sequences
        self.empty = ([], {}, list(map(get_data_path, ['empty',
                                                       'whitespace_only'])),
                      list(map(get_data_path, ['empty', 'whitespace_only'])))
        # single sequence
        self.single = (
            [Sequence(
                'ACGT-acgt.', id='seq1', description='desc1',
                quality=[10, 20, 30, 10, 0, 0, 0, 88888, 1, 3456])],
            {},
            list(map(get_data_path, ['fasta_single_seq',
                                     'fasta_max_width_1'])),
            list(map(get_data_path, ['qual_single_seq', 'qual_max_width_1']))
        )
        # multiple sequences
        self.multi = (
            [Sequence(
                'ACGT-acgt.', id='seq1', description='desc1',
                quality=[10, 20, 30, 10, 0, 0, 0, 88888, 1, 3456]),
             Sequence('A', id='_____seq__2_', quality=[42]),
             Sequence(
                'AACGGuA', description='desc3', quality=[0, 0, 0, 0, 0, 0, 0]),
             Sequence(
                'ACGTTGCAccGG',
                quality=[55, 10, 0, 999, 1, 1, 8, 775, 40, 10, 10, 0]),
             Sequence('ACGUU', quality=[10, 9, 8, 7, 6]),
             Sequence(
                'pQqqqPPQQQ', id='proteinseq',
                description='detailed description \t\twith new lines',
                quality=[42, 42, 442, 442, 42, 42, 42, 42, 42, 43])],
            {},
            list(map(get_data_path, ['fasta_multi_seq', 'fasta_max_width_5',
                                     'fasta_blank_lines_between_records',
                                     'fasta_ws_lines_between_records',
                                     'fasta_5_blanks_start_of_file',
                                     'fasta_5_ws_lines_start_of_file',
                                     'fasta_6_blanks_start_of_file',
                                     'fasta_6_ws_lines_start_of_file',
                                     'fasta_blanks_end_of_file',
                                     'fasta_ws_lines_end_of_file'])),
            list(map(get_data_path, ['qual_multi_seq', 'qual_max_width_5',
                                     'qual_blank_lines_between_records',
                                     'qual_ws_lines_between_records',
                                     'qual_5_blanks_start_of_file',
                                     'qual_5_ws_lines_start_of_file',
                                     'qual_6_blanks_start_of_file',
                                     'qual_6_ws_lines_start_of_file',
                                     'qual_blanks_end_of_file',
                                     'qual_ws_lines_end_of_file']))
        )
        # test constructor parameter, as well as odd labels (label only
        # containing whitespace, label description preceded by multiple spaces,
        # no id) and leading/trailing whitespace on sequence data. for qual
        # files, in addition to the odd labels, test leading/trailing
        # whitespace on qual scores, as well as strange number formatting.
        # also test that fasta and qual headers do not need to match
        # exactly, only that they need to match exactly after parsing (e.g.,
        # after stripping leading/trailing whitespace from descriptions)
        self.odd_labels_different_type = (
            [Protein('DEFQfp', quality=[0, 0, 1, 5, 44, 0], validate=False),
             Protein(
                'SKBI', description='skbio', quality=[1, 2, 33, 123456789])],
            {'constructor': partial(Protein, validate=False)},
            list(map(get_data_path, ['fasta_prot_seqs_odd_labels'])),
            list(map(get_data_path, ['qual_prot_seqs_odd_labels']))
        )
        # sequences that can be loaded into a SequenceCollection or Alignment.
        # they are also a different type than Sequence in order to
        # exercise the constructor parameter
        self.sequence_collection_different_type = (
            [RNA('AUG', quality=[20, 20, 21]),
             RNA('AUC', id='rnaseq-1', description='rnaseq desc 1',
                 quality=[10, 9, 10]),
             RNA('AUG', id='rnaseq-2', description='rnaseq desc 2',
                 quality=[9, 99, 999])],
            {'constructor': partial(RNA, validate=False)},
            list(map(get_data_path,
                     ['fasta_sequence_collection_different_type'])),
            list(map(get_data_path,
                     ['qual_sequence_collection_different_type']))
        )
        # store fasta filepath, kwargs, error type, and expected error message
        # for invalid input.
        #
        # note: there is some duplication in testing that fasta and qual
        # parsers raise expected errors. even though the parsers share the same
        # underlying logic, these tests are here as a safeguard in case the
        # code is refactored in the future such that fasta and qual have
        # different implementations (e.g., if qual is written in cython while
        # fasta remains in python)
        self.invalid_fps = list(map(lambda e: (get_data_path(e[0]),
                                               e[1], e[2], e[3]), [
            # fasta and qual missing header
            ('fasta_invalid_missing_header', {}, FASTAFormatError,
             'non-header.*1st FASTA'),
            ('fasta_3_seqs_defaults',
             {'qual': get_data_path('qual_invalid_missing_header')},
             FASTAFormatError, 'non-header.*1st QUAL'),
            # fasta and qual with blank line within sequence
            ('fasta_invalid_blank_line_within_sequence', {}, FASTAFormatError,
             'whitespace-only.*FASTA'),
            ('fasta_3_seqs_defaults',
             {'qual': get_data_path('qual_invalid_blank_line_within_seq')},
             FASTAFormatError, 'whitespace-only.*QUAL'),
            # fasta and qual with blank after header
            ('fasta_invalid_blank_sequence', {}, FASTAFormatError,
             'without sequence data'),
            ('fasta_3_seqs_defaults',
             {'qual': get_data_path('qual_invalid_blank_sequence')},
             FASTAFormatError, 'without quality scores'),
            # fasta and qual with whitespace only sequence
            ('fasta_invalid_whitespace_only_sequence', {}, FASTAFormatError,
             'without sequence data'),
            ('fasta_3_seqs_defaults',
             {'qual': get_data_path('qual_invalid_whitespace_only_sequence')},
             FASTAFormatError, 'without quality scores'),
            # fasta and qual with blank line within sequence
            ('fasta_invalid_blank_line_after_header', {}, FASTAFormatError,
             'whitespace-only.*FASTA'),
            ('fasta_3_seqs_defaults',
             {'qual': get_data_path('qual_invalid_blank_line_after_header')},
             FASTAFormatError, 'whitespace-only.*QUAL'),
            # fasta and qual with whitespace-only line within sequence
            ('fasta_invalid_whitespace_only_line_within_sequence',
             {}, FASTAFormatError, 'whitespace-only.*FASTA'),
            ('fasta_3_seqs_defaults',
             {'qual': get_data_path('qual_invalid_whitespace_line_in_seq')},
             FASTAFormatError, 'whitespace-only.*QUAL'),
            # fasta and qual with whitespace-only line after header
            ('fasta_invalid_whitespace_line_after_header',
             {}, FASTAFormatError, 'whitespace-only.*FASTA'),
            ('fasta_3_seqs_defaults',
             {'qual': get_data_path('qual_invalid_ws_line_after_header')},
             FASTAFormatError, 'whitespace-only.*QUAL'),
            # fasta and qual missing record data (first record)
            ('fasta_invalid_missing_seq_data_first', {}, FASTAFormatError,
             'without sequence data'),
            ('fasta_3_seqs_defaults',
             {'qual': get_data_path('qual_invalid_missing_qual_scores_first')},
             FASTAFormatError, 'without quality scores'),
            # fasta and qual missing record data (middle record)
            ('fasta_invalid_missing_seq_data_middle', {}, FASTAFormatError,
             'without sequence data'),
            ('fasta_3_seqs_defaults',
             {'qual':
              get_data_path('qual_invalid_missing_qual_scores_middle')},
             FASTAFormatError, 'without quality scores'),
            # fasta and qual missing record data (last record)
            ('fasta_invalid_missing_seq_data_last', {}, FASTAFormatError,
             'without sequence data'),
            ('fasta_3_seqs_defaults',
             {'qual': get_data_path('qual_invalid_missing_qual_scores_last')},
             FASTAFormatError, 'without quality scores'),
            # fasta and qual in legacy format (;)
            ('fasta_invalid_legacy_format', {}, FASTAFormatError,
             'non-header.*1st FASTA'),
            ('fasta_3_seqs_defaults',
             {'qual': get_data_path('qual_invalid_legacy_format')},
             FASTAFormatError, 'non-header.*1st QUAL'),
            # qual file with an extra record
            ('fasta_3_seqs_defaults',
             {'qual': get_data_path('qual_3_seqs_defaults_extra')},
             FASTAFormatError, 'QUAL file has more'),
            # fasta file with an extra record
            ('fasta_3_seqs_defaults',
             {'qual': get_data_path('qual_2_seqs_defaults')},
             FASTAFormatError, 'FASTA file has more'),
            # id mismatch between fasta and qual
            ('fasta_3_seqs_defaults',
             {'qual': get_data_path('qual_3_seqs_defaults_id_mismatch')},
             FASTAFormatError,
             'IDs do not match.*\'s_e_q_2\' != \'s_e_q_42\''),
            # description mismatch between fasta and qual
            ('fasta_3_seqs_defaults',
             {'qual': get_data_path('qual_3_seqs_defaults_desc_mismatch')},
             FASTAFormatError,
             'Descriptions do not match.*\'desc 2\' != \'desc 42\''),
            # sequence and quality score length mismatch between fasta and qual
            ('fasta_3_seqs_defaults',
             {'qual': get_data_path('qual_3_seqs_defaults_length_mismatch')},
             ValueError,
             'Number of quality scores \(3\) must match the number of characte'
             'rs in the sequence \(4\)\.'),
            # invalid qual scores (string value can't be converted to integer)
            ('fasta_3_seqs_defaults',
             {'qual': get_data_path('qual_invalid_qual_scores_string')},
             FASTAFormatError,
             'quality scores to integers:\n100 0 1a -42'),
            # invalid qual scores (float value can't be converted to integer)
            ('fasta_3_seqs_defaults',
             {'qual': get_data_path('qual_invalid_qual_scores_float')},
             FASTAFormatError,
             'quality scores to integers:\n42 41.0 39 40'),
            # invalid qual scores (negative integer)
            ('fasta_3_seqs_defaults',
             {'qual': get_data_path('qual_invalid_qual_scores_negative')},
             ValueError,
             'Quality scores must be greater than or equal to zero\.'),
            # misc. invalid files used elsewhere in the tests
            ('fasta_invalid_after_10_seqs', {}, FASTAFormatError,
             'without sequence data'),
            ('fasta_id_whitespace_replacement_none', {}, FASTAFormatError,
             'whitespace-only.*FASTA'),
            ('fasta_description_newline_replacement_none', {},
             FASTAFormatError, 'whitespace-only.*FASTA')
        ]))
# extensive tests for fasta -> generator reader since it is used by all
# other fasta -> object readers
def test_fasta_to_generator_valid_files(self):
test_cases = (self.empty, self.single, self.multi,
self.odd_labels_different_type,
self.sequence_collection_different_type)
# Strategy:
# for each fasta file, read it without its corresponding qual file,
# and ensure observed vs. expected match, ignoring quality scores in
# expected. next, parse the current fasta file with each
# corresponding quality file and ensure that observed vs. expected
# match, this time taking quality scores into account. this
# sufficiently exercises parsing a standalone fasta file and paired
# fasta/qual files
for exp, kwargs, fasta_fps, qual_fps in test_cases:
for fasta_fp in fasta_fps:
obs = list(_fasta_to_generator(fasta_fp, **kwargs))
self.assertEqual(len(obs), len(exp))
for o, e in zip(obs, exp):
self.assertTrue(o.equals(e, ignore=['quality']))
for qual_fp in qual_fps:
obs = list(_fasta_to_generator(fasta_fp, qual=qual_fp,
**kwargs))
self.assertEqual(len(obs), len(exp))
for o, e in zip(obs, exp):
self.assertTrue(o.equals(e))
def test_fasta_to_generator_invalid_files(self):
for fp, kwargs, error_type, error_msg_regex in self.invalid_fps:
with self.assertRaisesRegexp(error_type, error_msg_regex):
list(_fasta_to_generator(fp, **kwargs))
    # light testing of fasta -> object readers to ensure interface is present
    # and kwargs are passed through. extensive testing of underlying reader is
    # performed above
    def test_fasta_to_any_sequence(self):
        """Exercise each single-sequence reader (Sequence/DNA/RNA/Protein).

        Covers: empty-file errors, reading first/middle/last record via
        ``seq_num``, pairing with qual files, and out-of-range ``seq_num``.
        """
        for constructor, reader_fn in ((Sequence,
                                        _fasta_to_biological_sequence),
                                       (partial(DNA, validate=False),
                                        _fasta_to_dna_sequence),
                                       (partial(RNA, validate=False),
                                        _fasta_to_rna_sequence),
                                       (partial(Protein, validate=False),
                                        _fasta_to_protein_sequence)):
            # empty file
            empty_fp = get_data_path('empty')
            with self.assertRaisesRegexp(ValueError, '1st sequence'):
                reader_fn(empty_fp)
            with self.assertRaisesRegexp(ValueError, '1st sequence'):
                reader_fn(empty_fp, qual=empty_fp)
            # the sequences in the following files don't necessarily make sense
            # for each of the sequence object types that they're read into
            # (e.g., reading a protein sequence into a dna sequence object).
            # however, for the purposes of testing the various
            # fasta -> sequence readers, this works out okay as it is valid to
            # construct a sequence object with invalid characters. we're
            # interested in testing the reading logic here, and don't care so
            # much about constructing semantically-meaningful/valid sequence
            # objects
            # file with only 1 seq, get first
            fasta_fps = list(map(get_data_path,
                                 ['fasta_single_seq', 'fasta_max_width_1']))
            for fasta_fp in fasta_fps:
                exp = constructor(
                    'ACGT-acgt.', id='seq1', description='desc1',
                    quality=[10, 20, 30, 10, 0, 0, 0, 88888, 1, 3456])
                obs = reader_fn(fasta_fp)
                self.assertTrue(obs.equals(exp, ignore=['quality']))
                qual_fps = list(map(get_data_path,
                                    ['qual_single_seq', 'qual_max_width_1']))
                for qual_fp in qual_fps:
                    obs = reader_fn(fasta_fp, qual=qual_fp)
                    self.assertTrue(obs.equals(exp))
            # file with multiple seqs
            fasta_fps = list(map(get_data_path,
                                 ['fasta_multi_seq', 'fasta_max_width_5']))
            qual_fps = list(map(get_data_path,
                                ['qual_multi_seq', 'qual_max_width_5']))
            for fasta_fp in fasta_fps:
                # get first
                exp = constructor(
                    'ACGT-acgt.', id='seq1', description='desc1',
                    quality=[10, 20, 30, 10, 0, 0, 0, 88888, 1, 3456])
                obs = reader_fn(fasta_fp)
                self.assertTrue(obs.equals(exp, ignore=['quality']))
                for qual_fp in qual_fps:
                    obs = reader_fn(fasta_fp, qual=qual_fp)
                    self.assertTrue(obs.equals(exp))
                # get middle
                exp = constructor('ACGTTGCAccGG',
                                  quality=[55, 10, 0, 999, 1, 1, 8, 775, 40,
                                           10, 10, 0])
                obs = reader_fn(fasta_fp, seq_num=4)
                self.assertTrue(obs.equals(exp, ignore=['quality']))
                for qual_fp in qual_fps:
                    obs = reader_fn(fasta_fp, seq_num=4, qual=qual_fp)
                    self.assertTrue(obs.equals(exp))
                # get last
                exp = constructor(
                    'pQqqqPPQQQ', id='proteinseq',
                    description='detailed description \t\twith new lines',
                    quality=[42, 42, 442, 442, 42, 42, 42, 42, 42, 43])
                obs = reader_fn(fasta_fp, seq_num=6)
                self.assertTrue(obs.equals(exp, ignore=['quality']))
                for qual_fp in qual_fps:
                    obs = reader_fn(fasta_fp, seq_num=6, qual=qual_fp)
                    self.assertTrue(obs.equals(exp))
                # seq_num too large
                with self.assertRaisesRegexp(ValueError, '8th sequence'):
                    reader_fn(fasta_fp, seq_num=8)
                for qual_fp in qual_fps:
                    with self.assertRaisesRegexp(ValueError, '8th sequence'):
                        reader_fn(fasta_fp, seq_num=8, qual=qual_fp)
                # seq_num too small
                with self.assertRaisesRegexp(ValueError, '`seq_num`=0'):
                    reader_fn(fasta_fp, seq_num=0)
                for qual_fp in qual_fps:
                    with self.assertRaisesRegexp(ValueError, '`seq_num`=0'):
                        reader_fn(fasta_fp, seq_num=0, qual=qual_fp)
    def test_fasta_to_sequence_collection_and_alignment(self):
        """Read fasta (with and without qual) into SequenceCollection and
        Alignment objects and compare against expected sequences."""
        test_cases = (self.empty, self.single,
                      self.sequence_collection_different_type)
        for constructor, reader_fn in ((SequenceCollection,
                                        _fasta_to_sequence_collection),
                                       (Alignment,
                                        _fasta_to_alignment)):
            # see comment in test_fasta_to_generator_valid_files (above) for
            # testing strategy
            for exp_list, kwargs, fasta_fps, qual_fps in test_cases:
                exp = constructor(exp_list)
                for fasta_fp in fasta_fps:
                    obs = reader_fn(fasta_fp, **kwargs)
                    # TODO remove this custom equality testing code when
                    # SequenceCollection has an equals method (part of #656).
                    # We need this method to include IDs and description in the
                    # comparison (not part of SequenceCollection.__eq__).
                    self.assertEqual(len(obs), len(exp))
                    for o, e in zip(obs, exp):
                        self.assertTrue(o.equals(e, ignore=['quality']))
                    for qual_fp in qual_fps:
                        obs = reader_fn(fasta_fp, qual=qual_fp, **kwargs)
                        # TODO remove this custom equality testing code when
                        # SequenceCollection has an equals method (part of
                        # #656). We need this method to include IDs and
                        # description in the comparison (not part of
                        # SequenceCollection.__eq__).
                        # NOTE(review): unlike the no-qual branch above, this
                        # compares the collections directly instead of only
                        # their lengths -- confirm the asymmetry is intended.
                        self.assertEqual(obs, exp)
                        for o, e in zip(obs, exp):
                            self.assertTrue(o.equals(e))
class WriterTests(TestCase):
    """Tests for the object -> fasta (and qual) writers."""
    def setUp(self):
        """Build sequence fixtures, generator/expected-filepath pairs, and
        invalid writer inputs shared by the tests below."""
        self.bio_seq1 = Sequence(
            'ACGT-acgt.', id='seq1', description='desc1',
            quality=[10, 20, 30, 10, 0, 0, 0, 88888, 1, 3456])
        self.bio_seq2 = Sequence(
            'A', id=' \n \nseq \t2 ', quality=[42])
        self.bio_seq3 = Sequence(
            'AACGGuA', description='desc3', quality=[0, 0, 0, 0, 0, 0, 0])
        self.dna_seq = DNA(
            'ACGTTGCAccGG',
            quality=[55, 10, 0, 999, 1, 1, 8, 775, 40, 10, 10, 0],
            validate=False)
        self.rna_seq = RNA('ACGUU', quality=[10, 9, 8, 7, 6])
        self.prot_seq = Protein(
            'pQqqqPPQQQ', id='proteinseq',
            description='\ndetailed\ndescription \t\twith new\n\nlines\n\n\n',
            quality=[42, 42, 442, 442, 42, 42, 42, 42, 42, 43], validate=False)
        seqs = [
            RNA('UUUU', id='s\te\tq\t1', description='desc\n1',
                quality=[1234, 0, 0, 2]),
            Sequence(
                'CATC', id='s\te\tq\t2', description='desc\n2',
                quality=[1, 11, 111, 11112]),
            Protein('sits', id='s\te\tq\t3', description='desc\n3',
                    quality=[12345, 678909, 999999, 4242424242],
                    validate=False)
        ]
        self.seq_coll = SequenceCollection(seqs)
        self.align = Alignment(seqs)
        # generator that yields nothing (for testing empty input).
        # NOTE(review): raising StopIteration inside a generator is turned
        # into RuntimeError by PEP 479 (Python 3.7+); a bare return is the
        # modern way to make an empty generator.
        def empty_gen():
            raise StopIteration()
            yield
        def single_seq_gen():
            yield self.bio_seq1
        # generate sequences with descriptions containing newlines (to test
        # description_newline_replacement)
        def newline_description_gen():
            yield self.prot_seq
            yield DNA('AGGAGAATA', id='foo', description='\n\n\n\n',
                      quality=range(9))
        # generate sequences with ids containing whitespace (to test
        # id_whitespace_replacement)
        def whitespace_id_gen():
            yield self.bio_seq2
            yield RNA('UA', id='\n\t \t', description='a\nb',
                      quality=[1000, 1])
        # multiple sequences of mixed types, lengths, and metadata. lengths are
        # chosen to exercise various splitting cases when testing max_width,
        # including exercising the different splitting algorithms used for
        # sequence data vs. quality scores
        def multi_seq_gen():
            for seq in (self.bio_seq1, self.bio_seq2, self.bio_seq3,
                        self.dna_seq, self.rna_seq, self.prot_seq):
                yield seq
        # can be serialized if no qual file is provided, else it should raise
        # an error because one seq has qual scores and the other doesn't
        def mixed_qual_score_gen():
            missing_qual_seq = Sequence(
                'AAAAT', id='da,dadadada', description='10 hours')
            for seq in self.bio_seq1, missing_qual_seq:
                yield seq
        self.mixed_qual_score_gen = mixed_qual_score_gen()
        # store sequence generator to serialize, writer kwargs (if any), and
        # fasta and qual filepaths of expected results
        self.objs_fps = list(map(lambda e: (e[0], e[1], get_data_path(e[2]),
                                            get_data_path(e[3])), [
            (empty_gen(), {}, 'empty', 'empty'),
            (single_seq_gen(), {}, 'fasta_single_seq', 'qual_single_seq'),
            # no splitting of sequence or qual data across lines b/c max_width
            # is sufficiently large
            (single_seq_gen(), {'max_width': 32}, 'fasta_single_seq',
             'qual_single_seq'),
            # splitting algorithm for sequence and qual scores is different;
            # make sure individual qual scores aren't split across lines even
            # if they exceed max_width
            (single_seq_gen(), {'max_width': 1}, 'fasta_max_width_1',
             'qual_max_width_1'),
            (multi_seq_gen(), {}, 'fasta_multi_seq', 'qual_multi_seq'),
            (multi_seq_gen(), {'max_width': 5}, 'fasta_max_width_5',
             'qual_max_width_5'),
            (newline_description_gen(),
             {'description_newline_replacement': ':-)'},
             'fasta_description_newline_replacement_multi_char',
             'qual_description_newline_replacement_multi_char'),
            (newline_description_gen(),
             {'description_newline_replacement': ''},
             'fasta_description_newline_replacement_empty_str',
             'qual_description_newline_replacement_empty_str',),
            (newline_description_gen(),
             {'description_newline_replacement': None},
             'fasta_description_newline_replacement_none',
             'qual_description_newline_replacement_none'),
            (whitespace_id_gen(),
             {'id_whitespace_replacement': '>:o'},
             'fasta_id_whitespace_replacement_multi_char',
             'qual_id_whitespace_replacement_multi_char'),
            (whitespace_id_gen(),
             {'id_whitespace_replacement': ''},
             'fasta_id_whitespace_replacement_empty_str',
             'qual_id_whitespace_replacement_empty_str'),
            (whitespace_id_gen(),
             {'id_whitespace_replacement': None},
             'fasta_id_whitespace_replacement_none',
             'qual_id_whitespace_replacement_none'),
        ]))
        def blank_seq_gen():
            for seq in self.bio_seq1, Sequence(''):
                yield seq
        # generators or parameter combos that cannot be written in fasta
        # format, paired with kwargs (if any), error type, and expected error
        # message regexp
        self.invalid_objs = [
            (blank_seq_gen(), {}, ValueError, '2nd.*empty'),
            (single_seq_gen(),
             {'max_width': 0}, ValueError, 'max_width=0'),
            (multi_seq_gen(), {'id_whitespace_replacement': '-\n_'},
             ValueError, 'Newline character'),
            (multi_seq_gen(), {'description_newline_replacement': '-.-\n'},
             ValueError, 'Newline character'),
            (mixed_qual_score_gen(), {'qual': StringIO()}, ValueError,
             '2nd sequence.*does not have quality scores')
        ]
    # extensive tests for generator -> fasta writer since it is used by all
    # other object -> fasta writers
    def test_generator_to_fasta_no_qual(self):
        """Serialized fasta (no qual) matches the expected fixture files."""
        # test writing standalone fasta (i.e., without a qual file)
        for obj, kwargs, fp, _ in self.objs_fps:
            fh = StringIO()
            _generator_to_fasta(obj, fh, **kwargs)
            obs = fh.getvalue()
            fh.close()
            with open(fp, 'U') as fh:
                exp = fh.read()
            self.assertEqual(obs, exp)
    def test_generator_to_fasta_mixed_qual_scores(self):
        """Mixing sequences with/without quality is fine without a qual file."""
        # test writing some sequences with qual scores and some without is
        # possible if no qual output file is specified
        fh = StringIO()
        _generator_to_fasta(self.mixed_qual_score_gen, fh)
        obs = fh.getvalue()
        fh.close()
        with open(get_data_path('fasta_mixed_qual_scores'), 'U') as fh:
            exp = fh.read()
        self.assertEqual(obs, exp)
    def test_generator_to_fasta_with_qual(self):
        """Serialized fasta/qual pairs match the expected fixture files."""
        # test writing fasta and qual files
        for obj, kwargs, fasta_fp, qual_fp in self.objs_fps:
            if qual_fp is not None:
                fasta_fh = StringIO()
                qual_fh = StringIO()
                _generator_to_fasta(obj, fasta_fh, qual=qual_fh, **kwargs)
                obs_fasta = fasta_fh.getvalue()
                obs_qual = qual_fh.getvalue()
                fasta_fh.close()
                qual_fh.close()
                with open(fasta_fp, 'U') as fh:
                    exp_fasta = fh.read()
                with open(qual_fp, 'U') as fh:
                    exp_qual = fh.read()
                self.assertEqual(obs_fasta, exp_fasta)
                self.assertEqual(obs_qual, exp_qual)
    def test_generator_to_fasta_invalid_input(self):
        """Unserializable generators/kwargs raise the expected errors."""
        for obj, kwargs, error_type, error_msg_regexp in self.invalid_objs:
            fh = StringIO()
            with self.assertRaisesRegexp(error_type, error_msg_regexp):
                _generator_to_fasta(obj, fh, **kwargs)
            fh.close()
    # light testing of object -> fasta writers to ensure interface is present
    # and kwargs are passed through. extensive testing of underlying writer is
    # performed above
    def test_any_sequence_to_fasta(self):
        """Each single-sequence writer works with default and custom kwargs."""
        # store writer function, sequence object to write, expected
        # fasta filepath for default parameters, expected fasta filepath for
        # non-defaults, and expected qual filepath for non-defaults
        id_ = 'f o o'
        desc = 'b\na\nr'
        test_data = (
            (_biological_sequence_to_fasta,
             Sequence('ACGT', id=id_, description=desc,
                      quality=range(1, 5)),
             ('fasta_single_bio_seq_defaults',
              'fasta_single_bio_seq_non_defaults',
              'qual_single_bio_seq_non_defaults')),
            (_dna_sequence_to_fasta,
             DNA('TACG', id=id_, description=desc, quality=range(4)),
             ('fasta_single_dna_seq_defaults',
              'fasta_single_dna_seq_non_defaults',
              'qual_single_dna_seq_non_defaults')),
            (_rna_sequence_to_fasta,
             RNA('UACG', id=id_, description=desc, quality=range(2, 6)),
             ('fasta_single_rna_seq_defaults',
              'fasta_single_rna_seq_non_defaults',
              'qual_single_rna_seq_non_defaults')),
            (_protein_sequence_to_fasta,
             Protein('PQQ', id=id_, description=desc, quality=[42, 41, 40]),
             ('fasta_single_prot_seq_defaults',
              'fasta_single_prot_seq_non_defaults',
              'qual_single_prot_seq_non_defaults')))
        for fn, obj, fps in test_data:
            defaults_fp, non_defaults_fasta_fp, non_defaults_qual_fp = fps
            # test writing with default parameters
            fh = StringIO()
            fn(obj, fh)
            obs = fh.getvalue()
            fh.close()
            with open(get_data_path(defaults_fp), 'U') as fh:
                exp = fh.read()
            self.assertEqual(obs, exp)
            # test writing with non-defaults
            fasta_fh = StringIO()
            qual_fh = StringIO()
            fn(obj, fasta_fh, id_whitespace_replacement='-',
               description_newline_replacement='_', max_width=1, qual=qual_fh)
            obs_fasta = fasta_fh.getvalue()
            obs_qual = qual_fh.getvalue()
            fasta_fh.close()
            qual_fh.close()
            with open(get_data_path(non_defaults_fasta_fp), 'U') as fh:
                exp_fasta = fh.read()
            with open(get_data_path(non_defaults_qual_fp), 'U') as fh:
                exp_qual = fh.read()
            self.assertEqual(obs_fasta, exp_fasta)
            self.assertEqual(obs_qual, exp_qual)
    def test_any_sequences_to_fasta(self):
        """SequenceCollection/Alignment writers, default and custom kwargs."""
        for fn, obj in ((_sequence_collection_to_fasta, self.seq_coll),
                        (_alignment_to_fasta, self.align)):
            # test writing with default parameters
            fh = StringIO()
            fn(obj, fh)
            obs = fh.getvalue()
            fh.close()
            with open(get_data_path('fasta_3_seqs_defaults'), 'U') as fh:
                exp = fh.read()
            self.assertEqual(obs, exp)
            # test writing with non-defaults
            fasta_fh = StringIO()
            qual_fh = StringIO()
            fn(obj, fasta_fh, id_whitespace_replacement='*',
               description_newline_replacement='+', max_width=3, qual=qual_fh)
            obs_fasta = fasta_fh.getvalue()
            obs_qual = qual_fh.getvalue()
            fasta_fh.close()
            qual_fh.close()
            with open(get_data_path('fasta_3_seqs_non_defaults'), 'U') as fh:
                exp_fasta = fh.read()
            with open(get_data_path('qual_3_seqs_non_defaults'), 'U') as fh:
                exp_qual = fh.read()
            self.assertEqual(obs_fasta, exp_fasta)
            self.assertEqual(obs_qual, exp_qual)
class RoundtripTests(TestCase):
    """Round-trip tests: read fixtures, write them back, re-read, compare."""
    def test_roundtrip_generators(self):
        """fasta/qual streamed through generator reader+writer is unchanged."""
        # test that fasta and qual files can be streamed into memory and back
        # out to disk using generator reader and writer
        fps = list(map(lambda e: list(map(get_data_path, e)),
                       [('empty', 'empty'),
                        ('fasta_multi_seq_roundtrip',
                         'qual_multi_seq_roundtrip')]))
        for fasta_fp, qual_fp in fps:
            with open(fasta_fp, 'U') as fh:
                exp_fasta = fh.read()
            with open(qual_fp, 'U') as fh:
                exp_qual = fh.read()
            fasta_fh = StringIO()
            qual_fh = StringIO()
            _generator_to_fasta(_fasta_to_generator(fasta_fp, qual=qual_fp),
                                fasta_fh, qual=qual_fh)
            obs_fasta = fasta_fh.getvalue()
            obs_qual = qual_fh.getvalue()
            fasta_fh.close()
            qual_fh.close()
            self.assertEqual(obs_fasta, exp_fasta)
            self.assertEqual(obs_qual, exp_qual)
    def test_roundtrip_sequence_collections_and_alignments(self):
        """SequenceCollection/Alignment survive a write/read round trip."""
        fps = list(map(lambda e: list(map(get_data_path, e)),
                       [('empty', 'empty'),
                        ('fasta_sequence_collection_different_type',
                         'qual_sequence_collection_different_type')]))
        for reader, writer in ((_fasta_to_sequence_collection,
                                _sequence_collection_to_fasta),
                               (_fasta_to_alignment,
                                _alignment_to_fasta)):
            for fasta_fp, qual_fp in fps:
                # read
                obj1 = reader(fasta_fp, qual=qual_fp)
                # write
                fasta_fh = StringIO()
                qual_fh = StringIO()
                writer(obj1, fasta_fh, qual=qual_fh)
                # rewind the in-memory files so they can be re-read
                fasta_fh.seek(0)
                qual_fh.seek(0)
                # read
                obj2 = reader(fasta_fh, qual=qual_fh)
                fasta_fh.close()
                qual_fh.close()
                # TODO remove this custom equality testing code when
                # SequenceCollection has an equals method (part of #656).
                # We need this method to include IDs and description in the
                # comparison (not part of SequenceCollection.__eq__).
                self.assertEqual(obj1, obj2)
                for s1, s2 in zip(obj1, obj2):
                    self.assertTrue(s1.equals(s2))
    def test_roundtrip_biological_sequences(self):
        """Single-sequence objects survive a write/read round trip."""
        fps = list(map(lambda e: list(map(get_data_path, e)),
                       [('fasta_multi_seq_roundtrip',
                         'qual_multi_seq_roundtrip'),
                        ('fasta_sequence_collection_different_type',
                         'qual_sequence_collection_different_type')]))
        for reader, writer in ((_fasta_to_biological_sequence,
                                _biological_sequence_to_fasta),
                               (_fasta_to_dna_sequence,
                                _dna_sequence_to_fasta),
                               (_fasta_to_rna_sequence,
                                _rna_sequence_to_fasta),
                               (_fasta_to_protein_sequence,
                                _protein_sequence_to_fasta)):
            for fasta_fp, qual_fp in fps:
                # read
                obj1 = reader(fasta_fp, qual=qual_fp)
                # write
                fasta_fh = StringIO()
                qual_fh = StringIO()
                writer(obj1, fasta_fh, qual=qual_fh)
                # rewind the in-memory files so they can be re-read
                fasta_fh.seek(0)
                qual_fh.seek(0)
                # read
                obj2 = reader(fasta_fh, qual=qual_fh)
                fasta_fh.close()
                qual_fh.close()
                self.assertTrue(obj1.equals(obj2))
# Run this module's unit tests when executed directly.
if __name__ == '__main__':
    main()
|
jensreeder/scikit-bio
|
skbio/io/tests/test_fasta.py
|
Python
|
bsd-3-clause
| 42,435
|
[
"scikit-bio"
] |
18906792dc7065c01fbd5febcedc9d898d8b13e533eddf4c674fcbe442687a7c
|
# Copyright 2011 by Eric Talevich. All rights reserved.
# This code is part of the Biopython distribution and governed by its license.
# Please see the LICENSE file that should have been included as part of this
# package.
"""Command-line wrapper for the tree inference program PhyML."""
__docformat__ = "restructuredtext en"
import sys
# Add path to Bio
sys.path.append('../../..')
from Bio._py3k import basestring
from Bio.Application import _Option, _Switch, AbstractCommandline
class PhymlCommandline(AbstractCommandline):
    """Command-line wrapper for the tree inference program PhyML.
    Homepage: http://www.atgc-montpellier.fr/phyml
    Citations:
    Guindon S, Gascuel O.
    A simple, fast, and accurate algorithm to estimate large phylogenies by maximum
    likelihood.
    Systematic Biology, 2003 Oct;52(5):696-704.
    PubMed PMID: 14530136.
    Guindon S, Dufayard JF, Lefort V, Anisimova M, Hordijk W, Gascuel O.
    New Algorithms and Methods to Estimate Maximum-Likelihood Phylogenies: Assessing
    the Performance of PhyML 3.0.
    Systematic Biology, 2010 59(3):307-21.
    """
    def __init__(self, cmd='phyml', **kwargs):
        """Declare PhyML's options and initialize the wrapper.

        cmd -- name or path of the phyml executable (default 'phyml').
        Remaining keyword arguments set any of the declared parameters,
        e.g. ``PhymlCommandline(input='aln.phy', datatype='aa')``.
        """
        # Each _Option/_Switch maps a PhyML flag to a Python attribute;
        # the help text is taken from the PhyML manual.
        self.parameters = [
            _Option(['-i', '--input', 'input'],
                    """Name of the nucleotide or amino-acid sequence file in PHYLIP
                    format.""",
                    filename=True,
                    is_required=True,
                    equate=False,
                    ),
            _Option(['-d', '--datatype', 'datatype'],
                    """Data type is 'nt' for nucleotide (default) and 'aa' for
                    amino-acid sequences.""",
                    checker_function=lambda x: x in ('nt', 'aa'),
                    equate=False,
                    ),
            _Switch(['-q', '--sequential', 'sequential'],
                    "Changes interleaved format (default) to sequential format."
                    ),
            _Option(['-n', '--multiple', 'multiple'],
                    "Number of data sets to analyse (integer).",
                    checker_function=(lambda x:
                                      isinstance(x, int) or x.isdigit()),
                    equate=False,
                    ),
            _Switch(['-p', '--pars', 'pars'],
                    """Use a minimum parsimony starting tree.
                    This option is taken into account when the '-u' option is absent
                    and when tree topology modifications are to be done.
                    """
                    ),
            _Option(['-b', '--bootstrap', 'bootstrap'],
                    """Number of bootstrap replicates, if value is > 0.
                    Otherwise:
                    0: neither approximate likelihood ratio test nor bootstrap
                    values are computed.
                    -1: approximate likelihood ratio test returning aLRT statistics.
                    -2: approximate likelihood ratio test returning Chi2-based
                    parametric branch supports.
                    -4: SH-like branch supports alone.
                    """,
                    equate=False,
                    ),
            _Option(['-m', '--model', 'model'],
                    """Substitution model name.
                    Nucleotide-based models:
                    HKY85 (default) | JC69 | K80 | F81 | F84 | TN93 | GTR | custom
                    For the custom option, a string of six digits identifies the
                    model. For instance, 000000 corresponds to F81 (or JC69,
                    provided the distribution of nucleotide frequencies is uniform).
                    012345 corresponds to GTR. This option can be used for encoding
                    any model that is a nested within GTR.
                    Amino-acid based models:
                    LG (default) | WAG | JTT | MtREV | Dayhoff | DCMut | RtREV |
                    CpREV | VT | Blosum62 | MtMam | MtArt | HIVw | HIVb | custom
                    """,
                    # NOTE(review): this checker accepts the named models and
                    # ints, but rejects the six-digit custom model *strings*
                    # (e.g. '012345') described in the help text above, and an
                    # int would lose leading zeros -- confirm whether digit
                    # strings should also be accepted here.
                    checker_function=(lambda x: x in (
                        # Nucleotide models:
                        'HKY85', 'JC69', 'K80', 'F81', 'F84', 'TN93', 'GTR',
                        # Amino acid models:
                        'LG', 'WAG', 'JTT', 'MtREV', 'Dayhoff', 'DCMut',
                        'RtREV', 'CpREV', 'VT', 'Blosum62', 'MtMam', 'MtArt',
                        'HIVw', 'HIVb')
                        or isinstance(x, int)),
                    equate=False,
                    ),
            _Option(['-f', 'frequencies'],
                    """Character frequencies.
                    -f e, m, or "fA fC fG fT"
                    e : Empirical frequencies, determined as follows :
                    - Nucleotide sequences: (Empirical) the equilibrium base
                    frequencies are estimated by counting the occurence of the
                    different bases in the alignment.
                    - Amino-acid sequences: (Empirical) the equilibrium
                    amino-acid frequencies are estimated by counting the
                    occurence of the different amino-acids in the alignment.
                    m : ML/model-based frequencies, determined as follows :
                    - Nucleotide sequences: (ML) the equilibrium base
                    frequencies are estimated using maximum likelihood
                    - Amino-acid sequences: (Model) the equilibrium amino-acid
                    frequencies are estimated using the frequencies defined by
                    the substitution model.
                    "fA fC fG fT" : only valid for nucleotide-based models.
                    fA, fC, fG and fT are floating-point numbers that correspond
                    to the frequencies of A, C, G and T, respectively.
                    """,
                    filename=True, # ensure ".25 .25 .25 .25" stays quoted
                    equate=False,
                    ),
            _Option(['-t', '--ts/tv', 'ts_tv_ratio'],
                    """Transition/transversion ratio. (DNA sequences only.)
                    Can be a fixed positive value (ex:4.0) or e to get the
                    maximum-likelihood estimate.
                    """,
                    equate=False,
                    ),
            _Option(['-v', '--pinv', 'prop_invar'],
                    """Proportion of invariable sites.
                    Can be a fixed value in the range [0,1], or 'e' to get the
                    maximum-likelihood estimate.
                    """,
                    equate=False,
                    ),
            _Option(['-c', '--nclasses', 'nclasses'],
                    """Number of relative substitution rate categories.
                    Default 1. Must be a positive integer.
                    """,
                    equate=False,
                    ),
            _Option(['-a', '--alpha', 'alpha'],
                    """Distribution of the gamma distribution shape parameter.
                    Can be a fixed positive value, or 'e' to get the
                    maximum-likelihood estimate.
                    """,
                    equate=False,
                    ),
            _Option(['-s', '--search', 'search'],
                    """Tree topology search operation option.
                    Can be one of:
                    NNI : default, fast
                    SPR : a bit slower than NNI
                    BEST : best of NNI and SPR search
                    """,
                    checker_function=lambda x: x in ('NNI', 'SPR', 'BEST'),
                    equate=False,
                    ),
            # alt name: user_tree_file
            _Option(['-u', '--inputtree', 'input_tree'],
                    "Starting tree filename. The tree must be in Newick format.",
                    filename=True,
                    equate=False,
                    ),
            _Option(['-o', 'optimize'],
                    """Specific parameter optimisation.
                    tlr : tree topology (t), branch length (l) and
                    rate parameters (r) are optimised.
                    tl : tree topology and branch length are optimised.
                    lr : branch length and rate parameters are optimised.
                    l : branch length are optimised.
                    r : rate parameters are optimised.
                    n : no parameter is optimised.
                    """,
                    equate=False,
                    ),
            _Switch(['--rand_start', 'rand_start'],
                    """Sets the initial tree to random.
                    Only valid if SPR searches are to be performed.
                    """,
                    ),
            _Option(['--n_rand_starts', 'n_rand_starts'],
                    """Number of initial random trees to be used.
                    Only valid if SPR searches are to be performed.
                    """,
                    equate=False,
                    ),
            _Option(['--r_seed', 'r_seed'],
                    """Seed used to initiate the random number generator.
                    Must be an integer.
                    """,
                    equate=False,
                    ),
            _Switch(['--print_site_lnl', 'print_site_lnl'],
                    "Print the likelihood for each site in file *_phyml_lk.txt."
                    ),
            _Switch(['--print_trace', 'print_trace'],
                    """Print each phylogeny explored during the tree search process
                    in file *_phyml_trace.txt."""
                    ),
            _Option(['--run_id', 'run_id'],
                    """Append the given string at the end of each PhyML output file.
                    This option may be useful when running simulations involving
                    PhyML.
                    """,
                    checker_function=lambda x: isinstance(x, basestring),
                    equate=False,
                    ),
            # XXX should this always be set to True?
            _Switch(['--quiet', 'quiet'],
                    "No interactive questions (for running in batch mode)."
                    ),
        ]
        AbstractCommandline.__init__(self, cmd, **kwargs)
|
Ambuj-UF/ConCat-1.0
|
src/Utils/Bio/Phylo/Applications/_Phyml.py
|
Python
|
gpl-2.0
| 9,856
|
[
"Biopython"
] |
80bf05dd1f770af2c64133a26b1460cce10b55e68bfbd7cf91fd362366b8f18f
|
#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds several text ads to a given ad group. To get ad_group_id,
run get_ad_groups.py.
Tags: AdGroupAdService.mutate
Api: AdWordsOnly
"""
__author__ = 'api.kwinter@gmail.com (Kevin Winter)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import AdWordsClient
# Placeholder value -- replace with a real ad group ID before running.
ad_group_id = 'INSERT_AD_GROUP_ID_HERE'
def main(client, ad_group_id):
  """Add two paused text ads to the given ad group and print the results.

  Args:
    client: an initialized AdWordsClient instance.
    ad_group_id: ID of the ad group that will receive the ads.
  """
  # Initialize appropriate service.
  ad_group_ad_service = client.GetAdGroupAdService(version='v201302')
  # Construct operations and add ads.
  operations = [
      {
          'operator': 'ADD',
          'operand': {
              'xsi_type': 'AdGroupAd',
              'adGroupId': ad_group_id,
              'ad': {
                  'xsi_type': 'TextAd',
                  'url': 'http://www.example.com',
                  'displayUrl': 'example.com',
                  'description1': 'Visit the Red Planet in style.',
                  'description2': 'Low-gravity fun for everyone!',
                  'headline': 'Luxury Cruise to Mars'
              },
              # Optional fields.
              'status': 'PAUSED'
          }
          # If needed, you could specify an exemption request here.
          # 'exemptionRequests': [{
          #     # This comes back in a PolicyViolationError.
          #     'key' {
          #         'policyName': '...',
          #         'violatingText': '...'
          #     }
          # }]
      },
      {
          'operator': 'ADD',
          'operand': {
              'xsi_type': 'AdGroupAd',
              'adGroupId': ad_group_id,
              'ad': {
                  'xsi_type': 'TextAd',
                  'url': 'http://www.example.com',
                  'displayUrl': 'example.com',
                  'description1': 'Enjoy your stay at Red Planet.',
                  'description2': 'Buy your tickets now!',
                  'headline': 'Luxury Cruise to Mars'
              }
          }
      }
  ]
  # Mutate returns a list; the first element holds the AdGroupAd results.
  ads = ad_group_ad_service.Mutate(operations)[0]
  # Display results.
  for ad in ads['value']:
    print ('Ad with id \'%s\' and of type \'%s\' was added.'
           % (ad['ad']['id'], ad['ad']['Ad_Type']))
  print
  print ('Usage: %s units, %s operations' % (client.GetUnits(),
                                             client.GetOperations()))
# Allow running the example directly as a script.
if __name__ == '__main__':
  # Initialize client object.
  client = AdWordsClient(path=os.path.join('..', '..', '..', '..', '..'))
  main(client, ad_group_id)
|
donspaulding/adspygoogle
|
examples/adspygoogle/adwords/v201302/basic_operations/add_text_ads.py
|
Python
|
apache-2.0
| 3,173
|
[
"VisIt"
] |
5d0212d2140c61a39e5df37932a3a43cb424893426a0e430231f0b02a571fc53
|
#! python
"""
Slice a NetCDF file along the time dimension.
"""
# TODO: Generalize to slice along any combination of dimensions.
# Change args to source dest dim_name:index_range*
# Don't specify variables, slice em all.
from argparse import ArgumentParser
from netCDF4 import Dataset
def nc_copy(source_fp, dest_fp, time_dimension, time_indices, variables):
    """Copy a NetCDF file, optionally slicing variables along the time dimension.

    Parameters:
        source_fp: path of the NetCDF file to copy.
        dest_fp: path of the NetCDF file to create.
        time_dimension: name of the time dimension (e.g. 'time').
        time_indices: inclusive (start, end) index pair to keep along the
            time dimension, or a falsy value to copy all time steps.
        variables: collection of variable names to slice, or a falsy value
            to slice every variable that has the time dimension.

    Global attributes, dimensions, variables (with their attributes), and
    variable values are all copied; only the selected variables are sliced.
    """
    print('nc_copy({source_fp}, {dest_fp}, {time_dimension}, {time_indices}, {variables})'.format(**locals()))
    with Dataset(source_fp) as source:
        with Dataset(dest_fp, mode='w') as dest:
            # Copy global attributes
            for attr_name in source.ncattrs():
                dest.setncattr(attr_name, source.getncattr(attr_name))
            # Create dimensions, shrinking the time dimension if a slice
            # was requested.
            # NOTE(review): unlimited dimensions are recreated with a fixed
            # size -- confirm this is acceptable for the intended inputs.
            for dim_name, dimension in source.dimensions.items():
                if time_indices and dim_name == time_dimension:
                    # time_indices is inclusive at both ends
                    size = time_indices[1] - time_indices[0] + 1
                else:
                    size = dimension.size
                print('Copying dimension {} ({})'.format(dim_name, size))
                dest.createDimension(dim_name, size=size)
            # Create and copy variables
            for var_name, source_variable in source.variables.items():
                print('Copying variable {}'.format(var_name))
                # Chunking is deliberately left to the library defaults
                # (contiguous=False, chunksizes=None); copying the source
                # chunking was tried and abandoned.
                dest_variable = dest.createVariable(
                    var_name, source_variable.datatype,
                    dimensions=source_variable.dimensions,
                    **source_variable.filters(),
                    endian=source_variable.endian(),
                    contiguous=False,
                    chunksizes=None,
                )
                # Copy variable attributes. A distinct loop name is used
                # here: the original code reused the variable loop's name,
                # clobbering it before the slicing test below, so the
                # `variables` filter matched the last *attribute* name
                # instead of the variable name.
                for attr_name in source_variable.ncattrs():
                    dest_variable.setncattr(
                        attr_name, source_variable.getncattr(attr_name))
                # Copy variable values, slicing along the time dimension
                # when requested for this variable.
                if (
                    # we want to process this variable
                    (not variables or var_name in variables)
                    # it has a time dimension
                    and time_dimension in source_variable.dimensions
                    # and time indices are specified
                    and time_indices
                ):
                    print('\tslicing')
                    # copy only a subset of the time dimension; the slice
                    # stop is exclusive, hence the +1
                    slices = [slice(None)] * source_variable.ndim
                    t = source_variable.dimensions.index(time_dimension)
                    slices[t] = slice(time_indices[0], time_indices[1] + 1)
                    dest_variable[:] = source_variable[tuple(slices)]
                else:
                    dest_variable[:] = source_variable[:]
# Script entry point: parse command-line arguments and delegate to nc_copy.
if __name__ == '__main__':
    parser = ArgumentParser(
        description='Copy a NetCDF file with some modifications, '
        'namely selecting a subset of the times for specified '
        'variables')
    parser.add_argument(
        'source', help='Source file to copy')
    parser.add_argument(
        'dest', help='Destination file')
    parser.add_argument(
        '--time-dimension', dest='time_dimension', default='time',
        help='Time dimension name')
    parser.add_argument(
        '--time-indices', dest='time_indices', default=None,
        help='Time indices')
    parser.add_argument(
        '--variables', default=None, help='Variables')
    args = parser.parse_args()
    # '--time-indices' is given as 'start-end' (inclusive) and
    # '--variables' as a comma-separated list; both are optional and
    # forwarded as None when omitted.
    nc_copy(
        args.source, args.dest,
        args.time_dimension,
        args.time_indices and [int(i) for i in args.time_indices.split('-')],
        args.variables and args.variables.split(',')
    )
|
pacificclimate/modelmeta
|
scripts/nc-copy-modify.py
|
Python
|
gpl-3.0
| 4,104
|
[
"NetCDF"
] |
296f87034a46be95bfe351c98110b1af15cc078fea832a2ced3311d8ef2fbe4a
|
import shutil
import os
import time
import sys
import multiprocessing
import numpy as np
# deepTools packages
import deeptools.utilities
from deeptools import bamHandler
from deeptools import mapReduce
from deeptoolsintervals import GTF
import pyBigWig
# Module-level debug flag (0 = off).
debug = 0
# Silence numpy floating-point warnings module-wide; the previous error
# settings are kept so they could be restored.
old_settings = np.seterr(all='ignore')
def countReadsInRegions_wrapper(args):
    """
    Unpack *args* and forward them to CountReadsPerBin.count_reads_in_region.

    This indirection is required given the constraints of the
    multiprocessing module, which passes worker functions a single
    argument. The first element of ``args`` is the 'self' value
    (the CountReadsPerBin instance); the rest are the method's arguments.
    """
    return CountReadsPerBin.count_reads_in_region(*args)
class CountReadsPerBin(object):

    r"""Collects coverage over multiple bam files using multiprocessing

    This class collects read counts (coverage) from several bam files and returns
    a numpy array with the results. This class uses multiprocessing to compute the coverage.

    Parameters
    ----------
    bamFilesList : list
        List containing the names of indexed bam files. E.g. ['file1.bam', 'file2.bam']

    binLength : int
        Length of the window/bin. This value is overruled by ``bedFile`` if present.

    numberOfSamples : int
        Total number of samples. The genome is divided into ``numberOfSamples``, each
        with a window/bin length equal to ``binLength``. This value is overruled
        by ``stepSize`` in case such value is present and by ``bedFile`` in which
        case the number of samples and bins are defined in the bed file

    numberOfProcessors : int
        Number of processors to use. Default is 4

    verbose : bool
        Output messages. Default: False

    region : str
        Region to limit the computation in the form chrom:start:end.

    bedFile : list of file_handles.
        Each file handle corresponds to a bed file containing the regions for which to compute the coverage. This option
        overrules ``binLength``, ``numberOfSamples`` and ``stepSize``.

    blackListFileName : str
        A string containing a BED file with blacklist regions.

    extendReads : bool, int
        Whether coverage should be computed for the extended read length (i.e. the region covered
        by the two mates or the regions expected to be covered by single-reads).
        If the value is 'int', then this is interpreted as the fragment length to extend reads
        that are not paired. For Illumina reads, usual values are around 300.
        This value can be determined using the peak caller MACS2 or can be
        approximated by the fragment lengths computed when preparing the library for sequencing. If the value
        of the variable is true and no value is given, the fragment size is sampled from the library but
        only if the library is paired-end. Default: False

    minMappingQuality : int
        Reads of a mapping quality less than the given value are not considered. Default: None

    ignoreDuplicates : bool
        Whether read duplicates (same start, end position. If paired-end, same start-end for mates) are
        to be excluded. Default: false

    chrToSkip: list
        List with names of chromosomes that do not want to be included in the coverage computation.
        This is useful to remove unwanted chromosomes (e.g. 'random' or 'Het').

    stepSize : int
        the positions for which the coverage is computed are defined as follows:
        ``range(start, end, stepSize)``. Thus, a stepSize of 1, will compute
        the coverage at each base pair. If the stepSize is equal to the
        binLength then the coverage is computed for consecutive bins. If stepSize is
        smaller than the binLength, then the bins will overlap.

    center_read : bool
        Determines if reads should be centered with respect to the fragment length.

    samFlag_include : int
        Extracts only those reads having the SAM flag. For example, to get only
        reads that are the first mates a samFlag of 64 could be used. Similarly, the
        samFlag_include can be used to select only reads mapping on the reverse strand
        or to get only properly paired reads.

    samFlag_exclude : int
        Removes reads that match the SAM flag. For example to get all reads
        that map to the forward strand a samFlag_exclude 16 should be used. Which
        translates into exclude all reads that map to the reverse strand.

    zerosToNans : bool
        If true, zero values encountered are transformed to Nans. Default false.

    skipZeroOverZero : bool
        If true, skip bins where all input BAM files have no coverage (only applicable to bamCompare).

    minFragmentLength : int
        If greater than 0, fragments below this size are excluded.

    maxFragmentLength : int
        If greater than 0, fragments above this size are excluded.

    out_file_for_raw_data : str
        File name to save the raw counts computed

    statsList : list
        For each BAM file in bamFilesList, the associated per-chromosome statistics returned by openBam

    mappedList : list
        For each BAM file in bamFilesList, the number of mapped reads in the file.

    bed_and_bin : boolean
        If true AND a bedFile is given, compute coverage of each bin of the given size in each region of bedFile

    genomeChunkSize : int
        If not None, the length of the genome used for multiprocessing.

    Returns
    -------
    numpy array

        Each row correspond to each bin/bed region and each column correspond to each of
        the bamFiles.

    Examples
    --------

    The test data contains reads for 200 bp.

    >>> test = Tester()

    The transpose function is used to get a nicer looking output.
    The first line corresponds to the number of reads per bin in bam file 1

    >>> c = CountReadsPerBin([test.bamFile1, test.bamFile2], 50, 4)
    >>> np.transpose(c.run())
    array([[0., 0., 1., 1.],
           [0., 1., 1., 2.]])
    """
def __init__(self, bamFilesList, binLength=50, numberOfSamples=None, numberOfProcessors=1,
verbose=False, region=None,
bedFile=None, extendReads=False,
genomeChunkSize=None,
blackListFileName=None,
minMappingQuality=None,
ignoreDuplicates=False,
chrsToSkip=[],
stepSize=None,
center_read=False,
samFlag_include=None,
samFlag_exclude=None,
zerosToNans=False,
skipZeroOverZero=False,
smoothLength=0,
minFragmentLength=0,
maxFragmentLength=0,
out_file_for_raw_data=None,
bed_and_bin=False,
statsList=[],
mappedList=[]):
self.bamFilesList = bamFilesList
self.binLength = binLength
self.numberOfSamples = numberOfSamples
self.blackListFileName = blackListFileName
self.statsList = statsList
self.mappedList = mappedList
self.skipZeroOverZero = skipZeroOverZero
self.bed_and_bin = bed_and_bin
self.genomeChunkSize = genomeChunkSize
if extendReads and len(bamFilesList):
from deeptools.getFragmentAndReadSize import get_read_and_fragment_length
frag_len_dict, read_len_dict = get_read_and_fragment_length(bamFilesList[0],
return_lengths=False,
blackListFileName=blackListFileName,
numberOfProcessors=numberOfProcessors,
verbose=verbose)
if extendReads is True:
# try to guess fragment length if the bam file contains paired end reads
if frag_len_dict:
self.defaultFragmentLength = int(frag_len_dict['median'])
else:
exit("*ERROR*: library is not paired-end. Please provide an extension length.")
if verbose:
print(("Fragment length based on paired en data "
"estimated to be {}".format(frag_len_dict['median'])))
elif extendReads < read_len_dict['median']:
sys.stderr.write("*WARNING*: read extension is smaller than read length (read length = {}). "
"Reads will not be extended.\n".format(int(read_len_dict['median'])))
self.defaultFragmentLength = 'read length'
elif extendReads > 2000:
exit("*ERROR*: read extension must be smaller that 2000. Value give: {} ".format(extendReads))
else:
self.defaultFragmentLength = int(extendReads)
else:
self.defaultFragmentLength = 'read length'
self.numberOfProcessors = numberOfProcessors
self.verbose = verbose
self.region = region
self.bedFile = bedFile
self.minMappingQuality = minMappingQuality
self.ignoreDuplicates = ignoreDuplicates
self.chrsToSkip = chrsToSkip
self.stepSize = stepSize
self.center_read = center_read
self.samFlag_include = samFlag_include
self.samFlag_exclude = samFlag_exclude
self.minFragmentLength = minFragmentLength
self.maxFragmentLength = maxFragmentLength
self.zerosToNans = zerosToNans
self.smoothLength = smoothLength
if out_file_for_raw_data:
self.save_data = True
self.out_file_for_raw_data = out_file_for_raw_data
else:
self.save_data = False
self.out_file_for_raw_data = None
# check that wither numberOfSamples or stepSize are set
if numberOfSamples is None and stepSize is None and bedFile is None:
raise ValueError("either stepSize, numberOfSamples or bedFile have to be set")
if self.defaultFragmentLength != 'read length':
self.maxPairedFragmentLength = 4 * self.defaultFragmentLength
else:
self.maxPairedFragmentLength = 1000
if self.maxFragmentLength > 0:
self.maxPairedFragmentLength = self.maxFragmentLength
if len(self.mappedList) == 0:
try:
for fname in self.bamFilesList:
bam, mapped, unmapped, stats = bamHandler.openBam(fname, returnStats=True, nThreads=self.numberOfProcessors)
self.mappedList.append(mapped)
self.statsList.append(stats)
bam.close()
except:
self.mappedList = []
self.statsList = []
def get_chunk_length(self, bamFilesHandles, genomeSize, chromSizes, chrLengths):
# Try to determine an optimal fraction of the genome (chunkSize) that is sent to
# workers for analysis. If too short, too much time is spent loading the files
# if too long, some processors end up free.
# the following values are empirical
if self.stepSize is None:
if self.region is None:
self.stepSize = max(int(float(genomeSize) / self.numberOfSamples), 1)
else:
# compute the step size, based on the number of samples
# and the length of the region studied
(chrom, start, end) = mapReduce.getUserRegion(chromSizes, self.region)[:3]
self.stepSize = max(int(float(end - start) / self.numberOfSamples), 1)
# number of samples is better if large
if np.mean(chrLengths) < self.stepSize and self.bedFile is None:
min_num_of_samples = int(genomeSize / np.mean(chrLengths))
raise ValueError("numberOfSamples has to be bigger than {} ".format(min_num_of_samples))
max_mapped = 0
if len(self.mappedList) > 0:
max_mapped = max(self.mappedList)
# If max_mapped is 0 (i.e., bigWig input), set chunkSize to a multiple of binLength and use every bin
if max_mapped == 0:
chunkSize = 10000 * self.binLength
self.stepSize = self.binLength
else:
reads_per_bp = float(max_mapped) / genomeSize
chunkSize = int(self.stepSize * 1e3 / (reads_per_bp * len(bamFilesHandles)))
# Ensure that chunkSize is always at least self.stepSize
if chunkSize < self.stepSize:
chunkSize = self.stepSize
# Ensure that chunkSize is always at least self.binLength
if self.binLength and chunkSize < self.binLength:
chunkSize = self.binLength
return chunkSize
    def run(self, allArgs=None):
        """Open all input files, partition the genome, and collect coverage.

        Dispatches ``count_reads_in_region`` over genome chunks via
        ``mapReduce`` and concatenates the per-chunk count matrices.

        Parameters
        ----------
        allArgs : argparse.Namespace, optional
            Forwarded to ``deeptools.utilities.gtfOptions`` to obtain the
            GTF-related settings.

        Returns
        -------
        numpy array
            One row per bin/BED region and one column per input file.
        """
        bamFilesHandles = []
        for x in self.bamFilesList:
            try:
                y = bamHandler.openBam(x)
            except SystemExit:
                sys.exit(sys.exc_info()[1])
            except:
                # not a BAM file; try opening it as bigWig instead
                y = pyBigWig.open(x)
            bamFilesHandles.append(y)

        chromsizes, non_common = deeptools.utilities.getCommonChrNames(bamFilesHandles, verbose=self.verbose)

        # skip chromosome in the list. This is usually for the
        # X chromosome which may have either one copy in a male sample
        # or a mixture of male/female and is unreliable.
        # Also the skip may contain heterochromatic regions and
        # mitochondrial DNA
        if len(self.chrsToSkip):
            chromsizes = [x for x in chromsizes if x[0] not in self.chrsToSkip]

        chrNames, chrLengths = list(zip(*chromsizes))

        genomeSize = sum(chrLengths)

        chunkSize = None
        if self.bedFile is None:
            # chunking is only needed in bin mode; BED regions define
            # their own work units
            if self.genomeChunkSize is None:
                chunkSize = self.get_chunk_length(bamFilesHandles, genomeSize, chromsizes, chrLengths)
            else:
                chunkSize = self.genomeChunkSize

        [bam_h.close() for bam_h in bamFilesHandles]

        if self.verbose:
            print("step size is {}".format(self.stepSize))

        if self.region:
            # in case a region is used, append the tilesize
            self.region += ":{}".format(self.binLength)

        # Handle GTF options
        transcriptID, exonID, transcript_id_designator, keepExons = deeptools.utilities.gtfOptions(allArgs)

        # use map reduce to call countReadsInRegions_wrapper
        imap_res = mapReduce.mapReduce([],
                                       countReadsInRegions_wrapper,
                                       chromsizes,
                                       self_=self,
                                       genomeChunkLength=chunkSize,
                                       bedFile=self.bedFile,
                                       blackListFileName=self.blackListFileName,
                                       region=self.region,
                                       numberOfProcessors=self.numberOfProcessors,
                                       transcriptID=transcriptID,
                                       exonID=exonID,
                                       keepExons=keepExons,
                                       transcript_id_designator=transcript_id_designator)

        if self.out_file_for_raw_data:
            if len(non_common):
                sys.stderr.write("*Warning*\nThe resulting bed file does not contain information for "
                                 "the chromosomes that were not common between the bigwig files\n")

            # concatenate intermediary bedgraph files
            ofile = open(self.out_file_for_raw_data, "w")
            for _values, tempFileName in imap_res:
                if tempFileName:
                    # concatenate all intermediate tempfiles into one
                    _foo = open(tempFileName, 'r')
                    shutil.copyfileobj(_foo, ofile)
                    _foo.close()
                    os.remove(tempFileName)

            ofile.close()

        try:
            num_reads_per_bin = np.concatenate([x[0] for x in imap_res], axis=0)
            return num_reads_per_bin

        except ValueError:
            # np.concatenate raises ValueError when no worker produced counts
            if self.bedFile:
                sys.exit('\nNo coverage values could be computed.\n\n'
                         'Please check that the chromosome names in the BED file are found on the bam files.\n\n'
                         'The valid chromosome names are:\n{}'.format(chrNames))
            else:
                sys.exit('\nNo coverage values could be computed.\n\nCheck that all bam files are valid and '
                         'contain mapped reads.')
    def count_reads_in_region(self, chrom, start, end, bed_regions_list=None):
        """Counts the reads in each bam file at each 'stepSize' position
        within the interval (start, end) for a window or bin of size binLength.

        The stepSize controls the distance between bins. For example,
        a step size of 20 and a bin size of 20 will create bins next to
        each other. If the step size is smaller than the bin size the
        bins will overlap.

        If a list of bedRegions is given, then the number of reads
        that overlaps with each region is counted.

        Parameters
        ----------
        chrom : str
            Chrom name
        start : int
            start coordinate
        end : int
            end coordinate
        bed_regions_list: list
            List of list of tuples of the form (start, end)
            corresponding to bed regions to be processed.
            If not bed file was passed to the object constructor
            then this list is empty.

        Returns
        -------
        numpy array
            The result is a numpy array that as rows each bin
            and as columns each bam file.

        Examples
        --------
        Initialize some useful values

        >>> test = Tester()
        >>> c = CountReadsPerBin([test.bamFile1, test.bamFile2], 25, 0, stepSize=50)

        The transpose is used to get better looking numbers. The first line
        corresponds to the number of reads per bin in the first bamfile.

        >>> _array, __ = c.count_reads_in_region(test.chrom, 0, 200)
        >>> _array
        array([[0., 0.],
               [0., 1.],
               [1., 1.],
               [1., 2.]])
        """
        if start > end:
            raise NameError("start %d bigger that end %d" % (start, end))

        if self.stepSize is None and bed_regions_list is None:
            raise ValueError("stepSize is not set!")
        # array to keep the read counts for the regions
        subnum_reads_per_bin = []

        start_time = time.time()

        bam_handles = []
        for fname in self.bamFilesList:
            try:
                bam_handles.append(bamHandler.openBam(fname))
            except SystemExit:
                sys.exit(sys.exc_info()[1])
            except:
                # not a BAM file; fall back to bigWig
                bam_handles.append(pyBigWig.open(fname))

        blackList = None
        if self.blackListFileName is not None:
            blackList = GTF(self.blackListFileName)

        # A list of lists of tuples
        transcriptsToConsider = []
        if bed_regions_list is not None:
            if self.bed_and_bin:
                # subdivide each BED region into bins of size binLength
                transcriptsToConsider.append([(x[1][0][0], x[1][0][1], self.binLength) for x in bed_regions_list])
            else:
                transcriptsToConsider = [x[1] for x in bed_regions_list]
        else:
            if self.stepSize == self.binLength:
                # non-overlapping bins: a single (start, end, size) triple
                # covers the whole interval
                transcriptsToConsider.append([(start, end, self.binLength)])
            else:
                for i in range(start, end, self.stepSize):
                    if i + self.binLength > end:
                        break
                    if blackList is not None and blackList.findOverlaps(chrom, i, i + self.binLength):
                        continue
                    transcriptsToConsider.append([(i, i + self.binLength)])

        if self.save_data:
            _file = open(deeptools.utilities.getTempFileName(suffix='.bed'), 'w+t')
            _file_name = _file.name
        else:
            _file_name = ''

        for bam in bam_handles:
            for trans in transcriptsToConsider:
                tcov = self.get_coverage_of_region(bam, chrom, trans)
                if bed_regions_list is not None and not self.bed_and_bin:
                    # one value per BED region: total coverage
                    subnum_reads_per_bin.append(np.sum(tcov))
                else:
                    subnum_reads_per_bin.extend(tcov)

        # column-major reshape: one column per input file
        subnum_reads_per_bin = np.concatenate([subnum_reads_per_bin]).reshape(-1, len(self.bamFilesList), order='F')

        if self.save_data:
            idx = 0
            for i, trans in enumerate(transcriptsToConsider):
                if len(trans[0]) != 3:
                    starts = ",".join([str(x[0]) for x in trans])
                    ends = ",".join([str(x[1]) for x in trans])
                    _file.write("\t".join([chrom, starts, ends]) + "\t")
                    _file.write("\t".join(["{}".format(x) for x in subnum_reads_per_bin[i, :]]) + "\n")
                else:
                    for exon in trans:
                        for startPos in range(exon[0], exon[1], exon[2]):
                            if idx >= subnum_reads_per_bin.shape[0]:
                                # At the end of chromosomes (or due to blacklisted regions), there are bins smaller than the bin size
                                # Counts there are added to the bin before them, but range() will still try to include them.
                                break
                            _file.write("{0}\t{1}\t{2}\t".format(chrom, startPos, min(startPos + exon[2], exon[1])))
                            _file.write("\t".join(["{}".format(x) for x in subnum_reads_per_bin[idx, :]]) + "\n")
                            idx += 1
            _file.close()
        if self.verbose:
            endTime = time.time()
            rows = subnum_reads_per_bin.shape[0]
            print("%s countReadsInRegions_worker: processing %d "
                  "(%.1f per sec) @ %s:%s-%s" %
                  (multiprocessing.current_process().name,
                   rows, rows / (endTime - start_time), chrom, start, end))
        return subnum_reads_per_bin, _file_name
    def get_coverage_of_region(self, bamHandle, chrom, regions,
                               fragmentFromRead_func=None):
        """
        Returns a numpy array that corresponds to the number of reads
        that overlap with each tile.

        >>> test = Tester()
        >>> import pysam
        >>> c = CountReadsPerBin([], stepSize=1, extendReads=300)

        For this case the reads are length 36. The number of overlapping
        read fragments is 4 and 5 for the positions tested.

        >>> c.get_coverage_of_region(pysam.AlignmentFile(test.bamFile_PE), 'chr2',
        ... [(5000833, 5000834), (5000834, 5000835)])
        array([4., 5.])

        In the following example a paired read is extended to the fragment length which is 100
        The first mate starts at 5000000 and the second at 5000064. Each mate is
        extended to the fragment length *independently*
        At position 500090-500100 one fragment of length 100 overlap, and after position 5000101
        there should be zero reads.

        >>> c.zerosToNans = True
        >>> c.get_coverage_of_region(pysam.AlignmentFile(test.bamFile_PE), 'chr2',
        ... [(5000090, 5000100), (5000100, 5000110)])
        array([ 1., nan])

        In the following case the reads length is 50. Reads are not extended.

        >>> c.extendReads=False
        >>> c.get_coverage_of_region(pysam.AlignmentFile(test.bamFile2), '3R', [(148, 150), (150, 152), (152, 154)])
        array([1., 2., 2.])

        """
        if not fragmentFromRead_func:
            fragmentFromRead_func = self.get_fragment_from_read
        # total number of bins: one per region, unless regions carry a
        # third (tile size) element, in which case each is subdivided
        nbins = len(regions)
        if len(regions[0]) == 3:
            nbins = 0
            for reg in regions:
                nbins += (reg[1] - reg[0]) // reg[2]
                if (reg[1] - reg[0]) % reg[2] > 0:
                    nbins += 1
        coverages = np.zeros(nbins, dtype='float64')

        if self.defaultFragmentLength == 'read length':
            extension = 0
        else:
            extension = self.maxPairedFragmentLength

        blackList = None
        if self.blackListFileName is not None:
            blackList = GTF(self.blackListFileName)

        vector_start = 0
        for idx, reg in enumerate(regions):
            if len(reg) == 3:
                tileSize = int(reg[2])
                nRegBins = (reg[1] - reg[0]) // tileSize
                if (reg[1] - reg[0]) % tileSize > 0:
                    # Don't eliminate small bins! Issue 887
                    nRegBins += 1
            else:
                nRegBins = 1
                tileSize = int(reg[1] - reg[0])

            # Blacklisted regions have a coverage of 0
            if blackList and blackList.findOverlaps(chrom, reg[0], reg[1]):
                continue
            regStart = int(max(0, reg[0] - extension))
            regEnd = reg[1] + int(extension)

            # If alignments are extended and there's a blacklist, ensure that no
            # reads originating in a blacklist are fetched
            if blackList and reg[0] > 0 and extension > 0:
                o = blackList.findOverlaps(chrom, regStart, reg[0])
                if o is not None and len(o) > 0:
                    regStart = o[-1][1]
                o = blackList.findOverlaps(chrom, reg[1], regEnd)
                if o is not None and len(o) > 0:
                    regEnd = o[0][0]

            start_time = time.time()
            # caching seems faster. TODO: profile the function
            c = 0
            if chrom not in bamHandle.references:
                raise NameError("chromosome {} not found in bam file".format(chrom))

            # (lpos, prev_pos) track the position and fragment signatures
            # of previously processed read pairs, for duplicate filtering
            prev_pos = set()
            lpos = None
            for read in bamHandle.fetch(chrom, regStart, regEnd):
                if read.is_unmapped:
                    continue
                if self.minMappingQuality and read.mapq < self.minMappingQuality:
                    continue

                # filter reads based on SAM flag
                if self.samFlag_include and read.flag & self.samFlag_include != self.samFlag_include:
                    continue
                if self.samFlag_exclude and read.flag & self.samFlag_exclude != 0:
                    continue

                # Fragment lengths
                tLen = deeptools.utilities.getTLen(read)
                if self.minFragmentLength > 0 and tLen < self.minFragmentLength:
                    continue
                if self.maxFragmentLength > 0 and tLen > self.maxFragmentLength:
                    continue

                # get rid of duplicate reads that have same position on each of the
                # pairs
                if self.ignoreDuplicates:
                    # Assuming more or less concordant reads, use the fragment bounds, otherwise the start positions
                    if tLen >= 0:
                        s = read.pos
                        e = s + tLen
                    else:
                        s = read.pnext
                        e = s - tLen
                    if read.reference_id != read.next_reference_id:
                        e = read.pnext
                    if lpos is not None and lpos == read.reference_start \
                            and (s, e, read.next_reference_id, read.is_reverse) in prev_pos:
                        continue
                    if lpos != read.reference_start:
                        prev_pos.clear()
                    lpos = read.reference_start
                    prev_pos.add((s, e, read.next_reference_id, read.is_reverse))

                # since reads can be split (e.g. RNA-seq reads) each part of the
                # read that maps is called a position block.
                try:
                    position_blocks = fragmentFromRead_func(read)
                except TypeError:
                    # the get_fragment_from_read functions returns None in some cases.
                    # Those cases are to be skipped, hence the continue line.
                    continue

                last_eIdx = None
                for fragmentStart, fragmentEnd in position_blocks:
                    if fragmentEnd is None or fragmentStart is None:
                        continue
                    fragmentLength = fragmentEnd - fragmentStart
                    if fragmentLength == 0:
                        continue
                    # skip reads that are not in the region being
                    # evaluated.
                    if fragmentEnd <= reg[0] or fragmentStart >= reg[1]:
                        continue
                    # clip the fragment to the region/coverage vector
                    if fragmentStart < reg[0]:
                        fragmentStart = reg[0]
                    if fragmentEnd > reg[0] + len(coverages) * tileSize:
                        fragmentEnd = reg[0] + len(coverages) * tileSize

                    sIdx = vector_start + max((fragmentStart - reg[0]) // tileSize, 0)
                    eIdx = vector_start + min(np.ceil(float(fragmentEnd - reg[0]) / tileSize).astype('int'), nRegBins)
                    if last_eIdx is not None:
                        # don't count the same read twice in overlapping bins
                        sIdx = max(last_eIdx, sIdx)
                        if sIdx >= eIdx:
                            continue
                    sIdx = int(sIdx)
                    eIdx = int(eIdx)
                    coverages[sIdx:eIdx] += 1
                    last_eIdx = eIdx

                c += 1

            if self.verbose:
                endTime = time.time()
                print("%s, processing %s (%.1f per sec) reads @ %s:%s-%s" % (
                    multiprocessing.current_process().name, c, c / (endTime - start_time), chrom, reg[0], reg[1]))

            vector_start += nRegBins

        # change zeros to NAN
        if self.zerosToNans:
            coverages[coverages == 0] = np.nan

        return coverages
    def getReadLength(self, read):
        # Length of the read as reported by len(); for a pysam
        # AlignedSegment this is presumably the query length — confirm.
        return len(read)
    @staticmethod
    def is_proper_pair(read, maxPairedFragmentLength):
        """
        Checks if a read is proper pair meaning that both mates are facing each other and are in
        the same chromosome and are not to far away. The sam flag for proper pair can not
        always be trusted. Note that if the fragment size is > maxPairedFragmentLength (~2kb
        usually) that False will be returned.
        :return: bool

        >>> import pysam
        >>> import os
        >>> from deeptools.countReadsPerBin import CountReadsPerBin as cr
        >>> root = os.path.dirname(os.path.abspath(__file__)) + "/test/test_data/"
        >>> bam = pysam.AlignmentFile("{}/test_proper_pair_filtering.bam".format(root))
        >>> iter = bam.fetch()
        >>> read = next(iter)
        >>> cr.is_proper_pair(read, 1000) # "keep" read
        True
        >>> cr.is_proper_pair(read, 200) # "keep" read, but maxPairedFragmentLength is too short
        False
        >>> read = next(iter)
        >>> cr.is_proper_pair(read, 1000) # "improper pair"
        False
        >>> read = next(iter)
        >>> cr.is_proper_pair(read, 1000) # "mismatch chr"
        False
        >>> read = next(iter)
        >>> cr.is_proper_pair(read, 1000) # "same orientation1"
        False
        >>> read = next(iter)
        >>> cr.is_proper_pair(read, 1000) # "same orientation2"
        False
        >>> read = next(iter)
        >>> cr.is_proper_pair(read, 1000) # "rev first"
        False
        >>> read = next(iter)
        >>> cr.is_proper_pair(read, 1000) # "rev first OK"
        True
        >>> read = next(iter)
        >>> cr.is_proper_pair(read, 1000) # "for first"
        False
        >>> read = next(iter)
        >>> cr.is_proper_pair(read, 1000) # "for first"
        True
        """
        if not read.is_proper_pair:
            return False
        if read.reference_id != read.next_reference_id:
            return False
        if abs(read.template_length) > maxPairedFragmentLength:
            return False
        # check that the mates face each other (inward)
        if read.is_reverse is read.mate_is_reverse:
            return False
        # the reverse mate must start at or after the forward mate
        if read.is_reverse:
            if read.reference_start >= read.next_reference_start:
                return True
        else:
            if read.reference_start <= read.next_reference_start:
                return True
        return False
    def get_fragment_from_read(self, read):
        """Get read start and end position of a read.
        If given, the reads are extended as follows:
        If reads are paired end, each read mate is extended to match
        the fragment length, otherwise, a default fragment length
        is used. If reads are split (give by the CIGAR string) then
        the multiple positions of the read are returned.
        When reads are extended the cigar information is
        skipped.

        Parameters
        ----------
        read: pysam object.

        The following values are defined (for forward reads)::


                 |--          -- read.tlen --              --|
                 |-- read.alen --|
            -----|===============>------------<==============|----
                 |               |            |
            read.reference_start
                        read.reference_end  read.pnext

              and for reverse reads


                 |--             -- read.tlen --           --|
                                 |-- read.alen --|
            -----|===============>-----------<===============|----
                 |               |           |
              read.pnext   read.reference_start  read.reference_end

        this is a sketch of a pair-end reads

        The function returns the fragment start and end, either
        using the paired end information (if available) or
        extending the read in the appropriate direction if this
        is single-end.

        Parameters
        ----------
        read : pysam read object


        Returns
        -------
        list of tuples
            [(fragment start, fragment end)]


        >>> test = Tester()
        >>> c = CountReadsPerBin([], 1, 1, 200, extendReads=True)
        >>> c.defaultFragmentLength=100
        >>> c.get_fragment_from_read(test.getRead("paired-forward"))
        [(5000000, 5000100)]
        >>> c.get_fragment_from_read(test.getRead("paired-reverse"))
        [(5000000, 5000100)]
        >>> c.defaultFragmentLength = 200
        >>> c.get_fragment_from_read(test.getRead("single-forward"))
        [(5001491, 5001691)]
        >>> c.get_fragment_from_read(test.getRead("single-reverse"))
        [(5001536, 5001736)]
        >>> c.defaultFragmentLength = 'read length'
        >>> c.get_fragment_from_read(test.getRead("single-forward"))
        [(5001491, 5001527)]
        >>> c.defaultFragmentLength = 'read length'
        >>> c.extendReads = False
        >>> c.get_fragment_from_read(test.getRead("paired-forward"))
        [(5000000, 5000036)]

        Tests for read centering.

        >>> c = CountReadsPerBin([], 1, 1, 200, extendReads=True, center_read=True)
        >>> c.defaultFragmentLength = 100
        >>> assert(c.get_fragment_from_read(test.getRead("paired-forward")) == [(5000032, 5000068)])
        >>> c.defaultFragmentLength = 200
        >>> assert(c.get_fragment_from_read(test.getRead("single-reverse")) == [(5001618, 5001654)])
        """
        # if no extension is needed, use pysam get_blocks
        # to identify start and end reference positions.
        # get_blocks return a list of start and end positions
        # based on the CIGAR if skipped regions are found.
        # E.g for a cigar of 40M260N22M
        # get blocks return two elements for the first 40 matches
        # and the for the last 22 matches.
        if self.defaultFragmentLength == 'read length':
            return read.get_blocks()

        else:
            if self.is_proper_pair(read, self.maxPairedFragmentLength):
                if read.is_reverse:
                    fragmentStart = read.next_reference_start
                    fragmentEnd = read.reference_end
                else:
                    fragmentStart = read.reference_start
                    # the end of the fragment is defined as
                    # the start of the forward read plus the insert length
                    fragmentEnd = read.reference_start + abs(read.template_length)

            # Extend using the default fragment length
            else:
                if read.is_reverse:
                    fragmentStart = read.reference_end - self.defaultFragmentLength
                    fragmentEnd = read.reference_end
                else:
                    fragmentStart = read.reference_start
                    fragmentEnd = read.reference_start + self.defaultFragmentLength

        if self.center_read:
            # shrink the fragment back to read length, centered on the
            # fragment midpoint
            fragmentCenter = fragmentEnd - (fragmentEnd - fragmentStart) / 2
            fragmentStart = int(fragmentCenter - read.infer_query_length(always=False) / 2)
            fragmentEnd = fragmentStart + read.infer_query_length(always=False)

        assert fragmentStart < fragmentEnd, "fragment start greater than fragment" \
                                            "end for read {}".format(read.query_name)
        return [(fragmentStart, fragmentEnd)]
def getSmoothRange(self, tileIndex, tileSize, smoothRange, maxPosition):
"""
Given a tile index position and a tile size (length), return the a new indices
over a larger range, called the smoothRange.
This region is centered in the tileIndex an spans on both sizes
to cover the smoothRange. The smoothRange is trimmed in case it is less
than zero or greater than maxPosition ::
---------------|==================|------------------
tileStart
|--------------------------------------|
| <-- smoothRange --> |
|
tileStart - (smoothRange-tileSize)/2
Test for a smooth range that spans 3 tiles.
Examples
--------
>>> c = CountReadsPerBin([], 1, 1, 1, 0)
>>> c.getSmoothRange(5, 1, 3, 10)
(4, 7)
Test smooth range truncated on start.
>>> c.getSmoothRange(0, 10, 30, 200)
(0, 2)
Test smooth range truncated on start.
>>> c.getSmoothRange(1, 10, 30, 4)
(0, 3)
Test smooth range truncated on end.
>>> c.getSmoothRange(5, 1, 3, 5)
(4, 5)
Test smooth range not multiple of tileSize.
>>> c.getSmoothRange(5, 10, 24, 10)
(4, 6)
"""
smoothTiles = int(smoothRange / tileSize)
if smoothTiles == 1:
return (tileIndex, tileIndex + 1)
smoothTilesSide = float(smoothTiles - 1) / 2
smoothTilesLeft = int(np.ceil(smoothTilesSide))
smoothTilesRight = int(np.floor(smoothTilesSide)) + 1
indexStart = max(tileIndex - smoothTilesLeft, 0)
indexEnd = min(maxPosition, tileIndex + smoothTilesRight)
return (indexStart, indexEnd)
def remove_row_of_zeros(matrix):
    """Return *matrix* without rows that are all zeros or all nans.

    NaNs are treated as zeros; a row is kept when its NaN-free sum is
    non-zero (for the non-negative count matrices used here this means
    "contains at least one non-zero entry").
    """
    row_sums = np.nan_to_num(matrix).sum(axis=1)
    return matrix[row_sums != 0, :]
def estimateSizeFactors(m):
    """Return per-column scaling factors computed DESeq2-style.

    For every row the log geometric mean across columns is taken; each
    column's size factor is the exponential of the median log-ratio of
    its (positive) entries to those row means. The inverse is returned,
    as that is what bamCoverage expects.

    m : a numpy ndarray

    >>> m = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [0, 10, 0], [10, 5, 100]])
    >>> sf = estimateSizeFactors(m)
    >>> assert(np.all(np.abs(sf - [1.305, 0.9932, 0.783]) < 1e-4))
    >>> m = np.array([[0, 0], [0, 1], [1, 1], [1, 2]])
    >>> sf = estimateSizeFactors(m)
    >>> assert(np.all(np.abs(sf - [1.1892, 0.8409]) < 1e-4))
    """
    row_log_means = np.sum(np.log(m), axis=1) / m.shape[1]
    # Mask only after computing the geometric means, mirroring DESeq2:
    # zero counts push a row's mean to -inf, which is masked out next.
    counts = np.ma.masked_where(m <= 0, m)
    row_log_means = np.ma.masked_where(np.isinf(row_log_means), row_log_means)
    # size factor = median over rows of log(count / geometric mean)
    factors = np.exp(np.ma.median((np.log(counts).T - row_log_means).T, axis=0))
    return 1. / factors
class Tester(object):
    # Fixture for the doctests in this module: points at the bundled
    # test BAM files and provides example reads.

    def __init__(self):
        """
        The distribution of reads between the two bam files is as follows.

        They cover 200 bp

          0                              100                           200
          |------------------------------------------------------------|
        A                                ===============
                                                        ===============


        B                 ===============               ===============
                                         ===============
                                                        ===============
        """
        self.root = os.path.dirname(os.path.abspath(__file__)) + "/test/test_data/"
        # self.root = "./test/test_data/"
        self.bamFile1 = self.root + "testA.bam"
        self.bamFile2 = self.root + "testB.bam"
        # paired-end test file used by the fragment-extension doctests
        self.bamFile_PE = self.root + "test_paired2.bam"
        self.chrom = '3R'
        global debug
        debug = 0

    def getRead(self, readType):
        """ prepare arguments for test: fetch a single read of the
        requested kind from the paired-end test BAM file
        """
        bam = bamHandler.openBam(self.bamFile_PE)
        if readType == 'paired-reverse':
            read = [x for x in bam.fetch('chr2', 5000081, 5000082)][0]
        elif readType == 'single-forward':
            read = [x for x in bam.fetch('chr2', 5001491, 5001492)][0]
        elif readType == 'single-reverse':
            read = [x for x in bam.fetch('chr2', 5001700, 5001701)][0]
        else:  # by default a forward paired read is returned
            read = [x for x in bam.fetch('chr2', 5000027, 5000028)][0]
        return read
|
fidelram/deepTools
|
deeptools/countReadsPerBin.py
|
Python
|
gpl-3.0
| 42,163
|
[
"pysam"
] |
e2327c96aece2d0b21d5d20417d155db8bfaa8c29ca57f47de734cc63d5614dc
|
'''
This modules contains OpenMP-related stuff.
* OMPDirective is used to represent OpenMP annotations in the AST
* GatherOMPData turns OpenMP-like string annotations into metadata
'''
from pythran.passmanager import Transformation
import pythran.metadata as metadata
from pythran.spec import parse_pytypes
from pythran.types.conversion import pytype_to_ctype
from pythran.utils import isstr
from gast import AST
import gast as ast
import re
# OpenMP directive/clause keywords recognised at the top level of an
# annotation string (outside parentheses); anything else is treated as a
# variable reference.
keywords = {
    'atomic',
    'barrier',
    'capture',
    'cancel',
    'collapse',
    'copyin',
    'copyprivate',
    'critical',
    'declare',
    'default',
    'final',
    'firstprivate',
    'flush',
    'for',
    'if',
    'initializer',
    'lastprivate',
    'master',
    'mergeable',
    'none',
    'nowait',
    'num_threads',
    'omp',
    'ordered',
    'parallel',
    'private',
    'read',
    'reduction',
    'schedule',
    'section',
    'sections',
    'shared',
    'simd',
    'single',
    'task',
    'taskwait',
    'taskyield',
    'threadprivate',
    'untied',
    'update',
    'write'
}

# Special identifiers that are only keywords inside a `declare` directive.
declare_keywords = {
    'omp_in',
    'omp_init',
    'omp_orig',
    'omp_out',
    'omp_priv',
}

# Keywords after which the following word is also treated as reserved.
# NOTE(review): name keeps its historical typo ("contex") because other
# code in this module refers to it.
reserved_contex = {
    'critical',
    'declare',
    'default',
    'schedule',
    'reduction',
}
def is_declare_typename(s, offset, bounds):
    """Detect a colon-delimited type name around *offset* in a declare
    directive string.

    On success, append the (start, stop) slice indices of the text between
    the two colons to *bounds* and return True; otherwise leave *bounds*
    untouched and return False.
    """
    start = s.rfind(':', 0, offset - 1)
    stop = s.rfind(':', offset + 1)
    # Both colons must be found at a positive index.
    if start <= 0 or stop <= 0:
        return False
    bounds.extend((start + 1, stop))
    return True
class OMPDirective(AST):
    '''Turn a string into a context-dependent metadata.

    The directive text is stored in ``self.s`` with ``{}`` placeholders for
    each variable the directive depends on; the matching AST nodes are kept
    (in placeholder order) in ``self.deps``.  Members of ``private(...)`` /
    ``shared(...)`` clauses are additionally tracked in ``self.private_deps``
    and ``self.shared_deps``.

    >>> o = OMPDirective("omp for private(a,b) shared(c)")
    >>> o.s
    'omp for private({},{}) shared({})'
    >>> [ type(dep) for dep in o.deps ]
    [<class 'gast.gast.Name'>, <class 'gast.gast.Name'>, \
<class 'gast.gast.Name'>]
    >>> [ dep.id for dep in o.deps ]
    ['a', 'b', 'c']
    '''

    def __init__(self, *args):  # no positional argument to be deep copyable
        super(OMPDirective, self).__init__()
        if not args:
            return
        # AST nodes the directive references, in placeholder order.
        self.deps = []
        # Subsets of self.deps found in private(...) / shared(...) clauses.
        self.private_deps = []
        self.shared_deps = []

        def tokenize(s):
            '''A simple contextual "parser" for an OpenMP string'''
            # not completely satisfying if there are strings in if expressions
            # State: out accumulates the rewritten directive; the booleans
            # track which clause context we are in so clause keywords are
            # not mistaken for program variables.
            out = ''
            par_count = 0
            curr_index = 0
            in_reserved_context = False
            in_declare = False
            in_shared = in_private = False
            while curr_index < len(s):
                bounds = []
                # In a "declare" directive, a ":typename:" span is converted
                # to the corresponding C type name(s).
                if in_declare and is_declare_typename(s, curr_index, bounds):
                    start, stop = bounds
                    pytypes = parse_pytypes(s[start:stop])
                    out += ', '.join(map(pytype_to_ctype, pytypes))
                    curr_index = stop
                    continue
                m = re.match(r'^([a-zA-Z_]\w*)', s[curr_index:])
                if m:
                    word = m.group(0)
                    curr_index += len(word)
                    # Keywords are copied verbatim; anything else becomes a
                    # {} placeholder with a Name dependency recorded.
                    if(in_reserved_context or
                       (in_declare and word in declare_keywords) or
                       (par_count == 0 and word in keywords)):
                        out += word
                        in_reserved_context = word in reserved_contex
                        in_declare |= word == 'declare'
                        in_private |= word == 'private'
                        in_shared |= word == 'shared'
                    else:
                        out += '{}'
                        self.deps.append(ast.Name(word, ast.Load(),
                                                  None, None))
                        # Fold a trailing ".attr" access into the dependency.
                        isattr = re.match(r'^\s*(\.\s*[a-zA-Z_]\w*)', s[curr_index:])
                        if isattr:
                            attr = isattr.group(0)
                            curr_index += len(attr)
                            self.deps[-1] = ast.Attribute(self.deps[-1],
                                                          attr[1:], ast.Load())
                        if in_private:
                            self.private_deps.append(self.deps[-1])
                        if in_shared:
                            self.shared_deps.append(self.deps[-1])
                elif s[curr_index] == '(':
                    par_count += 1
                    curr_index += 1
                    out += '('
                elif s[curr_index] == ')':
                    par_count -= 1
                    curr_index += 1
                    out += ')'
                    # Leaving the outermost parens ends any clause context.
                    if par_count == 0:
                        in_reserved_context = False
                        in_shared = in_private = False
                else:
                    if s[curr_index] in ',:':
                        in_reserved_context = False
                    out += s[curr_index]
                    curr_index += 1
            return out

        self.s = tokenize(args[0])
        self._fields = ('deps', 'shared_deps', 'private_deps')
##
class GatherOMPData(Transformation):
    '''Walks node and collect string comments looking for OpenMP directives.

    Bare string expression statements starting with "omp " are removed from
    the tree and re-attached as OMPDirective metadata on the following
    statement.
    '''

    # there is a special handling for If and Expr, so not listed here
    statements = ("FunctionDef", "Return", "Delete", "Assign", "AugAssign",
                  "Print", "For", "While", "Raise", "TryExcept", "TryFinally",
                  "Assert", "Import", "ImportFrom", "Pass", "Break",)

    # these fields hold statement lists
    statement_lists = ("body", "orelse", "finalbody",)

    def __init__(self):
        Transformation.__init__(self)
        # Remap self.visit_XXXX() to self.attach_data() generic method
        for s in GatherOMPData.statements:
            setattr(self, "visit_" + s, self.attach_data)
        # Directive strings seen since the last statement; flushed onto the
        # next visited statement by attach_data().
        self.current = list()

    def isompdirective(self, node):
        # True for a string constant whose value starts with "omp ".
        return isstr(node) and node.value.startswith("omp ")

    def visit_Expr(self, node):
        if self.isompdirective(node.value):
            # Remember the directive text and drop the bare-string statement.
            self.current.append(node.value.value)
            return None
        else:
            self.attach_data(node)
            return node

    def visit_If(self, node):
        if self.isompdirective(node.test):
            # "if 'omp ...':" form — hoist the directive and keep the body
            # under a constant-true test.
            self.visit(ast.Expr(node.test))
            return self.visit(ast.If(ast.Constant(1, None),
                                     node.body, node.orelse))
        else:
            return self.attach_data(node)

    def attach_data(self, node):
        '''Generic method called for visit_XXXX() with XXXX in
        GatherOMPData.statements list
        '''
        # Flush any pending directives onto this statement as metadata.
        if self.current:
            for curr in self.current:
                md = OMPDirective(curr)
                metadata.add(node, md)
            self.current = list()
        # add a Pass to hold some directives
        for field_name, field in ast.iter_fields(node):
            if field_name in GatherOMPData.statement_lists:
                if(field and
                   isinstance(field[-1], ast.Expr) and
                   self.isompdirective(field[-1].value)):
                    field.append(ast.Pass())
        self.generic_visit(node)
        # add an If to hold scoping OpenMP directives
        directives = metadata.get(node, OMPDirective)
        field_names = {n for n, _ in ast.iter_fields(node)}
        has_no_scope = field_names.isdisjoint(GatherOMPData.statement_lists)
        if directives and has_no_scope:
            # some directives create a scope, but the holding stmt may not
            # artificially create one here if needed
            sdirective = ''.join(d.s for d in directives)
            scoping = ('parallel', 'task', 'section')
            if any(s in sdirective for s in scoping):
                # Move the directives onto a wrapping constant-true If.
                metadata.clear(node, OMPDirective)
                node = ast.If(ast.Constant(1, None), [node], [])
                for directive in directives:
                    metadata.add(node, directive)
        return node
|
serge-sans-paille/pythran
|
pythran/openmp.py
|
Python
|
bsd-3-clause
| 7,977
|
[
"VisIt"
] |
e7ade9a6692827edd98aed57e0bb689b1f60013843416d3fe9b0d2808f627490
|
"""
Methods related to importing data.
"""
import copy
import csv
import os
import shutil
import re
import subprocess
from tempfile import mkdtemp
from tempfile import mkstemp
from tempfile import NamedTemporaryFile
from BCBio import GFF
from Bio import Entrez
from Bio import SeqIO
from celery import task
from django.conf import settings
from django.db import transaction
import vcf
from main.celery_util import assert_celery_running
from main.exceptions import ValidationException
from main.models import Chromosome
from main.models import Dataset
from main.models import ExperimentSample
from main.models import ReferenceGenome
from main.models import VariantSet
from main.models import VariantToVariantSet
from main.model_utils import clean_filesystem_location
from main.model_utils import get_dataset_with_type
from main.s3 import project_files_needed
from pipeline.read_alignment_util import ensure_bwa_index
from pipeline.variant_effects import build_snpeff
from utils import generate_safe_filename_prefix_from_label
from utils import uppercase_underscore
from utils.genbank_util import generate_gbk_feature_index
from utils.jbrowse_util import prepare_jbrowse_ref_sequence
from utils.jbrowse_util import add_genbank_file_track
from variants.vcf_parser import get_or_create_variant
from variants.vcf_parser import update_filter_key_map
# Maps the user-facing import format name to the Dataset type constant.
IMPORT_FORMAT_TO_DATASET_TYPE = {
    'fasta': Dataset.TYPE.REFERENCE_GENOME_FASTA,
    'genbank': Dataset.TYPE.REFERENCE_GENOME_GENBANK,
    'gff': Dataset.TYPE.REFERENCE_GENOME_GFF,
    'vcf_user': Dataset.TYPE.VCF_USERINPUT
}

# Column names for the "copy from server" targets-file template.
SAMPLE_SERVER_COPY_KEY__SAMPLE_NAME = 'Sample_Name'
SAMPLE_SERVER_COPY_KEY__READ_1 = 'Read_1_Path'
SAMPLE_SERVER_COPY_KEY__READ_2 = 'Read_2_Path'

REQUIRED_SAMPLE_SERVER_COPY_HEADER = [
    SAMPLE_SERVER_COPY_KEY__SAMPLE_NAME,
    SAMPLE_SERVER_COPY_KEY__READ_1,
]

# Cols that we know about, to distinguish them from user-defined cols.
PRE_DEFINED_SAMPLE_SERVER_COPY_HEADER_PARTS = (
    REQUIRED_SAMPLE_SERVER_COPY_HEADER +
    [SAMPLE_SERVER_COPY_KEY__READ_2])

# Column names for the "upload through browser" targets-file template.
SAMPLE_BROWSER_UPLOAD_KEY__SAMPLE_NAME = 'Sample_Name'
SAMPLE_BROWSER_UPLOAD_KEY__READ_1 = 'Read_1_Filename'
SAMPLE_BROWSER_UPLOAD_KEY__READ_2 = 'Read_2_Filename'

REQUIRED_SAMPLE_UPLOAD_THROUGH_BROWSER_HEADER = [
    SAMPLE_BROWSER_UPLOAD_KEY__SAMPLE_NAME,
    SAMPLE_BROWSER_UPLOAD_KEY__READ_1,
]

# Cols that we know about, to distinguish them from user-defined cols.
PRE_DEFINED_SAMPLE_UPLOAD_THROUGH_BROWSER_PARTS = (
    REQUIRED_SAMPLE_UPLOAD_THROUGH_BROWSER_HEADER +
    [SAMPLE_BROWSER_UPLOAD_KEY__READ_2])

# Leading columns expected in a user-supplied VCF header line.
REQUIRED_VCF_HEADER_PART = ['CHROM', 'POS', 'ID', 'REF', 'ALT']

# S3 helpers are only importable when S3 support is configured.
if settings.S3_ENABLED:
    from main.s3 import s3_temp_get, s3_get
def import_reference_genome_from_s3(project, label, s3file, import_format):
    """Import a reference genome whose source file lives on S3.

    Downloads the file to a temporary local path and delegates to
    import_reference_genome_from_local_file().
    """
    with s3_temp_get(s3file) as local_path:
        return import_reference_genome_from_local_file(
                project, label, local_path, import_format)
@project_files_needed
def import_samples_from_s3(project, targets_file_rows, s3files):
    """Import ExperimentSamples whose fastq data lives on S3.

    Downloads every S3 file to a temporary directory once, then creates an
    ExperimentSample per targets-file row and copies the corresponding local
    fastq files into each sample's data dir.

    Args:
        project: Project the samples are added to.
        targets_file_rows: Parsed targets-file rows (dicts with
            'Sample_Name', 'Read_1_Path' and optional 'Read_2_Path').
        s3files: S3 file objects exposing .name and .key.
    """
    tmp_dir = mkdtemp()
    # BUGFIX: previously the temp dir leaked if any download or model
    # creation raised; always clean it up.
    try:
        local_s3files_map = {}
        for s3file in s3files:
            filepath = os.path.join(tmp_dir, s3file.name)
            s3_get(s3file.key, filepath)
            local_s3files_map[s3file.name] = filepath

        for row in targets_file_rows:
            sample_label = row['Sample_Name']
            experiment_sample = ExperimentSample.objects.create(
                    project=project, label=sample_label)
            copy_and_add_dataset_source(experiment_sample, Dataset.TYPE.FASTQ1,
                    Dataset.TYPE.FASTQ1,
                    local_s3files_map[row['Read_1_Path']])
            if 'Read_2_Path' in row and row['Read_2_Path']:
                copy_and_add_dataset_source(experiment_sample,
                        Dataset.TYPE.FASTQ2, Dataset.TYPE.FASTQ2,
                        local_s3files_map[row['Read_2_Path']])
    finally:
        shutil.rmtree(tmp_dir)
class DataImportError(Exception):
    """Exception thrown when there are errors in imported data.

    Attributes:
        msg: Explanation of the error.
    """

    def __init__(self, msg):
        # Pass the message up to Exception so .args is populated and the
        # exception pickles/reprs correctly; keep .msg for existing callers.
        super(DataImportError, self).__init__(msg)
        self.msg = msg

    def __str__(self):
        return 'DataImportError: ' + str(self.msg)
@project_files_needed
def import_reference_genome_from_local_file(project, label, file_location,
        import_format, move=False):
    """Creates a ReferenceGenome associated with the given Project.

    Args:
        project: The Project we're storing everyting relative to.
        label: The human-readable label for the ReferenceGenome.
        file_location: Location of the genome on the server.
        import_format: Must be 'fasta' or 'genbank'.
        move: move instead of copy the original file_location - for instance,
            if we saved it to a temporary file. Moving is of course faster than
            copying.

    Returns:
        ReferenceGenome.

    Raises:
        DataImportError: If the file parses but contains no sequence.
    """
    # Validate the input.
    assert import_format in ['fasta', 'genbank']

    # Get rid of any whitepspace.
    file_location = file_location.strip()

    # Validate the file.
    assert os.path.exists(file_location), "File %s doesn't exist." % (
            file_location)

    # Validate the input by parsing it with BioPython, while also
    # counting the number of chromosomes.
    num_bases = 0
    for genome_record in SeqIO.parse(file_location, import_format):
        num_bases += len(genome_record)

    # Make sure sequence exists.
    if not num_bases > 0:
        raise DataImportError("No sequence in file.")

    # Create the ReferenceGenome object.
    reference_genome = ReferenceGenome.objects.create(
            project=project,
            label=label)

    # Copy the source file to the ReferenceGenome data location.
    dataset_type = IMPORT_FORMAT_TO_DATASET_TYPE[import_format]
    file_suffix = file_location.rsplit('/', 1)[-1]

    # Substitute non-alphanumeric characters in filename with underscores
    sanitized_file_suffix = re.sub('[\W]', '_', file_suffix)

    # Move/copy file to reference genome model dir
    dest_path = os.path.join(
            reference_genome.get_model_data_dir(),
            sanitized_file_suffix)
    if move:
        shutil.move(file_location, dest_path)
    else:
        shutil.copy(file_location, dest_path)

    # Add the dataset to the reference genome
    add_dataset_to_entity(
            reference_genome,
            label,
            dataset_type,
            dest_path)

    return reference_genome
def add_chromosomes(reference_genome, dataset):
    """Makes a Chromosome for each unique SeqRecord.name in the dataset.

    Args:
        reference_genome: ReferenceGenome the chromosomes belong to.
        dataset: Dataset pointing at a FASTA or GenBank file. Other dataset
            types (GFF, feature index, mobile element FASTA) are no-ops.
    """
    # Already-known seqrecord ids, so re-imports don't create duplicates.
    seqrecord_ids = [
            chrom.seqrecord_id for chrom in
            Chromosome.objects.filter(reference_genome=reference_genome)]

    def _make_chromosome(seqrecord_iter):
        # Create a Chromosome for every record not already registered.
        for seqrecord in seqrecord_iter:
            if seqrecord.id not in seqrecord_ids:
                Chromosome.objects.create(
                        reference_genome=reference_genome,
                        label=seqrecord.id,
                        seqrecord_id=seqrecord.id,
                        num_bases=len(seqrecord))

    dataset_path = dataset.get_absolute_location()

    # Add chromosome labels and ids
    if dataset.type == Dataset.TYPE.REFERENCE_GENOME_FASTA:
        _make_chromosome(SeqIO.parse(dataset_path, "fasta"))
    elif dataset.type == Dataset.TYPE.REFERENCE_GENOME_GENBANK:
        _make_chromosome(SeqIO.parse(dataset_path, "genbank"))
    elif dataset.type == Dataset.TYPE.REFERENCE_GENOME_GFF:
        # Don't add chromosomes for GFF. Used internally with JBrowse.
        return
    elif dataset.type == Dataset.TYPE.FEATURE_INDEX:
        # Don't add chromosomes for feature_index.
        # Used internally to find features.
        return
    elif dataset.type == Dataset.TYPE.MOBILE_ELEMENT_FASTA:
        # Don't add chromosomes for mobile elements. Used by SV calling.
        return
    else:
        raise AssertionError("Unexpected Dataset type {ds_type}".format(
                ds_type=dataset.type))
def generate_fasta_from_genbank(ref_genome):
    """If this reference genome has a genbank but not a FASTA, generate
    a FASTA from the genbank.

    The FASTA is written next to the GenBank file (extension .fa) and
    registered as a new Dataset on the reference genome. No-op if a FASTA
    dataset already exists; asserts that a GenBank dataset exists.
    """
    # If a FASTA already exists, then just return.
    if ref_genome.dataset_set.filter(
            type=Dataset.TYPE.REFERENCE_GENOME_FASTA).exists():
        return

    # Check that a genbank exists.
    assert ref_genome.dataset_set.filter(
            type=Dataset.TYPE.REFERENCE_GENOME_GENBANK).exists()

    # Get genbank path and filename components (for creating FASTA file name).
    genbank_path = get_dataset_with_type(
            ref_genome,
            type=Dataset.TYPE.REFERENCE_GENOME_GENBANK).get_absolute_location()
    genbank_dir, genbank_filename = os.path.split(genbank_path)
    genbank_noext = os.path.splitext(genbank_filename)[0]

    # Put the fasta file in the same dir, just change the extension to .fa.
    fasta_filename = os.path.join(genbank_dir, (genbank_noext + '.fa'))

    # Get the individual records, each corresponding to a chromosome.
    genome_records = list(SeqIO.parse(genbank_path, 'genbank'))

    # SnpEFF takes the name attr, but the BioPython uses the id attr to make
    # its fasta file, so overwrite the id with the name when converting to
    # fasta. Jbrowse and SnpEFF go by the name.
    for genome_record in genome_records:
        genome_record.name = genome_record.id

    SeqIO.write(genome_records, fasta_filename, 'fasta')

    dataset_type = IMPORT_FORMAT_TO_DATASET_TYPE['fasta']
    copy_and_add_dataset_source(ref_genome, dataset_type,
            dataset_type, fasta_filename)
def ensure_fasta_index(ref_genome_fasta):
    """Make sure *ref_genome_fasta* has a samtools .fai index.

    If the index file is missing, run `samtools faidx` to create it.
    """
    index_path = ref_genome_fasta + '.fai'
    if os.path.exists(index_path):
        return
    subprocess.check_call([
            settings.SAMTOOLS_BINARY,
            'faidx',
            ref_genome_fasta])
def generate_gff_from_genbank(ref_genome):
    """If this reference genome has a genbank but not a GFF, generate
    a GFF from the genbank.

    The GFF is written next to the GenBank file (extension .gff) and
    registered as a new Dataset on the reference genome. No-op if a GFF
    dataset already exists; asserts that a GenBank dataset exists.
    """
    # If a GFF already exists, then just return.
    if ref_genome.dataset_set.filter(
            type=Dataset.TYPE.REFERENCE_GENOME_GFF).exists():
        return

    # Check that a genbank exists.
    assert ref_genome.dataset_set.filter(
            type=Dataset.TYPE.REFERENCE_GENOME_GENBANK).exists()

    # Get genbank path and filename components (for creating GFF file name).
    genbank_path = get_dataset_with_type(
            ref_genome,
            type=Dataset.TYPE.REFERENCE_GENOME_GENBANK).get_absolute_location()
    genbank_dir, genbank_filename = os.path.split(genbank_path)
    genbank_noext = os.path.splitext(genbank_filename)[0]

    # Put the GFF file in the same dir, just change the extension to .gff.
    gff_filename = os.path.join(genbank_dir, (genbank_noext + '.gff'))

    # Get the individual records, each corresponding to a chromosome.
    genome_records = list(SeqIO.parse(genbank_path, 'genbank'))

    # SnpEFF takes the name attr, but the BioPython uses the id attr to make its
    # GFF file, so overwrite the id with the name when converting to GFF.
    for genome_record in genome_records:
        genome_record.name = genome_record.id

    GFF.write(genome_records, open(gff_filename, 'w'))

    dataset_type = IMPORT_FORMAT_TO_DATASET_TYPE['gff']
    copy_and_add_dataset_source(ref_genome, dataset_type,
            dataset_type, gff_filename)
def import_reference_genome_from_ncbi(project, label, record_id, import_format):
    """Imports a reference genome by accession from NCBI using efetch.

    Args:
        project: Project to attach the genome to.
        label: Human-readable label for the ReferenceGenome.
        record_id: NCBI nuccore accession to fetch.
        import_format: 'fasta' or 'genbank'.

    Returns:
        ReferenceGenome.
    """
    # Validate the input.
    assert import_format in ['fasta', 'genbank'], (
        'Import Format must be \'fasta\' or \'genbank\'')

    # Format keys for Efetch.
    # More info at: http://www.ncbi.nlm.nih.gov/
    #     books/NBK25499/table/chapter4.chapter4_table1/?report=objectonly
    CONVERT_FORMAT = {
        'fasta': 'fa',
        'genbank': 'gbwithparts'
    }

    # What suffix to use for each input format
    # TODO: Should this be a property of the Dataset TYPE?
    FORMAT_SUFFIX = {
        'fasta': '.fa',
        'genbank': '.gb'
    }

    Entrez.email = settings.EMAIL
    handle = Entrez.efetch(
            db="nuccore",
            id=record_id,
            rettype=CONVERT_FORMAT[import_format],
            retmode="text")

    # Store results in temporary file.
    filename_prefix = generate_safe_filename_prefix_from_label(label) + '_'
    temp = NamedTemporaryFile(delete=False, prefix=filename_prefix,
            suffix=FORMAT_SUFFIX[import_format])
    # BUGFIX: previously the handle and temp file leaked if anything below
    # raised; guarantee cleanup with try/finally.
    try:
        try:
            temp.write(handle.read())
        finally:
            handle.close()
            temp.close()

        # Create ref genome from this temporary file (move=True consumes it).
        reference_genome = import_reference_genome_from_local_file(
                project, label, temp.name, import_format, move=True)
    finally:
        # Remove the temp file if the move didn't already consume it.
        if os.path.isfile(temp.name):
            os.remove(temp.name)

    return reference_genome
def sanitize_record_id(record_id_string):
    """Return the leading word-only part (at most 20 chars) of a seqrecord id.

    This gives a consistent, readable identifier shared between GenBank and
    FASTA representations of the same record.
    """
    leading_word = re.match(r'^\w{1,20}', record_id_string)
    return leading_word.group()
def _assert_sample_targets_file_size(targets_file):
if hasattr(targets_file, "size"):
assert targets_file.size < 1000000, (
"Targets file is too large: %d" % targets_file.size)
@project_files_needed
def import_samples_from_targets_file(project, targets_file, options={}):
    """Uses the uploaded targets file to add a set of samples to the project.

    We need to check each line of the targets file for consistency before we
    do anything, however. Checking is moved to parse_targets_file() which parses
    targets_file and returns valid rows. parse_targets_file() will also be
    called from parse_targets_file_s3 in xhr_handlers in case of S3 uploading.
    It writes a copy of the uploaded targets file to a temporary file

    Args:
        project: The project we're storing everything relative to>
        targets_file: The UploadedFile django object that holds the targets
            in .tsv format.
        options: Dictionary of options. Currently a hack to allow different
            parts of the pipeline to not run during tests (e.g. FastQC).

    Returns:
        List of ExperimentSamples (via create_samples_from_row_data).

    Raises:
        AssertionError: If a referenced read file cannot be opened.
    """
    assert_celery_running()

    parsed_rows = parse_experiment_sample_targets_file(
            project,
            targets_file,
            REQUIRED_SAMPLE_SERVER_COPY_HEADER,
            SAMPLE_SERVER_COPY_KEY__SAMPLE_NAME,
            SAMPLE_SERVER_COPY_KEY__READ_1,
            SAMPLE_SERVER_COPY_KEY__READ_2)

    # We perform the additional step of testing file locations, and for the
    # test data, switching out $GD_ROOT template variable.
    valid_rows = []
    for row in parsed_rows:
        updated_row = copy.copy(row)
        for field, value in row.iteritems():
            if field in (SAMPLE_SERVER_COPY_KEY__READ_1,
                    SAMPLE_SERVER_COPY_KEY__READ_2):
                # Expand the test-data placeholder to an absolute path.
                updated_value = value.replace('$GD_ROOT', settings.PWD)
                # Probe the file to fail fast on unreadable paths.
                with open(updated_value, 'rb') as test_file:
                    try:
                        test_file.read(8)
                    except:
                        raise AssertionError(
                                "Cannot read file at %s" % updated_value)
                updated_row[field] = updated_value
        valid_rows.append(updated_row)

    return create_samples_from_row_data(project, valid_rows, move=False,
            options=options)
def create_samples_from_row_data(
        project, data_source_list, move=False, options={}):
    """Creates ExperimentSample objects along with their respective Datasets.

    The data is copied to the entity location. We block until we've created the
    models, and then go async for actual copying.

    Args:
        project: Project these Samples should be added to.
        data_source_list: List of objects with keys:
            * Sample_Name
            * Read_1_Path
            * Read_2_Path (optional)
            * other metadata keys (optional)
            * ...
        move: Whether to move the source data. Else copy.
        options: Dictionary of options. Currently a hack to allow different
            parts of the pipeline to not run during tests (e.g. FastQC).

    Returns:
        List of ExperimentSamples.
    """
    experiment_samples = []
    for row in data_source_list:
        # Create ExperimentSample object and then store the data relative to
        # it.
        sample_label = row['Sample_Name']
        experiment_sample = ExperimentSample.objects.create(
                project=project, label=sample_label)

        # Create the Datasets before starting copying so we can show status in
        # the ui. This is a new pattern where we are moving copying to happen
        # asynchronously in Celery.
        _create_fastq_dataset(
                experiment_sample, row['Read_1_Path'], Dataset.TYPE.FASTQ1,
                Dataset.STATUS.QUEUED_TO_COPY)
        maybe_read2_path = row.get('Read_2_Path', '')
        if maybe_read2_path:
            _create_fastq_dataset(
                    experiment_sample, maybe_read2_path, Dataset.TYPE.FASTQ2,
                    Dataset.STATUS.QUEUED_TO_COPY)

        # Add extra metadata columns.
        _update_experiment_sample_data_for_row(experiment_sample, row,
                PRE_DEFINED_SAMPLE_SERVER_COPY_HEADER_PARTS)

        # Start the async job of copying.
        copy_experiment_sample_data.delay(
                project, experiment_sample, row, move=move, options=options)

        experiment_samples.append(experiment_sample)

    # Wire up parent/child relations declared via SAMPLE_PARENTS metadata.
    _update_experiment_sample_parentage(experiment_samples)

    return experiment_samples
def _create_fastq_dataset(experiment_sample, fastq_source, dataset_type,
        dataset_status):
    """Create a Dataset pointing at where a fastq file will eventually live.

    Callers are responsible for actually copying the data, so (per
    add_dataset_to_entity's contract here) the data is not yet present; the
    Dataset is created with the supplied status.
    """
    destination_path = _get_copy_target_path(experiment_sample, fastq_source)
    fastq_dataset = add_dataset_to_entity(
            experiment_sample, dataset_type, dataset_type, destination_path)
    fastq_dataset.status = dataset_status
    fastq_dataset.save()
    return fastq_dataset
def _copy_dataset_data(experiment_sample, fastq_source, dataset_type,
        move=False, set_status=Dataset.STATUS.VERIFYING):
    """Helper to copy data and set status.

    Marks the sample's Dataset of the given type COPYING while the copy runs,
    then sets it to *set_status* and returns it.
    """
    dataset = experiment_sample.dataset_set.get(type=dataset_type)
    dataset.status = Dataset.STATUS.COPYING
    dataset.save()
    copy_dataset_to_entity_data_dir(experiment_sample, fastq_source, move=move)
    dataset.status = set_status
    dataset.save()
    return dataset
@task
@project_files_needed
def copy_experiment_sample_data(
        project, experiment_sample, data, move=False,
        options={'skip_fastqc': False}):
    """Celery task that wraps the process of copying the data for an
    ExperimentSample, then running FastQC quality control.

    NOTE: the mutable default for *options* is kept for interface
    compatibility; it is only read via .get(), never mutated.
    """
    # Copy read1.
    read1_dataset = _copy_dataset_data(experiment_sample, data['Read_1_Path'],
            Dataset.TYPE.FASTQ1, move=move)

    # Copy read2, if paired-end.
    maybe_read2_path = data.get('Read_2_Path', '')
    if maybe_read2_path:
        read2_dataset = _copy_dataset_data(experiment_sample, maybe_read2_path,
                Dataset.TYPE.FASTQ2, move=move)
    else:
        read2_dataset = None

    # Verification.
    if read2_dataset is not None:
        # Paired reads.
        if (read1_dataset.filesystem_location ==
                read2_dataset.filesystem_location):
            # Paired files must be distinct. Mark both FAILED and bail out
            # before QC. BUGFIX: previously the FAILED statuses were never
            # saved and were then overwritten by the QC/READY updates below.
            read1_dataset.status = Dataset.STATUS.FAILED
            read1_dataset.save()
            read2_dataset.status = Dataset.STATUS.FAILED
            read2_dataset.save()
            # TODO: Provide way for user to get an error message, similar to
            # how make an error link for alignments.
            return
        read1_dataset.status = Dataset.STATUS.READY
        read1_dataset.save()
        read2_dataset.status = Dataset.STATUS.READY
        read2_dataset.save()
    else:
        # Unpaired.
        read1_dataset.status = Dataset.STATUS.READY
        read1_dataset.save()

    # Quality Control via FASTQC and save.
    read1_dataset.status = Dataset.STATUS.QC
    read1_dataset.save()
    if not options.get('skip_fastqc', False):
        run_fastqc_on_sample_fastq(experiment_sample, read1_dataset)
    read1_dataset.status = Dataset.STATUS.READY
    read1_dataset.save()

    if read2_dataset is not None:
        read2_dataset.status = Dataset.STATUS.QC
        # BUGFIX: previously saved read1_dataset here, so read2's QC status
        # was never persisted.
        read2_dataset.save()
        if not options.get('skip_fastqc', False):
            run_fastqc_on_sample_fastq(experiment_sample, read2_dataset,
                    rev=True)
        read2_dataset.status = Dataset.STATUS.READY
        read2_dataset.save()
@task
def run_fastqc_on_sample_fastq(
        experiment_sample, source_fastq_dataset, rev=False,
        source_dataset_status_on_success=None):
    """Runs FASTQC on a fastq dataset object.

    Args:
        experiment_sample: The ExperimentSample for this fastq.
        source_fastq_dataset: Dataset that points to uploaded fastq file.
        rev: If True, this is the FASTQ2.
        source_dataset_status_on_success: Status to set on fastq_dataset upon
            success.

    Returns:
        New Dataset pointing to html file of FastQC results.
    """
    fastq_filename = source_fastq_dataset.get_absolute_location()

    # There's no option to pass the output filename to FastQC so we just
    # create the name that matches what FastQC outputs.
    fastqc_filename = _get_fastqc_path(source_fastq_dataset)

    if rev:
        dataset_type = Dataset.TYPE.FASTQC2_HTML
    else:
        dataset_type = Dataset.TYPE.FASTQC1_HTML

    # create the tmp dir if it doesn't exist
    if not os.path.exists(settings.TEMP_FILE_ROOT):
        os.mkdir(settings.TEMP_FILE_ROOT)

    command = [
        settings.FASTQC_BINARY,
        fastq_filename,
        '-o', experiment_sample.get_model_data_dir(),
        '-d', settings.TEMP_FILE_ROOT]

    fastqc_output = subprocess.check_output(
            command, stderr=subprocess.STDOUT)

    # Check that fastqc file has been made
    # TODO: We need proper error checking and logging probably, so that this
    # non-essential step doesn't destroy the whole import process.
    if not os.path.exists(fastqc_filename):
        print 'FastQC Failed for {}:\n{}'.format(
            fastq_filename, fastqc_output)

    # Record the detected phred encoding on the sample.
    set_phred_encoding(fastqc_filename, experiment_sample)

    fastqc_dataset = add_dataset_to_entity(experiment_sample,
            dataset_type, dataset_type, fastqc_filename)
    fastqc_dataset.status = Dataset.STATUS.READY
    fastqc_dataset.save(update_fields=['status'])

    if source_dataset_status_on_success is not None:
        source_fastq_dataset.status = source_dataset_status_on_success
        source_fastq_dataset.save(update_fields=['status'])

    return fastqc_dataset
def _get_fastqc_path(fastq_dataset):
"""Returns fastqc filename given Dataset pointing to fastq.
"""
fastq_filename = fastq_dataset.get_absolute_location()
if fastq_dataset.is_compressed():
unzipped_fastq_filename = os.path.splitext(fastq_filename)[0]
else:
unzipped_fastq_filename = fastq_filename
# NOTE: FASTQC apparently has slightly different behavior when the file
# extension is.fastqc where it chops off the .fastqc part so we have to do
# that here manually too.
if os.path.splitext(unzipped_fastq_filename)[1] == '.fastq':
unzipped_fastq_filename = os.path.splitext(unzipped_fastq_filename)[0]
return unzipped_fastq_filename + '_fastqc.html'
def set_phred_encoding(fastqc_filename, experiment_sample):
    """Extract the phred encoding detected by FastQC and store it on the
    sample's data dict (key 'phred_encoding'; None if the FastQC zip output
    is missing).
    """
    fastqc_filename_base = os.path.splitext(fastqc_filename)[0]
    fastqc_data_zip_path = fastqc_filename_base + '.zip'
    # Path of the data file inside the FastQC zip archive.
    zip_archive_text_file = (fastqc_filename_base.split('/')[-1] +
            '/fastqc_data.txt')
    if not os.path.exists(fastqc_data_zip_path):
        experiment_sample.data['phred_encoding'] = None
    else:
        # Stream the archived fastqc_data.txt through grep to pull the
        # first 'Encoding' line, e.g. "Encoding\tSanger / Illumina 1.9".
        unzip_process = subprocess.Popen([
                'unzip', '-ca',
                fastqc_data_zip_path,
                zip_archive_text_file],
                stdout=subprocess.PIPE)
        output = subprocess.check_output(
                ['grep', '-m', '1', 'Encoding'],
                stdin=unzip_process.stdout)
        encoding = output.strip().split('\t')[1]
        experiment_sample.data['phred_encoding'] = encoding
    experiment_sample.save()
def create_sample_models_for_eventual_upload(project, targets_file):
    """Create placeholder ExperimentSamples awaiting data upload.

    Args:
        project: Project the samples belong to.
        targets_file: The filled out targets form.

    Raises:
        ValidationException if the targets file fails validation.
    """
    try:
        valid_rows = parse_experiment_sample_targets_file(
                project,
                targets_file,
                REQUIRED_SAMPLE_UPLOAD_THROUGH_BROWSER_HEADER,
                SAMPLE_BROWSER_UPLOAD_KEY__SAMPLE_NAME,
                SAMPLE_BROWSER_UPLOAD_KEY__READ_1,
                SAMPLE_BROWSER_UPLOAD_KEY__READ_2)
    except AssertionError as validation_error:
        # Surface validation problems as a user-facing exception.
        raise ValidationException(validation_error)

    for valid_row in valid_rows:
        _create_sample_and_placeholder_dataset(project, valid_row)
def _create_sample_and_placeholder_dataset(project, row):
    """Create an ExperimentSample plus placeholder Datasets for one targets
    row; no data is copied (files will be uploaded through the browser later).
    """
    # Parsing and validation.
    fastq1_filename = row['Read_1_Filename']
    maybe_fastq2_filename = row.get('Read_2_Filename', '')
    assert fastq1_filename != maybe_fastq2_filename

    # Now create the models.
    experiment_sample = ExperimentSample.objects.create(
            project=project, label=row['Sample_Name'])

    # NOTE: removed a redundant re-assignment of fastq1_filename that
    # previously shadowed the value validated above.
    _create_fastq_dataset(
            experiment_sample, fastq1_filename, Dataset.TYPE.FASTQ1,
            Dataset.STATUS.AWAITING_UPLOAD)
    if maybe_fastq2_filename:
        _create_fastq_dataset(
                experiment_sample, maybe_fastq2_filename,
                Dataset.TYPE.FASTQ2, Dataset.STATUS.AWAITING_UPLOAD)

    # Add extra metadata columns.
    _update_experiment_sample_data_for_row(experiment_sample, row,
            PRE_DEFINED_SAMPLE_UPLOAD_THROUGH_BROWSER_PARTS)
def _update_experiment_sample_data_for_row(experiment_sample, row, known_cols):
    """Store user-defined (non-template) columns on ExperimentSample.data.

    Each column not in *known_cols* is upper-cased/underscored and prefixed
    with 'SAMPLE_' before being saved into the catch-all data dict.
    """
    for col_name, col_value in row.iteritems():
        if col_name in known_cols:
            continue
        data_key = uppercase_underscore(col_name)
        if not data_key.startswith('SAMPLE_'):
            data_key = 'SAMPLE_' + data_key
        experiment_sample.data[data_key] = str(col_value)
    experiment_sample.save(update_fields=['data'])
def _update_experiment_sample_parentage(experiment_samples):
"""
Adds children/parent relations to ExperimentSamples according to a
dictionary of parents parsed out of the targets file.
"""
es_label_dict = dict([(es.label, es) for es in experiment_samples])
# make a dict of child to parent labels.
parent_dict = {}
for es in experiment_samples:
if 'SAMPLE_PARENTS' in es.data:
parents = es.data['SAMPLE_PARENTS'].split('|')
parent_dict[es.label] = parents
# add the children of each parent to the model.
for child, parents in parent_dict.items():
assert child in es_label_dict.keys(), (
'Child {} missing from ExperimentSamples'.format(child))
for parent in parents:
if not parent: continue #skip blank strings
assert parent in es_label_dict.keys(), (
'Parent {} missing from ExperimentSamples'.format(parent))
es_label_dict[parent].add_child(es_label_dict[child])
def parse_experiment_sample_targets_file(project,
        targets_filehandle_or_filename, required_header, sample_name_key,
        read_1_key, read_2_key):
    """Parses and validates the file.

    Args:
        project: Project used to check for clashing Dataset filenames.
        targets_filehandle_or_filename: Either a filesystem path (str) or an
            open File object containing the targets data.
        required_header: Column names that must be present in the header.
        sample_name_key: Header key for the sample name column.
        read_1_key: Header key for the read-1 file column.
        read_2_key: Header key for the (optional) read-2 file column.

    Returns:
        List of objects representing the rows.

    Raises:
        AssertionError: On any validation failure (bad header, missing
            reads, duplicate filenames/sample names, oversized file, etc.).
    """
    _assert_sample_targets_file_size(targets_filehandle_or_filename)

    # The purpose of the next few lines of somewhat convoluted code is to
    # make sure we support weird template formats such as Excel on OsX might
    # output. In the end, we want to end up with the variable targets_file
    # being a File object that has been read in in universal mode,
    # open(..., 'rU'). This requirement is made slightly trickier by the fact
    # that the aptly named param targets_filehandle_or_filename is of ambiguous
    # type (because Python) and so the remaining code needs to work whether
    # it's a string filename, or a File object. One way we can solve all
    # these constraints is to write the contents of the file to a temporary
    # location, and then read it back in universal mode. I would welcome a more
    # elegant fix.
    if isinstance(targets_filehandle_or_filename, str):
        temp_file_location = targets_filehandle_or_filename
    else:
        # It's an open File object.
        if not os.path.exists(settings.TEMP_FILE_ROOT):
            os.mkdir(settings.TEMP_FILE_ROOT)
        _, temp_file_location = mkstemp(dir=settings.TEMP_FILE_ROOT)
        with open(temp_file_location, 'w') as temp_fh:
            temp_fh.write(targets_filehandle_or_filename.read())

    # Identify delim (comma or tab). Might raise AssertionError.
    delim = determine_template_delimiter(temp_file_location, required_header)

    targets_file = open(temp_file_location, 'rU')

    # Proceed with appropriate delim.
    reader = csv.DictReader(targets_file, delimiter=delim)

    # Read the header / schema.
    targets_file_header = reader.fieldnames

    # Make sure all header cols are present.
    missing_header_cols = (set(required_header) - set(targets_file_header))
    assert 0 == len(missing_header_cols), (
            "Missing cols: %s" % ' '.join(missing_header_cols))

    # Query all relevant datasets to check for filename clashes.
    existing_sample_dataset_filename_set = set([
            os.path.split(ds.filesystem_location)[1]
            for ds in Dataset.objects.filter(
                    experimentsample__project=project)])

    # Set this to a boolean on the first iteration, and make sure all rows
    # are either paired or unpaired.
    is_paired_end = None

    # Initial aggregation and validation.
    valid_rows = []
    for raw_row_obj in reader:
        # Strip whitespace from every value and drop the spurious None key
        # DictReader produces for trailing delimiters.
        clean_row_obj = {}
        for key, value in raw_row_obj.iteritems():
            # Ignore rows of the form K/V pair {None: ''}
            if key is None:
                continue
            clean_row_obj[key] = value.strip()
        sample_name = clean_row_obj[sample_name_key]
        if not sample_name:
            # Null sample name, skip the row.
            continue
        assert len(targets_file_header) == len(clean_row_obj.keys()), (
                "Row %s has the wrong number of fields." % sample_name)
        # Determine whether paired-end data (first iteration only).
        if is_paired_end is None:
            is_paired_end = (read_2_key in clean_row_obj and
                    clean_row_obj[read_2_key])
        # Check filenames are present.
        assert clean_row_obj[read_1_key], (
                "No read 1 in row %s" % sample_name)
        if is_paired_end:
            assert clean_row_obj[read_2_key], (
                    "No read 2 in row %s" % sample_name)
        # Catch a common copy paste error where read1 matches read2.
        if is_paired_end:
            same = (clean_row_obj[read_1_key] == clean_row_obj[read_2_key])
            assert not same, "Read 1 filename is same as read 2 filename"
        # Make sure Dataset with that name doesn't exist.
        def _assert_not_filename_exists(filename_col):
            filename = os.path.basename(clean_row_obj[filename_col])
            assert not filename in existing_sample_dataset_filename_set, (
                    "%s exists" % clean_row_obj[filename_col])
        _assert_not_filename_exists(read_1_key)
        if is_paired_end:
            _assert_not_filename_exists(read_2_key)
        valid_rows.append(clean_row_obj)

    # Make sure all the standard fields have unique values relative to each
    # other.
    def _assert_no_repeated_value(col):
        values = set([row[col] for row in valid_rows])
        assert len(values) == len(valid_rows), (
                "Non-unique %s detected." % col)
    _assert_no_repeated_value(sample_name_key)
    _assert_no_repeated_value(read_1_key)
    if is_paired_end:
        _assert_no_repeated_value(read_2_key)

    targets_file.close()
    os.remove(temp_file_location)

    return valid_rows
def determine_template_delimiter(
        template_file_location, required_header):
    """Determine the template file delimiter: comma or tab.

    Tries comma first, then tab, checking which delimiter yields a header
    row containing every column in required_header.

    Args:
        template_file_location: Path to the template file on disk.
        required_header: Iterable of column names that must all be present.

    Returns:
        The delimiter string, ',' or '\t'.

    Raises:
        AssertionError: If neither delimiter produces a header containing
            all required columns (including the empty-file case). The
            message reports the best guess at the missing columns.
    """
    def _open_universal(path):
        # 'U' (universal newline) mode matters on Python 2 to handle e.g.
        # Excel-on-Mac linebreaks, but was removed in Python 3.11, where
        # universal newlines are the default for text mode.
        try:
            return open(path, 'rU')
        except ValueError:
            return open(path, 'r')

    def _get_missing_cols(delim):
        with _open_universal(template_file_location) as fh:
            test_reader = csv.DictReader(fh, delimiter=delim)
            # Read the header while the file is still open.
            fieldnames = test_reader.fieldnames
        if fieldnames is None:
            # Empty file: every required column is missing. Without this
            # check, set(None) below would raise TypeError instead of the
            # documented AssertionError.
            return set(required_header)
        return set(required_header) - set(fieldnames)

    delim = ','
    missing_cols_comma = _get_missing_cols(delim)
    if len(missing_cols_comma):
        delim = '\t'
        missing_cols_tab = _get_missing_cols(delim)
        if len(missing_cols_tab):
            # Neither delimiter worked. Report the more plausible (smaller)
            # set of missing columns.
            guess_appropriate_missing_cols = missing_cols_comma
            if len(missing_cols_tab) < len(missing_cols_comma):
                guess_appropriate_missing_cols = missing_cols_tab
            raise AssertionError(
                    "Invalid template or, missing columns: %s" % ' '.join(
                            guess_appropriate_missing_cols))
    return delim
@transaction.commit_on_success
def import_variant_set_from_vcf(ref_genome, variant_set_name,
        variant_set_file):
    """Convert an uploaded VCF file into a new variant set object.

    Args:
        ref_genome: ReferenceGenome.
        variant_set_name: Name of the variant set (label).
        variant_set_file: Path to the variant set on disk.

    Raises:
        AssertionError: If a VariantSet with this label already exists.
    """
    # For now, variant set name must be unique even among diff ref genomes.
    variant_set_name_exists = bool(VariantSet.objects.filter(
            reference_genome=ref_genome,
            label=variant_set_name).count())
    assert not variant_set_name_exists, 'Variant set name must be unique.'

    # Create the VariantSet.
    variant_set = VariantSet.objects.create(
            reference_genome=ref_genome,
            label=variant_set_name)

    # First, save this vcf as a dataset, so we can point to it from the
    # new variant common_data_objs.
    dataset_type = IMPORT_FORMAT_TO_DATASET_TYPE['vcf_user']
    dataset = copy_and_add_dataset_source(variant_set, dataset_type,
            dataset_type, variant_set_file)

    # Now try to read the variant set file using PyVCF. If this throws an
    # error, for example because the vcf comes from our 'upload from file'
    # interface which does not make INFO or FORMAT columns, fall back to the
    # more forgiving CSV-based reader.
    # TODO: Perhaps rethink the 'upload from file' interface to generate
    # null values for these columns.
    try:
        _read_variant_set_file(variant_set_file, ref_genome, dataset,
                variant_set)
    except Exception:
        _read_variant_set_file_as_csv(variant_set_file, ref_genome, dataset,
                variant_set)

    # These actions invalidate the materialized view.
    ref_genome.invalidate_materialized_view()
def _read_variant_set_file(variant_set_file, ref_genome, dataset,
        variant_set):
    """Parse a well-formed VCF with PyVCF and link every record's Variant
    to variant_set.

    Args:
        variant_set_file: Path to the vcf file on disk.
        ref_genome: ReferenceGenome the variants belong to.
        dataset: Dataset pointing at the source vcf.
        variant_set: VariantSet each parsed Variant is linked to.
    """
    # First count the number of records to give helpful status debug output.
    record_count = 0
    with open(variant_set_file) as fh:
        vcf_reader = vcf.Reader(fh)
        for record in vcf_reader:
            record_count += 1

    # Now iterate through the vcf file again and parse the data.
    # NOTE: Do not save handles to the Variants, else suffer the wrath of a
    # memory leak when parsing a large vcf file.
    with open(variant_set_file) as fh:
        vcf_reader = vcf.Reader(fh)

        # First, update the reference_genome's key list with any new
        # keys from this VCF. Re-fetch the model so we work on fresh state.
        reference_genome = ReferenceGenome.objects.get(id=ref_genome.id)

        # Update the reference genome and grab it from the db again.
        update_filter_key_map(reference_genome, vcf_reader)

        for record_idx, record in enumerate(vcf_reader):
            print 'vcf_parser: Parsing %d out of %d' % (
                    record_idx + 1, record_count)

            # Get or create the Variant for this record. This step
            # also generates the alternate objects and assigns their
            # data fields as well.
            variant, alt_list = get_or_create_variant(reference_genome,
                    record, dataset)

            # Create a link between the Variant and the VariantSet if
            # it doesn't exist.
            VariantToVariantSet.objects.get_or_create(
                    variant=variant,
                    variant_set=variant_set)
def _read_variant_set_file_as_csv(variant_set_file, reference_genome,
        dataset, variant_set):
    """If reading the variant set file as a vcf fails (because we arent using
    all columns, as will usually be the case) then read it as a CSV and check
    manually for the required columns.

    Args:
        variant_set_file: Path to the pseudo-vcf file.
        reference_genome: ReferenceGenome object.
        dataset: Dataset pointing at the source file.
        variant_set: VariantSet each parsed Variant is linked to.
    """
    # NOTE: Must open with 'rU', universal mode, to handle non-standard
    # linebreaks that might be introduced in different environments. For
    # example, Excel on Mac OS X saves funky linebreaks.
    with open(variant_set_file, 'rU') as fh:

        # Use this wrapper to skip the header lines.
        # Double ##s are part of the header, but single #s are column
        # headings and must be stripped and kept.
        def remove_vcf_header(iterable):
            for line in iterable:
                if not line.startswith('##'):
                    if line.startswith('#'):
                        line = line.lstrip('#')
                    yield line
        vcf_noheader = remove_vcf_header(fh)

        reader = csv.DictReader(vcf_noheader, delimiter='\t')

        # Check that the required columns are present.
        assert (len(reader.fieldnames) >= len(REQUIRED_VCF_HEADER_PART)), (
                'Header for PseudoVCF %s is too short, should have [%s], has %s' % (
                        variant_set_file, ', '.join(REQUIRED_VCF_HEADER_PART),
                        ', '.join(reader.fieldnames)))
        # Required columns must appear first and in order.
        for col, check in zip(reader.fieldnames[0:len(REQUIRED_VCF_HEADER_PART)],
                REQUIRED_VCF_HEADER_PART):
            assert col == check, (
                    "Header column '%s' is missing or out of order; %s" % (check,
                            ', '.join(reader.fieldnames)))

        class PseudoVCF:
            """Pseudo wrapper class to satisfy interface of
            extract_raw_data_dict().
            """
            def __init__(self, **entries):
                self.__dict__.update(entries)
                # Split the comma-separated ALT field into a list; no
                # per-sample data is available in this format.
                self.__dict__['ALT'] = self.__dict__['ALT'].strip().split(',')
                self.__dict__['samples'] = []

        for record in reader:
            record = PseudoVCF(**record)

            # Get or create the Variant for this record.
            # NOTE: No samples so query_cache is not necessary.
            variant, alts = get_or_create_variant(
                    reference_genome, record, dataset, query_cache=None)

            # Create a link between the Variant and the VariantSet if
            # it doesn't exist.
            VariantToVariantSet.objects.get_or_create(
                    variant=variant,
                    variant_set=variant_set)
##############################################################################
# Helper Functions
##############################################################################
def copy_and_add_dataset_source(entity, dataset_label, dataset_type,
        original_source_location, move=False):
    """Copies the dataset to the entity location and then adds as Dataset.

    If the original_source_location is a file object, then it just read()s
    from the handle and writes to destination.

    If move is True, move instead of copying it. Good for files downloaded
    to a temp directory, since copying is slower.

    The model entity must satisfy the following interface:
        * property dataset_set
        * method get_model_data_dir()

    Args:
        entity: Model instance to attach the new Dataset to.
        dataset_label: Label for the new Dataset.
        dataset_type: Dataset.TYPE value for the new Dataset.
        original_source_location: File path (string) or open file handle.
        move: If True (and given a path), move the file instead of copying.

    Returns:
        The Dataset object.
    """
    # (A previously commented-out alternative ordering of these two steps
    # was removed as dead code.)
    dest = copy_dataset_to_entity_data_dir(entity, original_source_location,
            move)
    dataset = add_dataset_to_entity(entity, dataset_label, dataset_type,
            dest)
    return dataset
def _get_copy_target_path(entity, original_source_location):
"""Returns the full path to the copy target.
Args:
entity: Model entity from which we determine the target dir.
original_source_location: Original location from which we determine a
filename.
Returns:
String describing full target path.
"""
assert hasattr(entity, 'get_model_data_dir')
source_name = os.path.split(original_source_location)[1]
return os.path.join(entity.get_model_data_dir(), source_name)
def copy_dataset_to_entity_data_dir(entity, original_source_location,
        move=False):
    """If a file path, copy the data to the entity model data dir.
    If a handle, then just write it to the data dir.

    Args:
        entity: Model entity providing get_model_data_dir().
        original_source_location: File path (string) or open file handle.
        move: If True and given a path, move instead of copying.

    Returns:
        The destination to which the file was copied.
    """
    dest = _get_copy_target_path(entity, original_source_location)
    if not original_source_location == dest:
        try:  # first try treating original_source_location as a path
            if move:
                shutil.move(original_source_location, dest)
            else:
                shutil.copy(original_source_location, dest)
        except TypeError:  # then try a handle
            # Use a context manager so the destination handle is always
            # closed; the previous open(dest, 'w').write(...) leaked it.
            with open(dest, 'w') as dest_fh:
                dest_fh.write(original_source_location.read())
    return dest
def add_dataset_to_entity(entity, dataset_label, dataset_type,
        filesystem_location=None):
    """Create a Dataset and attach it to the given model entity.

    Args:
        entity: Model instance exposing a dataset_set relation.
        dataset_label: Label for the new Dataset.
        dataset_type: Dataset.TYPE value for the new Dataset.
        filesystem_location: Optional path to record on the Dataset.

    Returns:
        The newly created Dataset.
    """
    new_dataset = Dataset.objects.create(
            label=dataset_label, type=dataset_type)

    # Only record a filesystem location when one was provided.
    if filesystem_location is not None:
        new_dataset.filesystem_location = clean_filesystem_location(
                filesystem_location)
        new_dataset.save()

    entity.dataset_set.add(new_dataset)
    entity.save()
    return new_dataset
def prepare_ref_genome_related_datasets(ref_genome, dataset):
    """Prepares data related to a ReferenceGenome.

    For example, if only Genbank exists, creates a Fasta Dataset.

    If related Datasets exist, this function is a no-op (the generate_*
    helpers below are no-ops when their outputs already exist).

    Args:
        ref_genome: ReferenceGenome.
        dataset: A dataset pointing to a genome.

    Raises:
        AssertionError if dataset status is NOT_STARTED.
    """
    assert dataset.status != Dataset.STATUS.NOT_STARTED

    if dataset.type == Dataset.TYPE.REFERENCE_GENOME_FASTA:
        # make sure the fasta index is generated
        # Run jbrowse ref genome processing
        prepare_jbrowse_ref_sequence(ref_genome)

    elif dataset.type == Dataset.TYPE.REFERENCE_GENOME_GENBANK:
        # Run snpeff build after creating ReferenceGenome obj.
        build_snpeff(ref_genome)

        # These functions are NO-OPS if the respective Datasets exist.
        generate_fasta_from_genbank(ref_genome)
        generate_gff_from_genbank(ref_genome)

        # Run jbrowse genbank genome processing for genes
        add_genbank_file_track(ref_genome)

        # Create an indexed set of intervals so we can find contigs
        # and snps within genes without using snpEFF.
        feature_index_output_path = os.path.join(
                ref_genome.get_snpeff_genbank_parent_dir(),
                'gbk_feature_idx.pickle')
        generate_gbk_feature_index(
                ref_genome.get_snpeff_genbank_file_path(),
                feature_index_output_path)

        # Record the index location on a new FEATURE_INDEX Dataset
        # attached to the reference genome.
        gbk_idx_dataset = Dataset.objects.create(
                label=Dataset.TYPE.FEATURE_INDEX,
                type=Dataset.TYPE.FEATURE_INDEX)
        gbk_idx_dataset.filesystem_location = feature_index_output_path
        gbk_idx_dataset.save()
        ref_genome.dataset_set.add(gbk_idx_dataset)

    # We create the bwa index once here, so that alignments running in
    # parallel don't step on each others' toes.
    ref_genome_fasta = get_dataset_with_type(ref_genome,
            Dataset.TYPE.REFERENCE_GENOME_FASTA).get_absolute_location()
    ensure_bwa_index(ref_genome_fasta)
def sanitize_sequence_dataset(dataset):
    """Ensure sequence record ids in a genome Dataset are at most 16 chars.

    If any record id in the underlying FASTA/GenBank file exceeds 16
    characters, write a sanitized copy ('<name>.clean<ext>') with ids
    truncated to 16 chars and point the Dataset at the new file. Datasets
    of other types are left untouched.
    """
    dataset_type_to_parse_format = {
        Dataset.TYPE.REFERENCE_GENOME_FASTA: 'fasta',
        Dataset.TYPE.REFERENCE_GENOME_GENBANK: 'genbank'
    }

    if dataset.type not in dataset_type_to_parse_format:
        return

    dirty_file_path = dataset.get_absolute_location()
    parse_format = dataset_type_to_parse_format[dataset.type]

    # First pass: do any record ids exceed the 16-character limit?
    with open(dirty_file_path, 'r') as dirty_fh:
        needs_sanitizing = any(
                len(seq_record.id) > 16
                for seq_record in SeqIO.parse(dirty_fh, parse_format))

    if not needs_sanitizing:
        return

    prefix, ext = os.path.splitext(dirty_file_path)
    clean_file_path = prefix + '.clean' + ext

    def _truncated(seq_record):
        # Truncate the id and keep the name consistent with it.
        seq_record.id = seq_record.id[:16]
        seq_record.name = seq_record.id
        return seq_record

    # Second pass: truncate ids and write the sanitized copy.
    with open(dirty_file_path, 'r') as dirty_fh:
        clean_records = [_truncated(r)
                for r in SeqIO.parse(dirty_fh, parse_format)]
    with open(clean_file_path, 'w') as clean_fh:
        SeqIO.write(clean_records, clean_fh, parse_format)

    dataset.filesystem_location = clean_filesystem_location(clean_file_path)
    dataset.save()
|
churchlab/millstone
|
genome_designer/utils/import_util.py
|
Python
|
mit
| 47,017
|
[
"BWA",
"Biopython"
] |
0e072ec80f4e8dc4bf366924ecaf4b3f142d41aeba8460d84196d039ef471851
|
import logging
from warnings import warn
import math
import numpy as np
import shlex
import parmed.unit as units
from intermol.atom import Atom
from intermol.forces import *
import intermol.forces.forcefunctions as ff
from intermol.exceptions import (UnimplementedFunctional, UnsupportedFunctional,
UnimplementedSetting, UnsupportedSetting,
DesmondError, InterMolError)
from intermol.molecule import Molecule
from intermol.moleculetype import MoleculeType
from intermol.system import System
from intermol.desmond import cmap_parameters
#MRS for old desmond functionality
import re
import copy
logger = logging.getLogger('InterMolLog')
ENGINE = 'desmond'
# driver helper functions
def load(cms_file):
    """Load a DESMOND input file into a 'System'.

    Args:
        cms_file: Path of the cms file to read.

    Returns:
        The parsed System.
    """
    return DesmondParser(cms_file).read()
def save(cms_file, system):
    """Unpack a 'System' into a DESMOND input file.

    Args:
        cms_file: Path of the cms file to write.
        system: The System to serialize.
    """
    return DesmondParser(cms_file, system).write()
# parser helper functions
def end_header_section(blank_section, header, header_lines):
    """Finalize a cms header block.

    For a blank section, start a fresh two-line header (header text plus
    the ':::' separator); otherwise replace the first accumulated line
    with the header text, mutating header_lines in place.

    Returns:
        The finalized list of header lines.
    """
    if blank_section:
        return [header, ' :::\n']
    header_lines[0] = header
    return header_lines
def split_with_quotes(line):
    """Split a cms line into fields, honoring double-quoted fields.

    Quoted fields may contain spaces; those spaces are replaced with
    underscores so each element remains a single token.

    Args:
        line: The raw line to split.

    Returns:
        List of field strings.
    """
    if '"' in line:
        # shlex honors the quotes. Replace internal spaces so a quoted
        # field stays one token. (The previous code called str.replace
        # but discarded the result, so the substitution never happened.)
        elements = [e.replace(' ', '_') for e in shlex.split(line)]
    else:
        elements = line.split()
    return elements
def create_lookup(forward_dict):
    """Return the inverse mapping (value -> key) of forward_dict."""
    return {value: key for key, value in forward_dict.items()}
def create_type(forward_dict):
    """Map each key of forward_dict to the '<ClassName>Type' class whose
    name matches the value's class name, resolved in this module's
    namespace via eval."""
    return {key: eval(value.__name__ + 'Type')
            for key, value in forward_dict.items()}
class DesmondParser(object):
"""
A class containing methods required to read in a Desmond CMS File
"""
# 'lookup_*' is the inverse dictionary typically used for writing
desmond_combination_rules = {'1': 'Multiply-C6C12',
                             '2': 'Lorentz-Berthelot',
                             '3': 'Multiply-Sigeps'
                             }
lookup_desmond_combination_rules = create_lookup(desmond_combination_rules)

desmond_pairs = {'LJ12_6_SIG_EPSILON': LjSigepsPair,
                 'LJ': LjDefaultPair,
                 'COULOMB': LjDefaultPair
                 }
lookup_desmond_pairs = create_lookup(desmond_pairs)  # not unique
desmond_pair_types = create_type(desmond_pairs)

desmond_bonds = {'HARM_CONSTRAINED': HarmonicBond,
                 'HARM': HarmonicBond
                 }
lookup_desmond_bonds = create_lookup(desmond_bonds)  # not unique - revisit.
desmond_bond_types = create_type(desmond_bonds)

def canonical_bond(self, bond, params, direction='into', name=None):
    """Convert a bond between the canonical form and the Desmond form.

    Args:
        bond: The bond force being converted.
        params: Dict of parameter keywords for the bond.
        direction: 'into' converts Desmond -> canonical; 'from' converts
            canonical -> Desmond.
        name: Desmond functional-form name (used with direction='into').

    Returns:
        For 'into': the modified bond object.
        For 'from': a tuple (names, paramlists) describing the Desmond
        representation.

    Raises:
        UnsupportedFunctional: When writing a bond class that has no
            Desmond lookup entry.
    """
    if direction == 'into':
        canonical_force_scale = self.canonical_force_scale_into
        phase = 'Read'
    else:
        try:
            name = self.lookup_desmond_bonds[bond.__class__]  # check to make sure this OK given the c
        except KeyError:
            # Only a missing lookup entry means the functional form is
            # unsupported. (A bare 'except:' here previously masked
            # unrelated errors as UnsupportedFunctional.)
            raise UnsupportedFunctional(bond, ENGINE)

        canonical_force_scale = self.canonical_force_scale_from
        phase = 'Write'

    names = []
    paramlists = []

    if bond.__class__ in [HarmonicBond, HarmonicPotentialBond]:
        if direction == 'into':
            # Scale the force constant into the canonical convention and
            # record whether the bond is constrained.
            bond.k *= canonical_force_scale
            if name == 'HARM_CONSTRAINED':
                bond.c = True
            elif name == 'HARM':
                bond.c = False
            else:
                warn("ReadError: Found unsupported bond in Desmond {:s}".format(name))
            return bond
        else:
            params['k'] *= canonical_force_scale
            # harmonic potentials in Gromacs should be constrained (??: check what this means)
            name = 'HARM'
            if hasattr(bond, 'c'):
                if getattr(bond, 'c') and not isinstance(bond, HarmonicPotentialBond):
                    name = 'HARM_CONSTRAINED'
            names.append(name)
            paramlists.append(params)
    return names, paramlists
desmond_angles = {'HARM_CONSTRAINED': HarmonicAngle,
                  'HARM': HarmonicAngle,
                  'UB': UreyBradleyNoharmAngle
                  }
lookup_desmond_angles = create_lookup(desmond_angles)
desmond_angle_types = create_type(desmond_angles)

def canonical_angle(self, angle, params, direction='into', name=None,
                    molecule_type=None):
    """Convert an angle between the canonical form and the Desmond form.

    Args:
        angle: The angle force being converted.
        params: Dict of parameter keywords for the angle.
        name: Desmond functional-form name (used with direction='into').
        direction: 'into' means into the canonical form, 'from' means from
            the canonical form into Desmond.
        molecule_type: Current molecule type (would like to be able to get
            rid of this, but need it to search angles for now — Desmond
            splits Urey-Bradley terms across two separate angle entries
            that must be merged).

    Returns:
        For 'into': the modified angle (possibly None when merged into an
        existing angle). For 'from': (names, paramlists) describing the
        Desmond representation.
    """
    if direction == 'into':
        canonical_force_scale = self.canonical_force_scale_into
    else:
        # we'd like to automate this, but currently have to state explicitly.
        if angle.__class__ not in [HarmonicAngle, UreyBradleyAngle]:
            raise UnsupportedFunctional(angle, ENGINE)
        canonical_force_scale = self.canonical_force_scale_from
        phase = 'Write'

    names = []
    paramlists = []

    if angle.__class__ in [HarmonicAngle, UreyBradleyAngle, UreyBradleyNoharmAngle]:
        if direction == 'into':
            # Scale force constants into the canonical convention.
            if angle.__class__ in [UreyBradleyAngle, UreyBradleyNoharmAngle]:
                angle.kUB *= canonical_force_scale
            if angle.__class__ in [UreyBradleyAngle, HarmonicAngle]:
                angle.k *= canonical_force_scale
            if name == 'HARM_CONSTRAINED':  # this needs to go first because HARM is a substring
                angle.c = True
            elif name == 'HARM':
                angle.c = False
        else:
            params['k'] = canonical_force_scale * params['k']
            name = 'HARM'
            if hasattr(angle, 'c'):
                if getattr(angle, 'c'):
                    name = 'HARM_CONSTRAINED'

        if direction == 'into' and angle.__class__ in [UreyBradleyNoharmAngle, HarmonicAngle]:
            if angle.__class__ == UreyBradleyNoharmAngle:
                # Urey-Bradley is implemented in DESMOND differently, with the
                # terms implemented in a new angle term independent of the
                # harmonic term. Instead, we will add everything together
                # afterwards into a single term.
                angle = self.create_forcetype(
                        UreyBradleyAngle,
                        [angle.atom1, angle.atom2, angle.atom3],
                        [0, 0, angle.r._value, angle.kUB._value])  # this seems kludgy

                # next, find if we already have this angle somewhere
                matched_angle = molecule_type.match_angles(angle)
                if matched_angle:  # we found one, if false, we haven't seen it yet, we'll add later
                    if matched_angle.__class__ == HarmonicAngle:
                        # Merge the harmonic part into the UB angle and drop
                        # the now-redundant harmonic entry.
                        angle.k = matched_angle.k
                        angle.theta = matched_angle.theta
                        molecule_type.angle_forces.remove(matched_angle)
            elif angle.__class__ == HarmonicAngle:
                matched_angle = molecule_type.match_angles(angle)
                if matched_angle and matched_angle.__class__ == UreyBradleyAngle:
                    # just copy over the information into the old angle.
                    matched_angle.k = angle.k
                    matched_angle.theta = angle.theta
                    angle = None
        elif direction == 'from' and angle.__class__ in [UreyBradleyAngle]:
            # Writing a UB angle produces two Desmond entries: the harmonic
            # part (theta/k/c) and the UB part (r/kUB).
            params_harmpart = {k: v for (k, v) in params.items() if k in ['theta', 'k', 'c']}
            names.append(name)
            paramlists.append(params_harmpart)
            name = 'UB'
            params['kUB'] *= canonical_force_scale
            params_ubpart = {k: v for (k, v) in params.items() if k in ['r', 'kUB']}
            names.append(name)
            paramlists.append(params_ubpart)
        else:
            if direction == 'from':
                names.append(name)
                paramlists.append(params)

        if direction == 'into':
            return angle
        elif direction == 'from':
            return names, paramlists
    else:
        raise UnsupportedFunctional(angle, ENGINE)
desmond_dihedrals = {'IMPROPER_HARM': ImproperHarmonicDihedral,
                     'PROPER_TRIG': TrigDihedral,
                     'IMPROPER_TRIG': TrigDihedral,
                     'OPLS_PROPER': TrigDihedral,
                     'OPLS_IMPROPER': TrigDihedral
                     }
# Explicit (not create_lookup) because the forward map is many-to-one:
# several Desmond names map onto TrigDihedral.
lookup_desmond_dihedrals = {TrigDihedral: 'PROPER_TRIG',
                            ImproperHarmonicDihedral: 'IMPROPER_HARM'
                            }
lookup_desmond_dihedral = create_lookup(desmond_dihedrals)
desmond_dihedral_types = create_type(desmond_dihedrals)

def canonical_dihedral(self, dihedral, params, direction='into', name=None, molecule_type=None):
    """Convert a dihedral between the canonical form and the Desmond form.

    Args:
        dihedral: The dihedral force being converted.
        params: Dict of parameter keywords for the dihedral.
        direction: 'into' converts Desmond -> canonical; 'from' converts
            canonical -> Desmond.
        name: Desmond functional-form name (used with direction='into').
        molecule_type: Current molecule type; used to merge multiple
            Desmond entries for the same dihedral into one force.

    Returns:
        For 'into': the modified dihedral. For 'from': (names, paramlists)
        describing the Desmond representation.

    Raises:
        UnsupportedFunctional: When writing a dihedral class that has no
            Desmond lookup entry.
    """
    if direction == 'into':
        canonical_force_scale = self.canonical_force_scale_into
        phase = 'Read'
    else:
        try:
            name = self.lookup_desmond_dihedrals[dihedral.__class__]
        except KeyError:
            # Only a missing lookup entry means the functional form is
            # unsupported. (A bare 'except:' here previously masked
            # unrelated errors as UnsupportedFunctional.)
            raise UnsupportedFunctional(dihedral, ENGINE)

        canonical_force_scale = self.canonical_force_scale_from
        phase = 'Write'

    if dihedral.__class__ in [ImproperHarmonicDihedral, TrigDihedral]:
        if direction == 'into':
            # Improper Dihedral 2 ---NOT SURE ABOUT MULTIPLICITY
            if name == "IMPROPER_HARM":
                dihedral.improper = True
            elif name == "PROPER_TRIG" or name == "IMPROPER_TRIG":
                if name == "IMPROPER_TRIG":
                    dihedral.improper = True
                else:
                    dihedral.improper = False
            elif name == "OPLS_PROPER" or name == "OPLS_IMPROPER":
                # OPLS_IMPROPER actually isn't any different from OPLS_PROPER
                dihedral.improper = False

            try:
                # we can have multiple parameters with DESMOND, and append if we do
                dihedralmatch = molecule_type.match_dihedrals(dihedral)
                # this will fail if it's the wrong type of dihedral
                if dihedralmatch:
                    dihedralmatch.sum_parameters(dihedral)
            except Exception as e:
                logger.exception(e)
            return dihedral
        else:
            names = []
            paramlists = []
            if dihedral.__class__ in [ImproperHarmonicDihedral]:
                params['k'] = params['k'] * canonical_force_scale
                name = 'IMPROPER_HARM'
            elif dihedral.__class__ in [TrigDihedral]:
                name = 'PROPER_TRIG'
                if hasattr(dihedral, 'improper'):
                    if getattr(dihedral, 'improper'):
                        name = 'IMPROPER_TRIG'
            names.append(name)
            paramlists.append(params)
            return names, paramlists
def __init__(self, cms_file, system=None):
    """
    Initializes a DesmondParser object which serves to read in a CMS file
    into the abstract representation.

    Args:
        cms_file: Path of the cms file to read or write.
        system: Optional existing System; a fresh one is created if None.
    """
    self.cms_file = cms_file
    if not system:
        system = System()
    self.system = system

    # Parallel lists: parameters and name keys of parsed ffio vdw types.
    self.vdwtypes = []
    self.vdwtypeskeys = []

    self.viparr = 1

    # Line offsets of the fmct / m_atom / m_bond / ffio blocks in the file.
    self.fmct_blockpos = []
    self.atom_blockpos = []
    self.bond_blockpos = []
    self.ffio_blockpos = []

    # Canonical parameter names and unit definitions for desmond forces.
    self.paramlist = ff.build_paramlist('desmond')
    self.unitvars = ff.build_unitvars('desmond', self.paramlist)

    # Scale factors applied to force constants when converting to/from the
    # canonical form (see canonical_bond/canonical_angle).
    # NOTE(review): presumably reflects a 1/2*k convention difference
    # between Desmond and the canonical form — confirm.
    self.canonical_force_scale_into = 2.0
    self.canonical_force_scale_from = 0.5

    # Column names expected in the m_atom block.
    self.atom_col_vars = ['i_m_mmod_type',
                          'r_m_x_coord',
                          'r_m_y_coord',
                          'r_m_z_coord',
                          'i_m_residue_number',
                          's_m_pdb_residue_name',
                          'i_m_atomic_number',
                          's_m_pdb_atom_name',
                          's_m_atom_name',
                          'r_ffio_x_vel',
                          'r_ffio_y_vel',
                          'r_ffio_z_vel'
                          ]
    # Box-vector column names (chorus block).
    self.atom_box_vars = ['r_chorus_box_ax',
                          'r_chorus_box_ay',
                          'r_chorus_box_az',
                          'r_chorus_box_bx',
                          'r_chorus_box_by',
                          'r_chorus_box_bz',
                          'r_chorus_box_cx',
                          'r_chorus_box_cy',
                          'r_chorus_box_cz'
                          ]
def get_parameter_list_from_kwds(self, force, kwds):
    """Return the ordered parameter list for 'force' built from a keyword dict."""
    return ff.get_parameter_list_from_kwds(force, kwds, self.paramlist)

def get_parameter_list_from_force(self, force):
    """Return the ordered parameter list stored on 'force'."""
    return ff.get_parameter_list_from_force(force, self.paramlist)

def get_parameter_kwds_from_force(self, force):
    """Return the keyword dict of parameters stored on 'force'."""
    return ff.get_parameter_kwds_from_force(force, self.get_parameter_list_from_force, self.paramlist)

def create_kwd_dict(self, forcetype_object, values, optvalues=None):
    """Build the keyword dict for 'forcetype_object' from raw values using
    the desmond unit definitions."""
    kwd = ff.create_kwd_dict(self.unitvars, self.paramlist, forcetype_object, values, optvalues=optvalues)
    return kwd

def create_forcetype(self, forcetype_object, paramlist, values, optvalues=None):
    """Instantiate 'forcetype_object' from atom ids/bondingtypes and raw
    parameter values."""
    return forcetype_object(*paramlist, **self.create_kwd_dict(forcetype_object, values, optvalues))
def parse_ffio_block(self, start, end):
    """Parse one ffio_* block from self.lines[start:end].

    Splits the block into its column names (entry_data) and row values
    (entry_values), plus a column-name -> value-index mapping.

    Returns:
        Tuple (ff_type, ff_number, entry_data, entry_values, entry_dict,
        next_line_index). When no further ffio block exists before 'end',
        returns ('Done with ffio', 0, None, None, None, start).
    """
    # LOAD FFIO BLOCKS IN FIRST (CONTAINS TOPOLOGY)
    # read in a ffio_block that isn't ffio_ff and split it into the
    # commands and the values.
    # lots of room for additional error checking here, such as whether
    # each entry has the correct number of data values, whether they are the correct type, etc.

    # scroll to the next ffio entry
    while not 'ffio_' in self.lines[start]:
        # this is not an ffio block! or, we have reached the end of the file
        if 'ffio_' not in self.lines[start]:
            start += 1
        if start >= end:
            return 'Done with ffio', 0, None, None, None, start

    # Header token appears to be of the form 'name[count]': split it into
    # the block type and the number of entries.
    components = re.split('\W', self.lines[start].split()[0])  # get rid of whitespace, split on nonword
    ff_type = components[0]
    ff_number = int(components[1])

    # Column names run until the first ':::' separator.
    i = start + 1
    entry_data = []
    while not ':::' in self.lines[i]:
        entry_data.append(self.lines[i].split()[0])
        i += 1

    i += 1  # skip the separator we just found

    # Row values run until the next ':::' separator.
    entry_values = []
    while not ':::' in self.lines[i]:
        if self.lines[i].strip():  # skip the blank spaces.
            entry_values.append(self.lines[i])
        i += 1

    while '}' not in self.lines[i]:  # wait until we hit an end to the block
        i += 1
    i += 1  # step past the end of the block

    # Map column name -> 1-based value index (index 0 is the entry number).
    entry_dict = dict()
    for j, d in enumerate(entry_data):
        entry_dict[d] = j + 1  # the first one is the entry number

    return ff_type, ff_number, entry_data, entry_values, entry_dict, i
def store_ffio_data(self, ff_type, ff_number, entry_data, entry_values, entry_dict):
    """Cache a parsed ffio block, keyed by its block type."""
    self.stored_ffio_data[ff_type] = {
        'ff_type': ff_type,
        'ff_number': ff_number,
        'entry_data': entry_data,
        'entry_values': entry_values,
        'entry_dict': entry_dict,
    }

def retrieve_ffio_data(self, ff_type):
    """Return [ff_number, entry_data, entry_values, entry_dict] for a
    previously stored ffio block."""
    stored = self.stored_ffio_data[ff_type]
    return [stored[key] for key in
            ('ff_number', 'entry_data', 'entry_values', 'entry_dict')]
def parse_vdwtypes(self, type, current_molecule_type):
    """Accumulate vdw type parameters for later use in parse_sites."""
    ff_number, entry_data, entry_values, entry_dict = self.retrieve_ffio_data(type)
    # The molecule name lives in the sites block, but vdwtypes come before
    # sites, so stash the info here and apply it later at sites.
    # Eventually, we should probably move to a model where we store
    # sections we can't use yet, and then process them in the order we want.
    logger.debug("Parsing [ vdwtypes ] ...")
    for entry in entry_values[:ff_number]:
        tokens = entry.split()
        # Assumes all vdwtypes are stored as LJ12_6_SIG_EPSILON.
        self.vdwtypes.append(tokens[3:])
        self.vdwtypeskeys.append(tokens[1])
def parse_sites(self, type, molname, i, start):
    """Parse an ffio_sites block: create the MoleculeType, its Molecule
    and Atoms, and register any new atom types with the system.

    Fixes relative to the previous revision (Multiply-C6C12 branch, which
    previously raised NameError / computed a zero exponent):
        * (1/6) was integer division (== 0) under Python 2; now 1.0/6.0.
        * values[ivdwtypes] was an undefined name (typo for ivdwtype).
        * 'angstroms' / 'unit.angstro,s**(12)' were garbled references to
          units.angstroms.
    """
    ff_number, entry_data, entry_values, entry_dict = self.retrieve_ffio_data(type)

    # correlate with atomtypes and atoms in GROMACS
    logger.debug("Parsing [ sites ] ...")

    # set indices to avoid continually calling list functions.
    ivdwtype = entry_data.index('s_ffio_vdwtype') + 1
    icharge = entry_data.index('r_ffio_charge') + 1
    imass = entry_data.index('r_ffio_mass') + 1
    stemp = None
    etemp = None
    if 'i_ffio_resnr' in entry_data:
        iresnum = entry_data.index('i_ffio_resnr') + 1
        iresidue = entry_data.index('s_ffio_residue') + 1

    cgnr = 0

    # create the atom type container for the data
    current_molecule_type = MoleculeType(name=molname)
    current_molecule_type.nrexcl = 0  # PLACEHOLDER FOR NREXCL...WE NEED TO FIND OUT WHERE IT IS
    # MRS: basically, we have to figure out the furthest number of bonds out
    # to exclude OR explicitly set gromacs exclusions. Either should work.
    # for now, we'll go with the latter
    self.system.add_molecule_type(current_molecule_type)

    current_molecule = Molecule(name=molname)  # should this be the same molname several as lines up?
    for j in range(ff_number):
        values = split_with_quotes(entry_values[j])
        if values[1] == "atom":
            if ('i_ffio_resnr' in entry_data):
                atom = Atom(int(values[0]), values[ivdwtype],
                            int(values[iresnum]),
                            values[iresidue])
            else:
                # No residuenr, means we will have identical atoms sharing this.
                atom = Atom(int(values[0]), values[ivdwtype])

            atom.atomtype = (0, values[ivdwtype])
            atom.charge = (0, float(values[icharge]) * units.elementary_charge)
            atom.mass = (0, float(values[imass]) * units.amu)
            stemp = float(self.vdwtypes[self.vdwtypeskeys.index(values[ivdwtype])][0]) * units.angstroms  # was in angstroms
            etemp = float(self.vdwtypes[self.vdwtypeskeys.index(values[ivdwtype])][1]) * units.kilocalorie_per_mole  # was in kilocal per mol
            atom.sigma = (0, stemp)
            atom.epsilon = (0, etemp)
            atom.cgnr = cgnr
            cgnr += 1

            newAtomType = None
            current_molecule.add_atom(atom)
            if not self.system._atomtypes.get(AbstractAtomType(atom.atomtype.get(0))):  # if atomtype not in self.system, add it
                if self.system.combination_rule == 'Multiply-C6C12':
                    # BUGFIX: 1/6 was 0 under Python 2 integer division.
                    sigma = (etemp / stemp) ** (1.0 / 6.0)
                    epsilon = (stemp) / (4 * sigma ** 6)
                    # BUGFIX: was values[ivdwtypes] (undefined) and the
                    # unit expressions referenced undefined names.
                    newAtomType = AtomCType(values[ivdwtype],  # atomtype/name
                                            values[ivdwtype],  # bondtype
                                            -1,  # atomic_number
                                            float(values[imass]) * units.amu,  # mass
                                            float(values[icharge]) * units.elementary_charge,  # charge--NEED TO CONVERT TO ACTUAL UNIT
                                            'A',  # pcharge...saw this in top--NEED TO CONVERT TO ACTUAL UNITS
                                            sigma * units.kilocalorie_per_mole * units.angstroms ** (6),
                                            epsilon * units.kilocalorie_per_mole * units.angstroms ** (12))
                elif (self.system.combination_rule == 'Lorentz-Berthelot') or (self.system.combination_rule == 'Multiply-Sigeps'):
                    newAtomType = AtomSigepsType(values[ivdwtype],  # atomtype/name
                                                 values[ivdwtype],  # bondtype
                                                 -1,  # atomic_number
                                                 float(values[imass]) * units.amu,  # mass--NEED TO CONVERT TO ACTUAL UNITS
                                                 float(values[icharge]) * units.elementary_charge,  # charge--NEED TO CONVERT TO ACTUAL UNIT
                                                 'A',  # pcharge...saw this in top--NEED TO CONVERT TO ACTUAL UNITS
                                                 stemp,
                                                 etemp)
                self.system.add_atomtype(newAtomType)

    if len(self.atom_blockpos) > 1:  # LOADING M_ATOMS
        if self.atom_blockpos[0] < start:
            # generate the new molecules for this block; the number of
            # molecules depends on the number of entries in ffio_sites
            # (ff_number).
            new_molecules = self.loadMAtoms(self.lines, self.atom_blockpos[0], i, current_molecule, ff_number)
            self.atom_blockpos.pop(0)

    index = 0
    for molecule in new_molecules:
        self.system.add_molecule(molecule)
        # now construct an atomlist with all the atoms
        for atom in molecule.atoms:
            # does this need to be a deep copy?
            self.atomlist.append(atom)
            index += 1

    return self.system._molecule_types[molname]
def parse_bonds(self, type, current_molecule_type, i, start):
    """Parse an ffio_bonds block into bond forces on current_molecule_type.

    Also consumes the preceding m_bond block (placeholder bonds) when one
    exists; parsed bonds replace matching placeholders but inherit their
    bond order.
    """
    ff_number, entry_data, ev, ed = self.retrieve_ffio_data(type)

    if len(self.bond_blockpos) > 1:  # LOADING M_BONDS
        if self.bond_blockpos[0] < start:
            # Atoms per molecule, taken from the first molecule.
            for molecule in iter(current_molecule_type.molecules):
                npermol = len(molecule.atoms)
                break

            # of the parsers, this is the only one that uses 'lines'. Can we remove?
            current_molecule_type.bond_forces = self.loadMBonds(self.lines, self.bond_blockpos[0], i, npermol)
            self.bond_blockpos.pop(0)

    logger.debug("Parsing [ bonds ]...")

    for j in range(ff_number):
        values = split_with_quotes(ev[j])
        key = values[ed['s_ffio_funct']].upper()
        # Atom ids, then their bondingtypes (atom names) appended.
        atomnames = ['i_ffio_ai', 'i_ffio_aj']
        atoms = [int(values[ed[a]]) for a in atomnames]
        bondingtypes = [self.atomlist[atom - 1].name for atom in atoms]
        atoms.extend(bondingtypes)
        cnames = ['r_ffio_c1', 'r_ffio_c2']
        params = [float(values[ed[x]]) for x in cnames]
        new_bond = self.create_forcetype(self.desmond_bonds[key], atoms, params)
        kwds = self.get_parameter_kwds_from_force(new_bond)
        new_bond = self.canonical_bond(new_bond, kwds, direction='into', name=key)

        # removing the placeholder from matoms (should be a better way to do this?)
        if new_bond:
            old_bond = current_molecule_type.match_bonds(new_bond)
            if old_bond:
                new_bond.order = old_bond.order
                current_molecule_type.bond_forces.remove(old_bond)
            current_molecule_type.bond_forces.add(new_bond)
def parse_pairs(self, type, current_molecule_type):
    """Parse an ffio_pairs block into 1-4 pair forces.

    Desmond lists LJ and COULOMB scaling for the same atom pair on
    separate rows; rows matching an already-seen pair are merged into a
    single pair force carrying both scaleLJ and scaleQQ.
    """
    ff_number, entry_data, ev, ed = self.retrieve_ffio_data(type)
    logger.debug("Parsing [ pairs ] ...")

    for j in range(ff_number):
        ljcorr = False
        coulcorr = False
        new_pair = None
        values = split_with_quotes(ev[j])
        atomnames = ['i_ffio_ai', 'i_ffio_aj']
        atoms = [int(values[ed[a]]) for a in atomnames]
        bondingtypes = [self.atomlist[atom - 1].name for atom in atoms]
        params = atoms + bondingtypes
        key = values[ed['s_ffio_funct']].upper()

        if key == "LJ12_6_SIG_EPSILON":
            new_pair = self.create_forcetype(LjSigepsPair, params,
                    [float(values[ed['r_ffio_c1']]), float(values[ed['r_ffio_c2']])])
        elif key == "LJ" or key == "COULOMB":
            # I think we just need LjSigepsPair, not LjPair?
            new_pair = self.create_forcetype(LjDefaultPair, params, [0, 0])
            if key == "LJ":
                ljcorr = float(values[ed['r_ffio_c1']])
                new_pair.scaleLJ = ljcorr
            elif key == "COULOMB":
                coulcorr = float(values[ed['r_ffio_c1']])
                new_pair.scaleQQ = coulcorr
        else:
            warn("ReadError: didn't recognize type {:s} in line {:s}".format(key, ev[j]))

        # now, we catch the matches and read them into a single potential
        pair_match = current_molecule_type.match_pairs(new_pair)
        if pair_match:  # we found a pair with the same atoms; let's insert or delete information as needed.
            remove_old = False
            remove_new = False
            if isinstance(new_pair, LjSigepsPair) and isinstance(pair_match, LjDefaultPair) and pair_match.scaleQQ:
                # Need to add old scaleQQ to this new pair
                new_pair.scaleQQ = pair_match.scaleQQ
                remove_old = True
            elif isinstance(pair_match, LjSigepsPair) and isinstance(new_pair, LjDefaultPair) and new_pair.scaleQQ:
                # Need to add the scaleQQ to the old pair
                pair_match.scaleQQ = new_pair.scaleQQ
                remove_new = True
            elif isinstance(new_pair, LjDefaultPair) and isinstance(pair_match, LjDefaultPair):
                # Both default pairs: carry each scale factor over to
                # whichever pair object survives.
                if pair_match.scaleQQ and not new_pair.scaleQQ:
                    new_pair.scaleQQ = pair_match.scaleQQ
                    remove_old = True
                elif not pair_match.scaleQQ and new_pair.scaleQQ:
                    pair_match.scaleQQ = new_pair.scaleQQ
                    remove_new = True
                if pair_match.scaleLJ and not new_pair.scaleLJ:
                    new_pair.scaleLJ = pair_match.scaleLJ
                    remove_new = True
                elif not pair_match.scaleLJ and new_pair.scaleLJ:
                    pair_match.scaleLJ = new_pair.scaleLJ
                    remove_old = True
            if remove_old:
                current_molecule_type.pair_forces.remove(pair_match)
            if remove_new:
                new_pair = None

        if coulcorr:
            self.system.coulomb_correction = coulcorr  # need this for gromacs to have the global declared
            # If we have difference between global and local, catch in gromacs.

        if ljcorr:
            self.system.lj_correction = ljcorr  # need this for gromacs to have the global declared
            # If we have difference between global and local, catch in gromacs.

        if new_pair:
            current_molecule_type.pair_forces.add(new_pair)

        # IMPORTANT: we are going to assume that all pairs are both LJ and COUL.
        # if COUL is not included, then it is because the charges are zero, and they will give the
        # same energy. This could eventually be improved by checking versus the sites.
def parse_angles(self, type, current_molecule_type):
ff_number, entry_data, ev, ed = self.retrieve_ffio_data(type)
logger.debug("Parsing [ angles ] ...")
for j in range(ff_number):
values = split_with_quotes(ev[j])
key = values[ed['s_ffio_funct']].upper()
atomnames = ['i_ffio_ai','i_ffio_aj','i_ffio_ak']
atoms = [int(values[ed[a]]) for a in atomnames]
bondingtypes = [self.atomlist[atom-1].name for atom in atoms]
atoms.extend(bondingtypes)
kwds = [float(values[ed['r_ffio_c1']]), float(values[ed['r_ffio_c2']])]
new_angle = self.create_forcetype(self.desmond_angles[key], atoms, kwds)
kwds = self.get_parameter_kwds_from_force(new_angle)
new_angle = self.canonical_angle(new_angle, kwds, direction = 'into', name = key,
molecule_type = current_molecule_type)
if new_angle:
current_molecule_type.angle_forces.add(new_angle)
def parse_dihedrals(self, type, current_molecule_type):
ff_number, entry_data, ev, ed = self.retrieve_ffio_data(type)
logger.debug("Parsing [ dihedrals ] ...")
for j in range(ff_number):
values = split_with_quotes(ev[j])
new_dihedral = None
dihedral_type = None
key = values[ed['s_ffio_funct']].upper()
atomnames = ['i_ffio_ai','i_ffio_aj','i_ffio_ak','i_ffio_al']
atoms = [int(values[ed[a]]) for a in atomnames]
bondingtypes = [self.atomlist[atom-1].name for atom in atoms]
atoms.extend(bondingtypes)
# not sure how to put the following lines in canonical, since it expects keywords,
# not strings of variable length. will have to fix later.
cnames = []
for e in entry_data:
if 'r_ffio_c' in e:
cnames.append(e) # append all of the constants, in order
if key == "IMPROPER_HARM":
kwds = [float(values[ed[cnames[0]]]), 2*float(values[ed[cnames[1]]])] # harmonic, multiple x2 for desmond convention.
elif key == "PROPER_TRIG" or key == "IMPROPER_TRIG":
kwds = [float(values[ed[x]]) for x in cnames]
elif key == "OPLS_PROPER" or key == "OPLS_IMPROPER":
# next 3 lines definitely not the right way to do it.
#opls_kwds = {key: value for key, value in zip("c1 c2 c3 c4".split(), [units.kilocalorie_per_mole * float(s) for s in values[7:11]])}
opls_kwds = [float(values[ed[x]]) * units.kilocalorie_per_mole for x in cnames[1:5]]
opls_kwds = convert_dihedral_from_fourier_to_trig(opls_kwds)
kwds = np.zeros(8) # will fill this in later.
new_dihedral = self.create_forcetype(self.desmond_dihedrals[key], atoms, kwds)
# really should be some way to get rid of this code below
if key == "OPLS_PROPER" or key == "OPLS_IMPROPER":
for key in opls_kwds.keys():
setattr(new_dihedral,key,opls_kwds[key])
# really should be some way to get rid of this code above
kwds = self.get_parameter_kwds_from_force(new_dihedral)
new_dihedral = self.canonical_dihedral(new_dihedral, kwds, direction = 'into', name = key,
molecule_type = current_molecule_type)
if new_dihedral:
current_molecule_type.dihedral_forces.add(new_dihedral)
def parse_torsion_torsion(self, type, current_molecule_type):
ff_number, entry_data, ev, ed = self.retrieve_ffio_data(type)
logger.debug("Parsing [ torsion-torsion ] ...")
for j in range(ff_number):
values = split_with_quotes(ev[j])
new_torsiontorsion = None
key = values[ed['s_ffio_funct']].upper()
if key == "CMAP":
# we shouldn't need to try/accept because there are no units.
new_torsiontorsion = TorsionTorsionCMAP(int(values[ed['i_ffio_ai']]),
int(values[ed['i_ffio_aj']]),
int(values[ed['i_ffio_ak']]),
int(values[ed['i_ffio_al']]),
int(values[ed['i_ffio_am']]),
int(values[ed['i_ffio_an']]),
int(values[ed['i_ffio_ao']]),
int(values[ed['i_ffio_ap']]),
'cmap',
int(values[ed['i_ffio_c1']]))
else:
warn("ReadError: found unsupported torsion-torsion type in: {:s}".format(str(line[i])))
if new_torsiontorsion:
current_molecule_type.torsiontorsion_forces.add(new_torsiontorsion)
def parse_exclusions(self, type, current_molecule_type):
ff_number, entry_data, ev, entry_dict = self.retrieve_ffio_data(type)
logger.debug("Parsing [ exclusions ] ...")
# currently, assumes no comments, could be dangerous?
for j in range(ff_number):
temp = split_with_quotes(ev[j])
temp.remove(temp[0])
current_molecule_type.exclusions.add(tuple([int(x) for x in temp]))
    def parse_restraints(self, type, current_molecule_type):
        """Stub: Desmond restraint blocks are recognized but not yet translated."""
        ff_number, entry_data, ev, ed = self.retrieve_ffio_data(type)
        logger.debug("Warning: Parsing [ restraints] not yet implemented")
def parse_constraints(self, type, current_molecule_type):
ff_number, entry_data, ev, ed = self.retrieve_ffio_data(type)
logger.debug("Parsing [ constraints ] ...")
ctype = 1
funct_pos = 0
atompos = [] #position of atoms in constraints; spread all over the place
lenpos = [] #position of atom length; spread all over the place
tempatom = []
templength = []
templen = 0
for j in range(len(entry_data)):
if entry_data[j] == 's_ffio_funct':
funct_pos = ctype
elif 'i_ffio' in entry_data[j]:
atompos.append(ctype)
elif 'r_ffio' in entry_data[j]:
lenpos.append(ctype)
ctype+=1
for j in range(ff_number):
# water constraints actually get written to rigidwater (i.e. settles) constraints.
if 'HOH' in ev[j] or 'AH' in ev[j]:
values = split_with_quotes(ev[j])
tempatom = []
templength = []
for a in atompos:
if not '<>' in values[a]:
tempatom.append(int(values[a]))
else:
tempatom.append(None)
for l in lenpos:
if not '<>' in values[l]:
if 'AH' in ev[j]:
templength.append(float(values[l])*units.angstroms) # Check units?
else:
templength.append(None*units.angstroms)
constr_type = values[funct_pos]
if 'HOH' in constr_type:
dOH = float(values[lenpos[1]])
if dOH != float(values[lenpos[2]]):
logger.debug("Warning: second length in a rigid water specification {:s} is not the same as the first {:s}".format(values[lenpos[1]],values[lenpos[2]]))
angle = float(values[lenpos[0]])/(180/math.pi)
dHH = 2*dOH*math.sin(angle/2)
params = [atompos[0], atompos[1], atompos[2], dOH*units.angstroms, dHH*units.angstroms]
new_rigidwater = RigidWater(*params)
if new_rigidwater:
current_molecule_type.rigidwaters.add(new_rigidwater)
elif 'AH' in constr_type:
templen = int(list(constr_type)[-1])
params = [tempatom[0], tempatom[1], templength[0], constr_type]
for t in range(2,templen+1):
params.extend([tempatom[t],templength[t-1]])
new_constraint = Constraint(*params)
if new_constraint:
current_molecule_type.constraints.add(new_constraint)
else:
warn("ReadError: found unsupported constraint type {:s}".format(ev[j]))
    def load_ffio_block(self, molname, start, end):
        """Parse one molecule's ffio_ff block (self.lines[start:end]).

        Reads the preamble to determine the combination rule, slices the
        block into its ffio_* subsections, then dispatches each stored
        subsection to the matching parser in self.sysDirective.
        'ffio_vdwtypes' and 'ffio_sites' are processed first because the
        remaining sections need the site/type information.

        Args:
            molname: name of current molecule
            start: beginning of where ffio_ff starts for each molecule
            end: ending of where ffio_ff ends for each molecule
        """
        i = start
        j = start
        self.stored_ffio_types = []  # a list of stored ffio_type to keep track
                                     # of the ordering later
        self.stored_ffio_data = {}  # dictionary of stored ffio_entries
        values = []
        constraints = []
        temp = []
        current_molecule_type = None
        #There are several sections which require sites information to
        #process. We keep a flag for this so that we are aware when
        #we have seen the sites
        bPreambleRead = False
        namecol = 0
        combrcol = 0
        vdwtypercol = 0
        #DEFAULT VALUES WHEN CONVERTING TO GROMACS
        self.system.nonbonded_function = 1
        self.system.genpairs = 'yes'
        logger.debug('Parsing [ molecule {:s} ]'.format(molname))
        logger.debug('Parsing [ ffio ]')
        while i < end:
            if not bPreambleRead:
                # read the first section for the forces field info
                while not (':::' in self.lines[i]):
                    # record each label's offset relative to the data rows
                    if 's_ffio_name' in self.lines[i]:
                        namecol = i-start-1
                    elif 's_ffio_comb_rule' in self.lines[i]:
                        combrcol = i-start-1
                    elif 's_ffio_vdw_func' in self.lines[i]:
                        vdwtypercol = i-start-1
                    i+=1
                i+=1 # skip the ':::'
                # figure out combination rule
                combrule = self.lines[i+combrcol].upper()
                if "ARITHMETIC/GEOMETRIC" in combrule:
                    self.system.combination_rule = 'Lorentz-Berthelot'
                elif "GEOMETRIC" in combrule:
                    self.system.combination_rule = 'Multiply-Sigeps'
                elif "LJ12_6_C6C12" in combrule:
                    self.system.combination_rule = 'Multiply-C6C12'
                if (vdwtypercol > 0):
                    vdwrule = self.lines[i+vdwtypercol]
                # MISSING: need to identify vdw rule here -- currently assuming LJ12_6_sig_epsilon!
                # skip to the next ffio entry
                while not ('ffio' in self.lines[i]):
                    i+=1
                bPreambleRead = True
            # slice off the next ffio_* subsection and stash it for later dispatch
            ff_type, ff_number, entry_data, entry_values, entry_dict, i = self.parse_ffio_block(i, end)
            self.stored_ffio_types.append(ff_type)
            self.store_ffio_data(ff_type,ff_number, entry_data, entry_values, entry_dict)
        # Reorder so 'vdwtypes' is first, then 'sites'. Could eventually get some simplification
        # by putting sites first, but too much rewriting for now.
        self.stored_ffio_types.insert(0, self.stored_ffio_types.pop(self.stored_ffio_types.index('ffio_sites')))
        self.stored_ffio_types.insert(0, self.stored_ffio_types.pop(self.stored_ffio_types.index('ffio_vdwtypes')))
        # now process all the data
        for type in self.stored_ffio_types:
            if type in self.sysDirective:
                params = [type]
                if type == 'ffio_sites':
                    params += [molname, i, start]
                else:
                    params += [current_molecule_type]
                if type == 'ffio_bonds':
                    params += [i, start]
                if type == 'ffio_sites':
                    # parse_sites creates and returns the molecule type that the
                    # remaining sections attach their forces to
                    current_molecule_type = self.sysDirective[type](*params)
                else:
                    self.sysDirective[type](*params)
            elif type == 'Done with ffio':
                continue
            else:
                # unrecognized section: skip to its closing brace
                while '}' not in self.lines[i]:
                    i+=1 # not the most robust if there is nesting in a particular pattern
    def loadMBonds(self, lines, start, end, npermol): #adds new bonds for each molecule in System
        """Parse the m_bond block and return a set of HarmonicBond forces
        (zero force constants: connectivity and bond order only).

        Args:
            lines: list of all data in CMS format
            start: beginning of where m_bonds starts for each molecule
            end: ending of where m_bondsends for each molecule
            npermol: atoms per molecule; a row whose first atom exceeds this
                belongs to the next molecule copy and ends the scan.
        """
        logger.debug("Parsing [ m_bonds ] ...")
        bg = False
        newbond_force = None
        values = []
        i = start
        bonds = set()
        # right now, hard coded with header:
        ## First column is bond index #
        # i_m_from
        # i_m_to
        # i_m_order
        # which might need to be read at some point.
        while i < end:
            if ':::' in lines[i]:
                # first ':::' marks the start of data, second marks the end
                if bg:
                    break
                else:
                    bg = True
                    i+=1
            if bg:
                values = split_with_quotes(lines[i])
                atomi = int(values[1])
                atomj = int(values[2])
                bondingtypei = self.atomlist[atomi-1].name
                bondingtypej = self.atomlist[atomj-1].name
                params = [atomi, atomj, bondingtypei, bondingtypej]
                if atomi > npermol: # we've collected the number of atoms per molecule. Exit.
                    break
                order = int(values[3])
                kwd = [0, 0]
                optkwd = {'order': order, 'c': False}
                new_bond = self.create_forcetype(HarmonicBond, params, kwd, optkwd)
                bonds.add(new_bond)
            i+=1
        return bonds
    def loadMAtoms(self, lines, start, end, currentMolecule, slength): #adds positions and such to atoms in each molecule in System
        """Parse the m_atom block: fill residue info, positions, velocities
        and names for each atom, replicating currentMolecule per copy.

        Args:
            lines: list of all data in CMS format
            start: beginning of where m_atoms starts for each molecule
            end: ending of where m_atoms ends for each molecule
            currentMolecule: template Molecule, deep-copied once per repetition
            slength: number of unique atoms in m_atoms, used to calculate repetitions

        Returns:
            list of populated Molecule copies.
        """
        logger.debug("Parsing [ m_atom ] ...")
        i = start
        bg = False
        pdbaname = ""
        aname = ""
        # number of molecule copies = (total rows) / (atoms per molecule)
        # NOTE(review): '/' yields a float under Python 3; the 'while j < mult'
        # comparison below still works, but '//' may be intended -- confirm.
        mult = int(re.split('\W',lines[start].split()[0])[1])/slength
        cols = dict()
        # header scan: map each recognized column name to its index in a data row
        while i < end:
            if ':::' in lines[i]:
                i+=1
                break
            else:
                if 'First column' in lines[i]:
                    start += 1
                for c in self.atom_col_vars:
                    if c in lines[i]:
                        logger.debug("   Parsing [ {:s} ] ...".format(c))
                        cols[c] = i - start
                        break
            i+=1
        logger.debug("    Parsing atoms...")
        atom = None
        newMoleculeAtoms = []
        molecules = []
        j = 0
        while j < mult:
            newMolecule = copy.deepcopy(currentMolecule)
            for atom in newMolecule.atoms:
                if ':::' in lines[i]:
                    break
                else:
                    aline = split_with_quotes(lines[i])
                    atom.residue_index = int(aline[cols['i_m_residue_number']])
                    atom.residue_name = aline[cols['s_m_pdb_residue_name']].strip()
                    try:
                        atom.atomic_number = int(aline[cols['i_m_atomic_number']])
                    except Exception as e:
                        logger.exception(e) # EDZ: just pass statement before, now exception is recorded, but supressed
                    atom.position = [float(aline[cols['r_m_x_coord']]) * units.angstroms,
                                     float(aline[cols['r_m_y_coord']]) * units.angstroms,
                                     float(aline[cols['r_m_z_coord']]) * units.angstroms]
                    # default to zero velocity; overwritten below when present
                    atom.velocity = [0.0 * units.angstroms * units.picoseconds**(-1),
                                     0.0 * units.angstroms * units.picoseconds**(-1),
                                     0.0 * units.angstroms * units.picoseconds**(-1)]
                    if 'r_ffio_x_vel' in cols:
                        atom.velocity[0] = float(aline[cols['r_ffio_x_vel']]) * units.angstroms * units.picoseconds**(-1)
                    if 'r_ffio_y_vel' in cols:
                        atom.velocity[1] = float(aline[cols['r_ffio_y_vel']]) * units.angstroms * units.picoseconds**(-1)
                    if 'r_ffio_z_vel' in cols:
                        atom.velocity[2] = float(aline[cols['r_ffio_z_vel']]) * units.angstroms * units.picoseconds**(-1)
                    # name selection: prefer whichever of the two name columns is
                    # non-empty; when both exist, prefer the one that is not a
                    # bare number ('$^' matches only the empty string)
                    if 's_m_pdb_atom_name' in cols:
                        pdbaname = aline[cols['s_m_pdb_atom_name']].strip()
                    if 's_m_atom_name' in cols:
                        aname = aline[cols['s_m_atom_name']].strip()
                    if re.match('$^',pdbaname) and not re.match('$^',aname):
                        atom.name = aname
                    elif re.match('$^',aname) and not re.match('$^',pdbaname):
                        atom.name = pdbaname
                    elif re.search("\d+",pdbaname) and not re.search("\d+",aname):
                        if re.search("\D+",pdbaname) and re.search("\w+",pdbaname):
                            atom.name = pdbaname
                        else:
                            atom.name = aname
                    elif re.search("\d+",aname) and not re.search("\d+",pdbaname):
                        if re.search("\D+",aname) and re.search("\w+",aname):
                            atom.name = aname
                        else:
                            atom.name = pdbaname
                    elif re.match('$^',pdbaname) and re.match('$^',aname):
                        atom.name = "None"
                    else:
                        atom.name = aname #doesn't matter which we choose, so we'll go with atom name instead of pdb
                    i+=1
            molecules.append(newMolecule)
            j+=1
        return molecules
def load_box_vector(self, lines, start, end):
# Loading Box Vector
# Create a Box Vector to load into the System
# Args:
# lines: all the lines of the file stored in an array
# start: starting position
# end: ending position
v = np.zeros([3, 3]) * units.angstroms
for i, line in enumerate(lines[start:end]):
if self.atom_box_vars[0] in line:
startboxlabel = i
if ':::' in line:
endlabel = i + start
break
startbox = startboxlabel + endlabel
for nvec, line in enumerate(lines[startbox:startbox + 9]):
j = nvec // 3
k = nvec % 3
v[j, k] = float(line.strip()) * units.angstrom
self.system.box_vector = v
    def read(self):
        """Read the cms file and build the System.

        Scans self.cms_file once to record where the f_m_ct, m_atom, m_bond
        and ffio_ff blocks begin, registers the per-section parser dispatch
        table, loads each molecule's ffio block, then reads the box vector.

        Returns:
            self.system populated with everything parsed.
        """
        molnames = []
        with open(self.cms_file, 'r') as fl:
            self.lines = list(fl)
        i=0
        j=0
        self.atomtypes = dict()
        self.atomlist = []
        # figure out on which lines the different blocks begin and end.
        for line in self.lines:
            if 'f_m_ct' in line:
                if j > 0:
                    self.fmct_blockpos.append(i)
                j+=1
            if 'm_atom' in line and not (('i_m' in line) or ('s_m' in line)):
                if j > 1:
                    self.atom_blockpos.append(i)
                j+=1
            if 'm_bond' in line:
                if j > 2:
                    self.bond_blockpos.append(i)
                j+=1
            if 'ffio_ff' in line:
                if j > 2:
                    self.ffio_blockpos.append(i)
                j+=1
            i+=1
        i-=1
        # sentinel end positions so the block slicing below never runs off the file
        self.fmct_blockpos.append(i)
        self.atom_blockpos.append(i)
        self.bond_blockpos.append(i)
        self.ffio_blockpos.append(i)
        # dispatch table mapping each ffio_* section name to its parser method
        self.sysDirective = {'ffio_vdwtypes': self.parse_vdwtypes,
                             'ffio_sites': self.parse_sites,
                             'ffio_bonds': self.parse_bonds,
                             'ffio_pairs': self.parse_pairs,
                             'ffio_angles': self.parse_angles,
                             'ffio_dihedrals': self.parse_dihedrals,
                             'ffio_torsion_torsion': self.parse_torsion_torsion,
                             'ffio_constraints': self.parse_constraints,
                             'ffio_exclusions': self.parse_exclusions,
                             'ffio_restraints': self.parse_restraints
                             }
        #LOADING Ffio blocks
        logger.debug("Reading ffio block...")
        #MRS: warning -- currently no check to avoid duplicated molecule names. Investigate.
        i = 0
        j = 0
        while i < (len(self.ffio_blockpos)-1):
            j = self.fmct_blockpos[i]
            while ':::' not in self.lines[j]:
                j+=1
            # make sure we have reasonable molecular names.
            molname = self.lines[j+1].strip()
            molname = molname.replace("\"","") # get rid of quotation marks so we can find unique names
            if molname == "":
                molname = "Molecule_"+str(len(molnames)+1)
            molnames.append(molname)
            self.load_ffio_block(molname, self.ffio_blockpos[i], self.fmct_blockpos[i+1]-1)
            i+=1
        i = 0
        #LOAD RAW BOX VECTOR-Same throughout cms
        logger.debug("Reading Box Vector...")
        self.load_box_vector(self.lines, self.fmct_blockpos[0], self.atom_blockpos[0])
        return self.system
    def write_vdwtypes_and_sites(self, molecule):
        """Emit the ffio_vdwtypes and ffio_sites sections for one molecule.

        Walks the molecule's atoms once, collecting one site line per atom
        and one deduplicated vdw-type line per atom type (converted to the
        constants implied by the system combination rule).  Returns the
        lines of both sections.
        """
        logger.debug(" -Writing vdwtypes...")
        i = 0
        sites = []
        vdwtypes = []
        sig = None
        ep = None
        stemp = None
        etemp = None
        combrule = self.system.combination_rule
        for atom in molecule.atoms:
            i+=1
            # include residue number/name columns only when the atom carries them
            if atom.residue_index:
                sites.append(' {:3d} {:5s} {:9.8f} {:9.8f} {:2s} {:1d} {:4s}\n'.format(
                        i, 'atom',
                        atom._charge[0].value_in_unit(units.elementary_charge),
                        atom._mass[0].value_in_unit(units.atomic_mass_unit),
                        atom.atomtype[0], atom.residue_index, atom.residue_name))
            else:
                sites.append(' {:3d} {:5s} {:9.8f} {:9.8f} {:2s}\n'.format(
                        i, 'atom',
                        atom._charge[0].value_in_unit(units.elementary_charge),
                        atom._mass[0].value_in_unit(units.atomic_mass_unit),
                        atom.atomtype[0]))
            sig = float(atom.sigma[0].value_in_unit(units.angstroms))
            ep = float(atom.epsilon[0].value_in_unit(units.kilocalorie_per_mole))
            if combrule == 'Multiply-C6C12': #MRS: seems like this should be automated more?
                # convert sigma/epsilon to C6 = 4*eps*sig^6 and C12 = C6*sig^6
                stemp = ep * (4 * (sig**6))
                etemp = stemp * (sig**6)
            elif combrule in ['Lorentz-Berthelot','Multiply-Sigeps']:
                stemp = sig
                etemp = ep
            vdwstring = ' {:2s} {:18s} {:8.8f} {:8.8f}\n'.format(atom.atomtype[0],
                                  "LJ12_6_sig_epsilon",
                                  float(stemp), float(etemp))
            if vdwstring not in vdwtypes:
                vdwtypes.append(vdwstring)
        lines = []
        lines.append(" ffio_vdwtypes[{:d}] {{\n".format(len(vdwtypes)))
        lines.append(" s_ffio_name\n")
        lines.append(" s_ffio_funct\n")
        lines.append(" r_ffio_c1\n")
        lines.append(" r_ffio_c2\n")
        lines.append(" :::\n")
        i = 0
        for v in vdwtypes:
            i+=1
            lines.append(' {:d}{:2s}'.format(i,v))
        lines.append(" :::\n")
        lines.append(" }\n")
        logger.debug(" -Writing sites...")
        lines.append(" ffio_sites[{:d}] {{\n".format(len(sites)))
        lines.append(" s_ffio_type\n")
        lines.append(" r_ffio_charge\n")
        lines.append(" r_ffio_mass\n")
        lines.append(" s_ffio_vdwtype\n")
        if len(sites[0].split()) > 5: # fix this to explicitly ask if resnr is in here rather than length
            lines.append(" i_ffio_resnr\n")
            lines.append(" s_ffio_residue\n")
        lines.append(" :::\n")
        for s in sites:
            lines.append(' {:s}'.format(s))
        lines.append(" :::\n")
        lines.append(" }\n")
        return lines
def write_bonds(self, moleculetype):
logger.debug(" -Writing bonds...")
dlines = list()
hlines = list()
hlines.append('ffio_bonds_placeholder\n')
hlines.append(" i_ffio_ai\n")
hlines.append(" i_ffio_aj\n")
hlines.append(" s_ffio_funct\n")
hlines.append(" r_ffio_c1\n")
hlines.append(" r_ffio_c2\n")
hlines.append(" :::\n")
i = 0
bondlist = sorted(list(moleculetype.bond_forces), key=lambda x: (x.atom1, x.atom2))
for bond in bondlist:
atoms = [bond.atom1,bond.atom2]
kwds = self.get_parameter_kwds_from_force(bond)
names, paramlists = self.canonical_bond(bond, kwds, direction = 'from')
# could in general return multiple types and paramlists
for nbond, name in enumerate(names):
i += 1
converted_bond = self.desmond_bonds[name](*atoms, **paramlists[nbond])
line = ' {:d} {:d} {:d} {:s}'.format(i, atoms[0], atoms[1], name)
bond_params = self.get_parameter_list_from_force(converted_bond)
param_units = self.unitvars[converted_bond.__class__.__name__]
for param, param_unit in zip(bond_params, param_units):
line += " {:15.8f}".format(param.value_in_unit(param_unit))
line += '\n'
dlines.append(line)
header = " ffio_bonds[{:d}] {{\n".format(i)
hlines = end_header_section(i==0,header,hlines)
dlines.append(" :::\n")
dlines.append(" }\n")
hlines.extend(dlines)
return hlines
    def write_angles(self, moleculetype):
        """Emit the ffio_angles section for one molecule type and return its lines."""
        logger.debug(" -Writing angles...")
        dlines = list()
        hlines = list()
        hlines.append(" ffio_angles_placeholder\n")
        hlines.append(" i_ffio_ai\n")
        hlines.append(" i_ffio_aj\n")
        hlines.append(" i_ffio_ak\n")
        hlines.append(" s_ffio_funct\n")
        hlines.append(" r_ffio_c1\n")
        hlines.append(" r_ffio_c2\n")
        hlines.append(" :::\n")
        i = 0
        # sort for deterministic output ordering
        anglelist = sorted(list(moleculetype.angle_forces), key=lambda x: (x.atom1,x.atom2,x.atom3))
        for angle in anglelist:
            atoms = [angle.atom1,angle.atom2,angle.atom3]
            kwds = self.get_parameter_kwds_from_force(angle)
            names, paramlists = self.canonical_angle(angle, kwds, direction = 'from')
            # could return multiple names and kwd lists
            for nangle, name in enumerate(names):
                i+=1
                converted_angle = self.desmond_angles[name](*atoms, **paramlists[nangle])
                line = ' {:d} {:d} {:d} {:d} {:s}'.format(i, atoms[0], atoms[1], atoms[2], name)
                angle_params = self.get_parameter_list_from_force(converted_angle)
                param_units = self.unitvars[converted_angle.__class__.__name__]
                for param, param_unit in zip(angle_params, param_units):
                    line += " {:15.8f}".format(param.value_in_unit(param_unit))
                line += '\n'
                dlines.append(line)
        header = " ffio_angles[{:d}] {{\n".format(i)
        hlines = end_header_section(i==0,header,hlines)
        dlines.append(" :::\n")
        dlines.append(" }\n")
        hlines.extend(dlines)
        return hlines
    def write_dihedrals(self, moleculetype):
        """Emit the ffio_dihedrals section for one molecule type.

        Every row is padded with zeros out to 8 constant columns so all
        dihedral types share one table.  Returns the section's lines.
        """
        logger.debug(" -Writing dihedrals...")
        dlines = list()
        hlines = list()
        hlines.append(" ffio_dihedrals_placeholder\n")
        hlines.append(" i_ffio_ai\n")
        hlines.append(" i_ffio_aj\n")
        hlines.append(" i_ffio_ak\n")
        hlines.append(" i_ffio_al\n")
        hlines.append(" s_ffio_funct\n")
        # we assume the maximum number of dihedral terms
        hmax = 8
        # assume the maximum number of dihedral terms (8) to simplify things for now
        # NOTE(review): this names the columns r_ffio_c0..c7 while other sections
        # start at c1 -- confirm this matches what the reader expects.
        for ih in range(hmax):
            hlines.append(" r_ffio_c{:d}\n".format(ih))
        hlines.append(" :::\n")
        i = 0
        # sorting dihedrals by first index
        dihedrallist = sorted(list(moleculetype.dihedral_forces), key=lambda x: (x.atom1, x.atom2, x.atom3, x.atom4))
        for dihedral in dihedrallist:
            atoms = [dihedral.atom1,dihedral.atom2,dihedral.atom3,dihedral.atom4]
            kwds = self.get_parameter_kwds_from_force(dihedral)
            names, paramlists = self.canonical_dihedral(dihedral, kwds, direction = 'from')
            for ndihedrals, name in enumerate(names):
                i+=1
                line = ' {:d} {:d} {:d} {:d} {:d} {:s}'.format(i, atoms[0], atoms[1], atoms[2], atoms[3], name)
                converted_dihedral= self.desmond_dihedrals[name](*atoms,**paramlists[ndihedrals])
                dihedral_params = self.get_parameter_list_from_force(converted_dihedral)
                param_units = self.unitvars[converted_dihedral.__class__.__name__]
                # NOTE(review): '{:15.8}' (general format, no 'f') is used here,
                # unlike the '{:15.8f}' in write_bonds/write_angles -- confirm
                # this rendering is intended.
                for param, param_unit in zip(dihedral_params, param_units):
                    line += " {:15.8}".format(param.value_in_unit(param_unit))
                for j in range(8-len(dihedral_params)):
                    line += " {:6.3f}".format(0.0)
                line += '\n'
                dlines.append(line)
        header = " ffio_dihedrals[{:d}] {{\n".format(i)
        hlines = end_header_section(i==0,header,hlines)
        dlines.append(" :::\n")
        dlines.append(" }\n")
        hlines.extend(dlines)
        return hlines
    def write_torsion_torsion(self, moleculetype):
        # currently just cmap
        """Emit the ffio_torsion_torsion section (CMAP entries only), plus
        the cmap charts themselves when any entry was written.  Returns the
        section's lines.
        """
        logger.debug(" -Writing torsion-torsions...")
        hlines = list()
        dlines = list()
        hlines.append(" ffio_torsion_torsion_placeholder\n")
        hlines.append(" i_ffio_ai\n")
        hlines.append(" i_ffio_aj\n")
        hlines.append(" i_ffio_ak\n")
        hlines.append(" i_ffio_al\n")
        hlines.append(" i_ffio_am\n")
        hlines.append(" i_ffio_an\n")
        hlines.append(" i_ffio_ao\n")
        hlines.append(" i_ffio_ap\n")
        hlines.append(" s_ffio_func\n")
        hlines.append(" i_ffio_c1\n")
        hlines.append(" :::\n")
        i = 0
        for torsiontorsion in moleculetype.torsiontorsion_forces:
            i+=1
            # only type of torsion/torsion is CMAP currently
            dlines.append(' {:d} {:d} {:d} {:d} {:d} {:d} {:d} {:d} {:d} {:s} {:d}\n'.format(
                    i,
                    int(torsiontorsion.atom1), int(torsiontorsion.atom2),
                    int(torsiontorsion.atom3), int(torsiontorsion.atom4),
                    int(torsiontorsion.atom5), int(torsiontorsion.atom6),
                    int(torsiontorsion.atom7), int(torsiontorsion.atom8),
                    'cmap', torsiontorsion.chart))
        header = " ffio_torsion_torsion[{:d}] {{\n".format(i)
        hlines = end_header_section(i==0,header,hlines)
        dlines.append(" :::\n")
        dlines.append(" }\n")
        hlines.extend(dlines)
        dlines = list()
        # write out the cmap terms: for now, write out all the
        # charts. Later, we can scan through and only print out the ones we use
        # and only include the relevant charts
        if (i > 0): # only include cmap_charts if we need to
            cmap_charts = cmap_parameters.get_cmap_charts()
            for chart in cmap_charts:
                chartlines = chart.split('\n')
                for line in chartlines:
                    dlines.append(line + '\n')
        hlines.extend(dlines)
        return hlines
def write_exclusions(self, moleculetype):
logger.debug(" -Writing exclusions...")
hlines = list()
dlines = list()
hlines.append(" ffio_exclusions_placeholder\n")
hlines.append(" i_ffio_ai\n")
hlines.append(" i_ffio_aj\n")
hlines.append(" :::\n")
i = 0
if moleculetype.nrexcl == 0:
# Should probably be determined entirely by the bonds,
# since settles now adds bonds. For now, leave this in
# for Desmond to Desmond conversion, where nrexcl is not
# determined. Probably should switch eventually.
exclusionlist = sorted(list(moleculetype.exclusions), key=lambda x: (x[0], x[1]))
for exclusion in moleculetype.exclusions:
i+=1
dlines.append(' {:d} {:d} {:d}\n'.format(i, int(exclusion[0]), int(exclusion[1])))
else:
if moleculetype.nrexcl > 4:
warn("Can't handle more than excluding 1-4 interactions right now!")
fullbondlist = []
fullbondlist = sorted(list(moleculetype.bond_forces), key=lambda x: (x.atom1, x.atom2))
# exclude HarmonicPotential types, which do not have exclusions.
bondlist = [bond for bond in fullbondlist if (not isinstance(bond, HarmonicPotentialBond))]
# first, figure out the first appearance of each atom in the bondlist
currentatom = 0
atompos = []
bondindex = 0
for molecule in moleculetype.molecules:
nsize = len(molecule.atoms)+1
break # only need the first
atombonds = np.zeros([nsize,8],int) # assume max of 8 for now
natombonds = np.zeros(nsize,int)
for bond in bondlist:
atombonds[bond.atom1,natombonds[bond.atom1]] = bond.atom2
natombonds[bond.atom1] += 1
atombonds[bond.atom2,natombonds[bond.atom2]] = bond.atom1
natombonds[bond.atom2] += 1
for atom in range(1,nsize):
atomexclude = set() # will be a unique set
# need to make this recursive! And there must be a better algorithm
for j1 in range(natombonds[atom]):
toatom1 = atombonds[atom,j1];
atomexclude.add(toatom1)
if moleculetype.nrexcl > 1:
for j2 in range(natombonds[toatom1]):
toatom2 = atombonds[toatom1,j2]
atomexclude.add(toatom2)
if moleculetype.nrexcl > 2:
for j3 in range(natombonds[toatom2]):
toatom3 = atombonds[toatom2,j3]
atomexclude.add(toatom3)
if moleculetype.nrexcl > 3:
for j4 in range(natombonds[toatom3]):
toatom4 = atombonds[toatom1,j4]
atomexclude.add(toatom4)
uniqueexclude = set(atomexclude)
for a in atomexclude:
if (a > atom):
i+=1
dlines.append(' {:d} {:d} {:d}\n'.format(i, atom, a))
header = " ffio_exclusions[{:d}] {{\n".format(i)
hlines = end_header_section(i==0,header,hlines)
dlines.append(" :::\n")
dlines.append(" }\n")
hlines.extend(dlines)
return hlines
def write_pairs(self, moleculetype):
logger.debug(" -Writing pairs...")
dlines = list()
hlines = list()
hlines.append("ffio_pairs_placeholder\n")
hlines.append(" i_ffio_ai\n")
hlines.append(" i_ffio_aj\n")
hlines.append(" s_ffio_funct\n")
hlines.append(" r_ffio_c1\n")
hlines.append(" r_ffio_c2\n")
hlines.append(" :::\n")
i = 0
for pair in sorted(list(moleculetype.pair_forces), key=lambda x: (x.atom1, x.atom2)):
atoms = ' {:d} {:d} '.format(pair.atom1, pair.atom2)
# first, the COUL part.
if pair.__class__ in (LjDefaultPair, LjqDefaultPair, LjSigepsPair, LjCPair):
# the first two appear to be duplicates: consider merging.
if pair.scaleQQ:
scaleQQ = pair.scaleQQ
else:
scaleQQ = self.system.coulomb_correction
i += 1
dlines += ' {:d} {:s} Coulomb {:10.8f} <>\n'.format(i, atoms, scaleQQ)
elif pair._class in (LjqSigepsPair, LjqCPair):
warn("Desmond does not support pairtype {:s}!".format(pair.__class__.__name__ )) # may not be true?
else:
warn("Unknown pair type {:s}!".format(pair.__class__.__name__))
# now the LJ part.
if pair.__class__ in (LjDefaultPair,LjqDefaultPair):
if pair.scaleLJ:
scaleLJ = pair.scaleLJ
else:
scaleLJ = self.system.lj_correction
i += 1
dlines += ' {:d} {:s} LJ {:10.8f} <>\n'.format(i, atoms, scaleLJ)
elif pair.__class__ in (LjSigepsPair, LjqSigepsPair, LjCPair, LjqCPair):
# Check logic here -- not clear that we can correctly determine which type it is.
# Basically, I think it's whether scaleLJ is defined or not.
if pair.__class__ in (LjCPair, LjqCPair):
epsilon = 0.25 * (pair.C6**2) / pair.C12 # (16*eps^2*sig^12 / 4 eps*sig^12) = 4 eps
sigma = (0.25 * pair.C6 / epsilon)**(1.0/6.0) # (0.25 * 4 eps sig^6 / eps)^(1/6)
elif pair.__class__ in (LjSigepsPair, LjqCPair):
epsilon = pair.epsilon
sigma = pair.sigma
i += 1
dlines += ' {:d} {:s} LJ12_6_sig_epsilon {:10.8f} {:10.8f}\n'.format(i, atoms,
sigma.value_in_unit(units.angstroms),
epsilon.value_in_unit(units.kilocalorie_per_mole))
else:
warn("Unknown pair type {:s}!".format(pair.__class__.__name__))
header = " ffio_pairs[{:d}] {{\n".format(i)
hlines = end_header_section(i==0,header,hlines)
dlines.append(" :::\n")
dlines.append(" }\n")
hlines.extend(dlines)
return hlines
def write_constraints(self, moleculetype):
logger.debug(" -Writing constraints...")
isHOH = False
if len(moleculetype.rigidwaters) > 0:
alen = 3
clen = 3
else:
alen = 0
clen = 0
alen_max = alen
clen_max = clen
for constraint in moleculetype.constraints:
if constraint.type[0:2] == 'AH':
alen = constraint.n+1
clen = alen-1
if alen_max < alen:
alen_max = alen
clen_max = clen
# we now know the maximum length of all constraint types
# not sure we need to sort these, but makes it easier to debug
constraintlist = sorted(list(moleculetype.constraints),key=lambda x: x.atom1)
dlines = list()
hlines = list()
i = 0
for constraint in constraintlist: #calculate the max number of atoms in constraint
i+=1
if constraint.type == 'HOH':
cline = ' {:d} {:d} {:d} {:d} '.format(i,int(constraint.atom1),int(constraint.atom2),int(constraint.atom3))
for j in range(alen_max-3):
cline += '0 '
cline += constraint.type
cline += ' {:10.8f}'.format(float(constraint.length1.value_in_unit(units.degrees)))
cline += ' {:10.8f}'.format(float(constraint.length2.value_in_unit(units.angstroms)))
cline += ' {:10.8f}'.format(float(constraint.length2.value_in_unit(units.angstroms)))
for j in range(clen_max-3):
cline += ' <>'
elif constraint.type[0:2] == 'AH':
alen = constraint.n+1
clen = alen-1
catoms = [constraint.atom1]
clengths = []
for j in range(1,alen+1):
atomname = 'atom'+str(j+1)
lengthname = 'length'+str(j)
if hasattr(constraint,atomname):
catoms.append(getattr(constraint,atomname))
clengths.append(getattr(constraint,lengthname))
cline = ' {:d} '.format(i)
for j in range(alen):
cline += ' {:d} '.format(int(catoms[j]))
for j in range(alen,alen_max):
cline += ' <> '
cline += constraint.type
for j in range(clen):
cline += ' {:10.8f}'.format(float(clengths[j].value_in_unit(units.angstroms)))
for j in range(clen,clen_max):
cline += ' <>'
cline += '\n'
dlines.append(cline)
# now need to add the constraints specified through settles. Only one settles per molecule
for rigidwater in moleculetype.rigidwaters:
i += 1
# Assumes the water arrangement O, H, H, which might not always be the case. Consider adding detection.
cline = ' {:d} {:d} {:d} {:d} '.format(i, rigidwater.atom1, rigidwater.atom2, rigidwater.atom3)
for j in range(alen_max-3):
cline += '0 '
cline += ' HOH '
dOH = rigidwater.dOH.value_in_unit(units.angstroms)
dHH = rigidwater.dHH.value_in_unit(units.angstroms)
angle = 2.0*math.asin(0.5*dHH/dOH)*(180/math.pi) # could automate conversion. . .
cline += ' {:.8f} {:.8f} {:.8f}'.format(angle,dOH,dOH)
cline += '\n'
for j in range(alen,alen_max):
cline += ' 0.0'
dlines.append(cline)
hlines.append(" ffio_constraints[{:d}] {{\n".format(i))
if (i==0):
hlines.append(" :::\n")
else:
letters = ['i','j','k','l','m','n','o','p','q']
for j in range(alen_max):
hlines.append(' i_ffio_a{:s}\n'.format(letters[j]))
hlines.append(' s_ffio_funct\n')
for j in range(clen_max):
hlines.append(' r_ffio_c{:d}\n'.format(j+1))
hlines.append(" :::\n")
dlines.append(" :::\n")
dlines.append(" }\n")
hlines.extend(dlines)
return hlines
def write(self):
    """Serialize this topology to a Desmond CMS file.

    Writes ``self.system`` out to the path in ``self.cms_file``: a version
    header, a first ``f_m_ct`` block holding the full system (box vectors,
    all atoms, all bonds), then one ``f_m_ct`` block per molecule type with
    its own atoms, bonds and ``ffio_ff`` force-field sections.
    """
    lines = list()
    # NOTE(review): pos and name appear unused in this method -- confirm before removing.
    pos = 0
    name = ''
    logger.warning("MacroModel atom type is not defined in other files, is set to 1 for all cases as it must be validly defined for desmond files to run. However, it does not affect the energies.")
    # for all CMS files: the m2io version header block
    lines.append('{\n')
    lines.append(' s_m_m2io_version\n')
    lines.append(' :::\n')
    lines.append(' 2.0.0\n')
    lines.append('}\n')
    #FIRST F_M_CT BLOCK (the "full_system" block containing every atom)
    logger.debug("Writing first f_m_ct...")
    lines.append('f_m_ct {\n')
    lines.append(' s_m_title\n')
    for c in self.atom_box_vars:
        lines.append(' {:s}\n'.format(c))
    lines.append(' s_ffio_ct_type\n')
    lines.append(' :::\n')
    #box vector (3x3, written row by row in angstroms)
    bv = self.system.box_vector
    lines.append(' "Desmond file converted by InterMol"\n')
    for bi in range(3):
        for bj in range(3):
            lines.append('{:22.11f}\n'.format(float(bv[bi][bj].value_in_unit(units.angstroms))))
    lines.append(' full_system\n')
    #M_ATOM
    apos = len(lines) #pos of where m_atom will be; will need to overwite later based on the number of atoms
    lines.append('m_atom\n')
    lines.append(' # First column is atom index #\n')
    for vars in self.atom_col_vars:
        if '_pdb_atom' not in vars:
            lines.append(' {:s}\n'.format(vars))
    lines.append(' :::\n')
    i = 0
    nmol = 0
    # totalatoms[k] is the cumulative atom count after the k-th molecule type;
    # used below to offset bond indices into the global atom numbering.
    totalatoms = []
    totalatoms.append(0)
    for moleculetype in self.system._molecule_types.values():
        for molecule in moleculetype.molecules:
            for atom in molecule.atoms:
                i += 1
                line = ' {:d} {:d}'.format(i,1) #HAVE TO PUT THE 1 HERE OR ELSE DESMOND DIES, EVEN THOUGH IT DOESN'T USE IT
                for j in range(3):
                    line += " {:10.8f}".format(float(atom._position[j].value_in_unit(units.angstroms)))
                line += " {:2d} {:4s} {:2d} {:2s}".format(
                    atom.residue_index,
                    '"{:s}"'.format(atom.residue_name),
                    atom.atomic_number,
                    '"{:s}"'.format(atom.name))
                # velocities default to zero when the atom carries none
                if np.any(atom._velocity):
                    for j in range(3):
                        line += " {:10.8f}".format(float(atom._velocity[j].value_in_unit(units.angstroms / units.picoseconds)))
                else:
                    for j in range(3):
                        line += " {:10.8f}".format(0)
                lines.append(line + '\n')
        totalatoms.append(i)
    # back-patch the m_atom header now that the atom count is known
    lines[apos] = " m_atom[{:d}] {{\n".format(i)
    lines.append(' :::\n')
    lines.append(' }\n')
    bpos = len(lines)
    i = 0
    #M_BOND: header lines (hlines) and data lines (dlines) built separately
    hlines = list()
    dlines = list()
    hlines.append(' m_bond_placeholder\n')
    hlines.append(' i_m_from\n')
    hlines.append(' i_m_to\n')
    hlines.append(' i_m_order\n')
    hlines.append(' i_m_from_rep\n')
    hlines.append(' i_m_to_rep\n')
    hlines.append(' :::\n')
    i = 0
    nonecnt = 0
    for moleculetype in self.system._molecule_types.values():
        # sort the bondlist because Desmond requires the first time a bond is listed to have
        # the atoms in ascending order
        repeatmol = len(moleculetype.molecules)
        #MRS: need to be fixed; gromacs loads in one set of bonds per molecue; desmond loads in all
        # OrderedSet isn't indexable so get the first molecule by iterating.
        for molecule in moleculetype.molecules:
            atoms_per_molecule = len(molecule.atoms)
            # all should have the same, once we have info from one, break.
            break
        bondlist = sorted(list(moleculetype.bond_forces), key=lambda x: (x.atom1,x.atom2))
        for n in range(repeatmol):
            for bond in bondlist:
                if bond and bond.order:
                    i += 1
                    dlines.append(' {:d} {:d} {:d} {:d} {:d} {:d}\n'.format(i,
                                  bond.atom1 + n*atoms_per_molecule + totalatoms[nmol],
                                  bond.atom2 + n*atoms_per_molecule + totalatoms[nmol],
                                  int(bond.order),
                                  1,
                                  1))
                elif not bond:
                    nonecnt+=1
        if nonecnt > 0:
            logger.debug('FOUND {:d} BONDS THAT DO NOT EXIST'.format(nonecnt))
        nmol +=1
    hlines[0] = ' m_bond[{:d}] {{\n'.format(i)
    if (i > 0):
        lines.extend(hlines)
        lines.extend(dlines)
        lines.append(' :::\n')
        lines.append(' }\n')
    lines.append('}\n')
    #WRITE OUT ALL FFIO AND F_M_CT BLOCKS (one per molecule type)
    for molecule_name, moleculetype in self.system.molecule_types.items():
        logger.debug('Writing molecule block {:s}...'.format(molecule_name))
        #BEGINNING BLOCK
        logger.debug(" Writing f_m_ct...")
        lines.append('f_m_ct {\n')
        lines.append(' s_m_title\n')
        for c in self.atom_box_vars:
            lines.append(' {:s}\n'.format(c))
        lines.append(' s_ffio_ct_type\n')
        lines.append(' :::\n')
        lines.append(' "' + molecule_name + '"\n')
        for bi in range(3):
            for bj in range(3):
                lines.append('{:22.11f}\n'.format(float(bv[bi][bj].value_in_unit(units.angstroms))))
        lines.append(' solute\n')
        #M_ATOMS
        logger.debug(" Writing m_atoms...")
        apos = len(lines) #pos of where m_atom will be; will need to overwite later based on the number of atoms
        lines.append('m_atom\n')
        lines.append(' # First column is atom index #\n')
        for vars in self.atom_col_vars:
            if '_pdb_atom' not in vars: # kludge, have better filter
                lines.append(' {:s}\n'.format(vars))
        lines.append(' :::\n')
        i = 0
        for molecule in moleculetype.molecules:
            for atom in molecule.atoms:
                i += 1
                #NOT SURE WHAT TO PUT FOR MMOD TYPE; 1 is currently used.
                #This can't be determined currently from the information provided,
                # unless it is stored previous, nor is it used by desmond
                line = ' {:d} {:d}'.format(i,1)
                for j in range(3):
                    line += " {:10.8f}".format(float(atom._position[j].value_in_unit(units.angstroms)))
                line += " {:2d} {:4s} {:2d} {:2s}".format(
                    atom.residue_index,
                    '"{:s}"'.format(atom.residue_name),
                    atom.atomic_number,
                    '"{:s}"'.format(atom.name))
                if np.any(atom._velocity):
                    for j in range(3):
                        line += " {:10.8f}".format(float(atom._velocity[j].value_in_unit(units.angstroms / units.picoseconds)))
                else:
                    for j in range(3):
                        line += " {:10.8f}".format(0)
                lines.append(line + '\n')
        lines[apos] = ' m_atom[{:d}] {{\n'.format(i)
        lines.append(' :::\n')
        lines.append(' }\n')
        logger.debug(" Writing m_bonds...")
        hlines = list()
        dlines = list()
        hlines.append('m_bond_placeholder\n')
        hlines.append(' i_m_from\n')
        hlines.append(' i_m_to\n')
        hlines.append(' i_m_order\n')
        hlines.append(' i_m_from_rep\n')
        hlines.append(' i_m_to_rep\n')
        hlines.append(' :::\n')
        i = 0
        nonecnt = 0
        repeatmol = len(moleculetype.molecules)
        for molecule in moleculetype.molecules:
            atoms_per_molecule = len(molecule.atoms)
            break
        # NOTE(review): sorted on atom1 only here, unlike the (atom1, atom2)
        # key used for the full-system block above -- confirm intentional.
        bondlist = sorted(list(moleculetype.bond_forces), key=lambda x: x.atom1)
        for n in range(repeatmol):
            for bond in bondlist:
                if bond and bond.order:
                    i += 1
                    dlines.append(' {:d} {:d} {:d} {:d} {:d} {:d}\n'.format(i,
                                  bond.atom1 + n*atoms_per_molecule,
                                  bond.atom2 + n*atoms_per_molecule,
                                  int(bond.order),
                                  1,
                                  1))
                else:
                    nonecnt+=1
        if nonecnt > 0:
            logger.debug('FOUND {:d} BONDS THAT DO NOT EXIST'.format(nonecnt))
        header = ' m_bond[{:d}] {{\n'.format(i)
        if (i>0):
            hlines = end_header_section(False,header,hlines)
            lines.extend(hlines)
            lines.extend(dlines)
            lines.append(' :::\n')
            lines.append(' }\n')
        # only need the first molecule
        molecule = next(iter(moleculetype.molecules))
        logger.debug(" Writing ffio...")
        lines.append(' ffio_ff {\n')
        lines.append(' s_ffio_name\n')
        lines.append(' s_ffio_comb_rule\n')
        lines.append(' i_ffio_version\n')
        lines.append(' :::\n')
        #Adding Molecule Name
        if "Viparr" in molecule_name:
            lines.append(' Generated by Viparr\n')
        else:
            lines.append(' "{:s}"\n'.format(molecule_name))
        #Adding Combination Rule
        if self.system.combination_rule == 'Multiply-C6C12':
            lines.append(' C6C12\n') # this may not exist in DESMOND, or if so, need to be corrected
        elif self.system.combination_rule == 'Lorentz-Berthelot':
            lines.append(' ARITHMETIC/GEOMETRIC\n')
        elif self.system.combination_rule == 'Multiply-Sigeps':
            lines.append(' GEOMETRIC\n')
        #Adding Version
        lines.append(' 1.0.0\n') #All files had this, check if version is 1.0.0
        # per-moleculetype force-field sections, each returns its own lines
        lines += self.write_vdwtypes_and_sites(molecule)
        lines += self.write_bonds(moleculetype)
        lines += self.write_angles(moleculetype)
        lines += self.write_dihedrals(moleculetype)
        lines += self.write_torsion_torsion(moleculetype)
        lines += self.write_exclusions(moleculetype)
        lines += self.write_pairs(moleculetype)
        lines += self.write_constraints(moleculetype)
        #STILL NEED TO ADD RESTRAINTS
        lines.append(" }\n")
        lines.append("}\n")
    with open(self.cms_file, 'w') as fout:
        for line in lines:
            fout.write(line)
|
mrshirts/InterMol
|
intermol/desmond/desmond_parser.py
|
Python
|
mit
| 85,216
|
[
"Desmond",
"Gromacs",
"MacroModel"
] |
17e17ca4838f10822f789bdfa801cf41f69543a0f632f6ab98ee8242b752532e
|
# Copyright (C) 2003 CAMP
# Please see the accompanying LICENSE file for further information.
"""
Python wrapper functions for the ``C`` package:
Basic Linear Algebra Subroutines (BLAS)
See also:
http://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms
and
http://www.netlib.org/lapack/lug/node145.html
"""
import numpy as np
from gpaw.utilities import is_contiguous
from gpaw import debug
import _gpaw
def scal(alpha, x):
    """alpha x

    Performs the in-place operation::

      x <- alpha * x

    A complex ``alpha`` requires a contiguous complex ``x``; a float
    ``alpha`` accepts a contiguous float or complex ``x``.
    """
    if isinstance(alpha, complex):
        assert is_contiguous(x, complex)
    else:
        assert isinstance(alpha, float)
        assert x.dtype in [float, complex]
        assert x.flags.contiguous
    _gpaw.scal(alpha, x)
def gemm(alpha, a, b, beta, c, transa='n'):
    """General Matrix Multiply.

    Performs the operation::

      c <- alpha * b.a + beta * c

    If transa is "n", ``b.a`` denotes the matrix multiplication defined by::

        _
        \
  (b.a)        =  ) b   * a
       ijkl...   /_  ip    pjkl...
                  p

    If transa is "t" or "c", ``b.a`` denotes the matrix multiplication
    defined by::

        _
        \
  (b.a)   =  ) b      * a
       ij   /_  iklm...  jklm...
           klm...

    where in case of "c" also complex conjugate of a is taken.

    All dtypes must agree (all-float with float scalars, or all-complex);
    the shape/stride asserts below mirror what the C-level gemm expects.
    """
    assert np.isfinite(c).all()
    assert (a.dtype == float and b.dtype == float and c.dtype == float and
            isinstance(alpha, float) and isinstance(beta, float) or
            a.dtype == complex and b.dtype == complex and c.dtype == complex)
    if transa == 'n':
        assert a.size == 0 or a[0].flags.contiguous
        assert c.flags.contiguous or c.ndim == 2 and c.strides[1] == c.itemsize
        assert b.ndim == 2
        assert b.strides[1] == b.itemsize
        assert a.shape[0] == b.shape[1]
        assert c.shape == b.shape[0:1] + a.shape[1:]
    else:
        assert a.flags.contiguous
        assert b.size == 0 or b[0].flags.contiguous
        assert c.strides[1] == c.itemsize
        assert a.shape[1:] == b.shape[1:]
        assert c.shape == (b.shape[0], a.shape[0])
    _gpaw.gemm(alpha, a, b, beta, c, transa)
def gemv(alpha, a, x, beta, y, trans='t'):
    """General Matrix Vector product.

    Performs the operation::

      y <- alpha * a.x + beta * y

    ``a.x`` denotes matrix multiplication, where the product-sum is
    over the entire length of the vector x and
    the first dimension of a (for trans='n'), or
    the last dimension of a (for trans='t' or 'c').

    If trans='c', the complex conjugate of a is used. The default is
    trans='t', i.e. behaviour like np.dot with a 2D matrix and a vector.

    Example::

      >>> y_m = np.dot(A_mn, x_n)
      >>> # or better yet
      >>> y_m = np.zeros(A_mn.shape[0], A_mn.dtype)
      >>> gemv(1.0, A_mn, x_n, 0.0, y_m)
    """
    assert (a.dtype == float and x.dtype == float and y.dtype == float and
            isinstance(alpha, float) and isinstance(beta, float) or
            a.dtype == complex and x.dtype == complex and y.dtype == complex)
    assert a.flags.contiguous
    assert y.flags.contiguous
    assert x.ndim == 1
    # y holds one axis less than a (the contracted one)
    assert y.ndim == a.ndim-1
    if trans == 'n':
        assert a.shape[0] == x.shape[0]
        assert a.shape[1:] == y.shape
    else:
        assert a.shape[-1] == x.shape[0]
        assert a.shape[:-1] == y.shape
    _gpaw.gemv(alpha, a, x, beta, y, trans)
def axpy(alpha, x, y):
    """alpha x plus y.

    Performs the in-place operation::

      y <- alpha * x + y

    x and y must share dtype and shape and be contiguous.
    """
    if isinstance(alpha, complex):
        assert is_contiguous(x, complex) and is_contiguous(y, complex)
    else:
        assert isinstance(alpha, float)
        assert x.dtype in [float, complex]
        assert x.dtype == y.dtype
        assert x.flags.contiguous and y.flags.contiguous
    assert x.shape == y.shape
    _gpaw.axpy(alpha, x, y)
def czher(alpha, x, a):
    """alpha x * x.conj() + a.

    Performs the rank-1 hermitian update::

      a <- alpha * x * x.conj() + a

    where x is a N element vector and a is a N by N hermitian matrix, alpha
    is a real scalar.
    """
    assert isinstance(alpha, float)
    assert is_contiguous(x, complex) and is_contiguous(a, complex)
    assert x.flags.contiguous and a.flags.contiguous
    assert x.ndim == 1 and a.ndim == 2
    assert x.shape[0] == a.shape[0]
    _gpaw.czher(alpha, x, a)
def rk(alpha, a, beta, c, trans='c'):
    """Rank-k update of a matrix.

    Performs the operation::

                        dag
      c <- alpha * a . a    + beta * c

    where ``a.b`` denotes the matrix multiplication defined by::

             _
             \
      (a.b) = ) a       * b
         ij  /_  ipklm...  pjklm...
            pklm...

    ``dag`` denotes the hermitian conjugate (complex conjugation plus a
    swap of axis 0 and 1).

    Only the lower triangle of ``c`` will contain sensible numbers.
    """
    assert np.isfinite(c).all()
    assert (a.dtype == float and c.dtype == float or
            a.dtype == complex and c.dtype == complex)
    assert a.flags.contiguous
    assert a.ndim > 1
    if trans == 'n':
        assert c.shape == (a.shape[1], a.shape[1])
    else:
        assert c.shape == (a.shape[0], a.shape[0])
    assert c.strides[1] == c.itemsize
    _gpaw.rk(alpha, a, beta, c, trans)
def r2k(alpha, a, b, beta, c):
    """Rank-2k update of a matrix.

    Performs the operation::

                        dag        cc           dag
      c <- alpha * a . b    + alpha  * b . a       + beta * c

    where ``a.b`` denotes the matrix multiplication defined by::

             _
             \
      (a.b) = ) a       * b
         ij  /_  ipklm...  pjklm...
            pklm...

    ``cc`` denotes complex conjugation.

    ``dag`` denotes the hermitian conjugate (complex conjugation plus a
    swap of axis 0 and 1).

    Only the lower triangle of ``c`` will contain sensible numbers.
    """
    assert np.isfinite(c).all()
    assert (a.dtype == float and b.dtype == float and c.dtype == float or
            a.dtype == complex and b.dtype == complex and c.dtype == complex)
    assert a.flags.contiguous and b.flags.contiguous
    # Use ndarray.ndim here: np.rank() was deprecated and has been removed
    # from NumPy, and this also matches the equivalent check in rk().
    assert a.ndim > 1
    assert a.shape == b.shape
    assert c.shape == (a.shape[0], a.shape[0])
    assert c.strides[1] == c.itemsize
    _gpaw.r2k(alpha, a, b, beta, c)
def dotc(a, b):
    """Dot product, conjugating the first vector with complex arguments.

    Returns the value of the operation::

       _
       \   cc
        ) a      * b
       /_  ijk...   ijk...
      ijk...

    ``cc`` denotes complex conjugation.  The sum runs over all elements,
    whatever the shape, so a and b only need matching shapes.
    """
    assert ((is_contiguous(a, float) and is_contiguous(b, float)) or
            (is_contiguous(a, complex) and is_contiguous(b,complex)))
    assert a.shape == b.shape
    return _gpaw.dotc(a, b)
def dotu(a, b):
    """Dot product, NOT conjugating the first vector with complex arguments.

    Returns the value of the operation::

       _
       \
        ) a      * b
       /_  ijk...   ijk...
      ijk...

    The sum runs over all elements, so a and b only need matching shapes.
    """
    assert ((is_contiguous(a, float) and is_contiguous(b, float)) or
            (is_contiguous(a, complex) and is_contiguous(b,complex)))
    assert a.shape == b.shape
    return _gpaw.dotu(a, b)
def _gemmdot(a, b, alpha=1.0, beta=1.0, out=None, trans='n'):
    """Matrix multiplication using gemm.

    return reference to out, where::

      out <- alpha * a . b + beta * out

    If out is None, a suitably sized zero array will be created.

    ``a.b`` denotes matrix multiplication, where the product-sum is
    over the last dimension of a, and either
    the first dimension of b (for trans='n'), or
    the last dimension of b (for trans='t' or 'c').

    If trans='c', the complex conjugate of b is used.
    """
    # Store original shapes so the result can be reshaped back at the end.
    ashape = a.shape
    bshape = b.shape
    # Vector-vector multiplication is handled directly by the C-level dot.
    if a.ndim == 1 and b.ndim == 1:
        assert out is None
        if trans == 'c':
            return alpha * _gpaw.dotc(b, a)  # dotc conjugates *first* argument
        else:
            return alpha * _gpaw.dotu(a, b)
    # Map all arrays to 2D arrays.
    a = a.reshape(-1, a.shape[-1])
    if trans == 'n':
        b = b.reshape(b.shape[0], -1)
    else:  # 't' or 'c'
        b = b.reshape(-1, b.shape[-1])
    # Shape of the 2D product: b.shape[1] for trans='n', b.shape[0] otherwise.
    # (A redundant per-branch assignment of outshape was removed -- this
    # single expression covers both cases.)
    outshape = a.shape[0], b.shape[trans == 'n']
    if out is None:
        # (ATLAS can't handle uninitialized output array)
        out = np.zeros(outshape, a.dtype)
    else:
        out = out.reshape(outshape)
    # Apply BLAS gemm routine.  Note the swapped operand order: gemm
    # computes c <- alpha * b.a + beta * c.
    gemm(alpha, b, a, beta, out, trans)
    # Determine actual shape of result array.
    if trans == 'n':
        outshape = ashape[:-1] + bshape[1:]
    else:  # 't' or 'c'
        outshape = ashape[:-1] + bshape[:-1]
    return out.reshape(outshape)
def _rotate(in_jj, U_ij, a=1., b=0., out_ii=None, work_ij=None):
    """Perform matrix rotation using gemm

    For the 2D input matrices in, U, do the rotation::

      out <- a * U . in . U^d + b * out

    where '.' denotes matrix multiplication and '^d' the hermitian conjugate.

    work_ij is a temporary work array for storing the intermediate product.

    out_ii, and work_ij are created if not given.

    The method returns a reference to out.
    """
    if work_ij is None:
        work_ij = np.zeros_like(U_ij)
    if out_ii is None:
        out_ii = np.zeros(U_ij.shape[:1] * 2, U_ij.dtype)
    # For real input a transpose suffices; complex input needs the conjugate.
    if in_jj.dtype == float:
        trans = 't'
    else:
        trans = 'c'
    # work = in . U^T  (gemm computes c <- alpha * b.a), then out = a*U.work + b*out
    gemm(1., in_jj, U_ij, 0., work_ij, 'n')
    gemm(a, U_ij, work_ij, b, out_ii, trans)
    return out_ii
# In production mode, bypass the Python-level assertion wrappers above and
# bind the names straight to the C extension (or the bare helpers).  In
# debug mode, keep the checked wrappers and add argument-validating
# front-ends for gemmdot and rotate.
if not debug:
    scal = _gpaw.scal
    gemm = _gpaw.gemm
    gemv = _gpaw.gemv
    axpy = _gpaw.axpy
    rk = _gpaw.rk
    r2k = _gpaw.r2k
    dotc = _gpaw.dotc
    dotu = _gpaw.dotu
    gemmdot = _gemmdot
    rotate = _rotate
else:
    def gemmdot(a, b, alpha=1.0, beta=1.0, out=None, trans='n'):
        """Checked front-end for _gemmdot (see its docstring)."""
        assert a.flags.contiguous
        assert b.flags.contiguous
        assert a.dtype == b.dtype
        if trans == 'n':
            assert a.shape[-1] == b.shape[0]
        else:
            assert a.shape[-1] == b.shape[-1]
        if out is not None:
            assert out.flags.contiguous
            assert a.dtype == out.dtype
            assert a.ndim > 1 or b.ndim > 1
            if trans == 'n':
                assert out.shape == a.shape[:-1] + b.shape[1:]
            else:
                assert out.shape == a.shape[:-1] + b.shape[:-1]
        return _gemmdot(a, b, alpha, beta, out, trans)

    def rotate(in_jj, U_ij, a=1., b=0., out_ii=None, work_ij=None):
        """Checked front-end for _rotate (see its docstring)."""
        assert in_jj.dtype == U_ij.dtype
        assert in_jj.flags.contiguous
        assert U_ij.flags.contiguous
        assert in_jj.shape == U_ij.shape[1:] * 2
        if out_ii is not None:
            assert out_ii.dtype == in_jj.dtype
            assert out_ii.flags.contiguous
            assert out_ii.shape == U_ij.shape[:1] * 2
        if work_ij is not None:
            assert work_ij.dtype == in_jj.dtype
            assert work_ij.flags.contiguous
            assert work_ij.shape == U_ij.shape
        return _rotate(in_jj, U_ij, a, b, out_ii, work_ij)
|
robwarm/gpaw-symm
|
gpaw/utilities/blas.py
|
Python
|
gpl-3.0
| 11,661
|
[
"GPAW"
] |
42be37c58728d994244516f1ebb5553bef80fa5c3bfef05e84bc37496a241541
|
# these commands get executed in the current scope
# of each new shell (but not for canned commands)
from Autodesk.Revit.DB import *
from Autodesk.Revit.DB.Architecture import *
from Autodesk.Revit.DB.Analysis import *
uidoc = __revit__.ActiveUIDocument
doc = __revit__.ActiveUIDocument.Document
from Autodesk.Revit.UI import TaskDialog
from Autodesk.Revit.UI import UIApplication
def alert(msg):
    """Show *msg* to the user in a Revit task dialog."""
    TaskDialog.Show('RevitPythonShell', msg)
def quit():
    """Close the shell window."""
    __window__.Close()

# allow the familiar exit() spelling as well
exit = quit
def get_selected_elements(doc):
    """Return the elements currently selected in the active UI document.

    API change in Revit 2016 makes the old ``Selection.Elements`` access
    throw an error, so the 2016+ ``GetElementIds`` API is tried first and
    the legacy access is used as a fallback on older Revit versions.
    """
    try:
        # Revit 2016: resolve element ids to elements ourselves
        return [doc.GetElement(eid)
                for eid in __revit__.ActiveUIDocument.Selection.GetElementIds()]
    except Exception:
        # old method (pre-2016); a narrowed catch replaces the previous bare
        # `except:` so SystemExit/KeyboardInterrupt are no longer swallowed.
        return list(__revit__.ActiveUIDocument.Selection.Elements)
# snapshot of the current selection at shell start-up
selection = get_selected_elements(doc)
# convenience variable for first element in selection
if len(selection):
    s0 = selection[0]
# ------------------------------------------------------------------------------
import clr
from Autodesk.Revit.DB import ElementSet, ElementId
class RevitLookup(object):
    """Bridge to the RevitLookup snooping add-in, if it is installed."""

    def __init__(self, uiApplication):
        """
        for RevitSnoop to function properly, it needs to be instantiated
        with a reference to the Revit Application object.
        """
        # find the RevitLookup plugin among the loaded add-ins
        try:
            rlapp = [app for app in uiApplication.LoadedApplications
                     if app.GetType().Namespace == 'RevitLookup'
                     and app.GetType().Name == 'App'][0]
        except IndexError:
            # plugin not installed; lookup() will report this to the user
            self.RevitLookup = None
            return
        # tell IronPython about the assembly of the RevitLookup plugin
        clr.AddReference(rlapp.GetType().Assembly)
        import RevitLookup
        self.RevitLookup = RevitLookup
        # See note in CollectorExt.cs in the RevitLookup source:
        self.RevitLookup.Snoop.CollectorExts.CollectorExt.m_app = uiApplication
        self.revit = uiApplication

    def lookup(self, element):
        """Open the RevitLookup snoop dialog for *element*.

        Accepts an element, an int or ElementId (resolved via the active
        document), or a list of elements (wrapped in an ElementSet).
        """
        if not self.RevitLookup:
            print('RevitLookup not installed. Visit https://github.com/jeremytammik/RevitLookup to install.')
            return
        if isinstance(element, int):
            element = self.revit.ActiveUIDocument.Document.GetElement(ElementId(element))
        if isinstance(element, ElementId):
            element = self.revit.ActiveUIDocument.Document.GetElement(element)
        if isinstance(element, list):
            elementSet = ElementSet()
            for e in element:
                elementSet.Insert(e)
            element = elementSet
        form = self.RevitLookup.Snoop.Forms.Objects(element)
        form.ShowDialog()
# single shared bridge instance for the shell session
_revitlookup = RevitLookup(__revit__)

def lookup(element):
    """Snoop *element* in RevitLookup (module-level convenience wrapper)."""
    _revitlookup.lookup(element)
# ------------------------------------------------------------------------------
# a fix for the __window__.Close() bug introduced with the non-modal console
class WindowWrapper(object):
    """Delegating proxy around the shell window.

    Exists to fix the ``__window__.Close()`` bug introduced with the
    non-modal console: ``Close`` is marshalled onto the window's dispatcher
    thread, while every other attribute falls through to the wrapped window.
    """

    def __init__(self, win):
        self.win = win

    def Close(self):
        # marshal the close call onto the window's own UI thread
        target = self.win
        target.Dispatcher.Invoke(lambda *_: target.Close())

    def __getattr__(self, name):
        # anything not defined on the wrapper is served by the real window
        return getattr(self.win, name)

    def set_font_sizes(self, size):
        """Apply *size* to both the REPL and the editor widgets."""
        content = self.win.Content
        self.rps_repl = content.Children[0].Children[0].Content.Children[0]
        self.rps_editor = content.Children[2].Children[1].Children[0]
        for widget in (self.rps_repl, self.rps_editor):
            widget.FontSize = size
# replace the raw window object with the thread-safe wrapper
__window__ = WindowWrapper(__window__)
|
architecture-building-systems/revitpythonshell
|
RevitPythonShell/DefaultConfig/init.py
|
Python
|
mit
| 3,563
|
[
"VisIt"
] |
6733fb262431b6ee1bb2fde43d9b71a09300e869eeeb359f675ee3bff630c5ad
|
# -*- coding: utf-8 -*-
"""
This file is part of pyCMBS.
(c) 2012- Alexander Loew
For COPYING and LICENSE details, please refer to the LICENSE file
"""
from pycmbs.data import Data
import os
from pycmbs.netcdf import *
import numpy as np
class Icon(Data):
    """
    Main class for ICON data handling (unstructured-grid data).
    """

    def __init__(self, filename, gridfile, varname, read=False, **kwargs):
        """
        Parameters
        ----------
        filename : str
            filename of data file
        gridfile : str
            filename of grid definition file
        varname : str
            name of variable to handle
        read : bool
            specify if data should be read immediately
            NOTE(review): this flag is currently not acted upon here --
            callers must invoke read() themselves; confirm against
            pycmbs.Data before honoring it.
        """
        Data.__init__(self, filename, varname, **kwargs)
        self.gridfile = gridfile
        self.gridtype = 'unstructured'

    def read(self, time_var='time'):
        """
        This is a special routine for reading data from ICON structure
        a bit redundant to Data.read()

        Parameters
        ----------
        time_var : str
            name of time variable (default='time')

        Raises
        ------
        ValueError
            if the data or grid file is missing, the variable cannot be
            read, or no scale_factor is available.
        """
        print('Reading ICON data ...')

        if not os.path.exists(self.filename):
            raise ValueError('File not existing: %s' % self.filename)
        if not os.path.exists(self.gridfile):
            raise ValueError('File not existing: %s' % self.gridfile)

        # --- time variable name (used for the netCDF lookup below)
        self.time_var = time_var

        # --- data field [time, ncell]
        self.data = self.read_netcdf(self.varname)
        # Check for a missing variable *before* touching .shape -- the
        # original code unpacked the shape first, which would have raised
        # an AttributeError instead of this intended error.
        if self.data is None:
            raise ValueError('The data in the file %s is not existing. \
This must not happen!' % self.filename)
        nt, ncell = self.data.shape
        # reshape so we have a common 3D structure like always in pyCMBS
        self.data = self.data.reshape((nt, 1, ncell))

        if self.scale_factor is None:
            raise ValueError('The scale_factor for file %s is NONE, \
this must not happen!' % self.filename)
        self.data *= self.scale_factor

        # --- read lat/lon (grid cell centers and vertices, radians -> degrees)
        File = NetCDFHandler()
        File.open_file(self.gridfile, 'r')
        # grid cell center coordinates
        self.lon = File.get_variable('clon') * 180. / np.pi
        self.lat = File.get_variable('clat') * 180. / np.pi
        self.ncell = len(self.lon)
        self.vlon = File.get_variable('clon_vertices') * 180. / np.pi
        self.vlat = File.get_variable('clat_vertices') * 180. / np.pi
        File.close()

        # --- read time variable
        if self.time_var is not None:
            # returns either None or a masked array
            self.time = self.read_netcdf(self.time_var)
            if hasattr(self.time, 'mask'):
                self.time = self.time.data
            else:
                # Bug fix: the original had `self.time is None`, a no-op
                # comparison statement; assignment was clearly intended.
                self.time = None
            if self.time is not None:
                if self.time.ndim != 1:
                    # remove singletone dimensions
                    self.time = self.time.flatten()
        else:
            self.time = None

        # --- determine time --> convert to python timestep
        if self.time is not None:
            self.set_time()
|
pygeo/pycmbs
|
pycmbs/icon.py
|
Python
|
mit
| 3,243
|
[
"NetCDF"
] |
2a22844c489aa71abf2dbea11534558857916b35e78eee2bea02e2abff201012
|
# -*- coding: utf-8 -*-
#
# if_curve.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
IF curve example
----------------
This example illustrates how to measure the I-F curve of a neuron.
The program creates a small group of neurons and injects a noisy current
:math:`I(t) = I_mean + I_std*W(t)`
where :math:`W(t)` is a white noise process.
The program systematically drives the current through a series of values in
the two-dimensional `(I_mean, I_std)` space and measures the firing rate of
the neurons.
In this example, we measure the I-F curve of the adaptive exponential
integrate and fire neuron (``aeif_cond_exp``), but any other neuron model that
accepts current inputs is possible. The model and its parameters are
supplied when the IF_curve object is created.
"""
import numpy
import nest
import shelve
###############################################################################
# Here we define which model and the neuron parameters to use for measuring
# the transfer function.
# Neuron model whose transfer function is measured below.
model = 'aeif_cond_exp'
# Parameters handed to nest.Create(); presumably NEST's usual aeif units
# (pA, mV, nF, nS, ms) -- confirm against the NEST model documentation.
params = {'a': 4.0,
          'b': 80.8,
          'V_th': -50.4,
          'Delta_T': 2.0,
          'I_e': 0.0,
          'C_m': 281.0,
          'g_L': 30.0,
          'V_reset': -70.6,
          'tau_w': 144.0,
          't_ref': 5.0,
          'V_peak': -40.0,
          'E_L': -70.6,
          'E_ex': 0.,
          'E_in': -70.}
class IF_curve():
    """Measure a neuron model's I-F curve over a grid of (I_mean, I_std)."""

    t_inter_trial = 200.  # Interval between two successive measurement trials
    t_sim = 1000.         # Duration of a measurement trial
    n_neurons = 100       # Number of neurons
    n_threads = 4         # Number of threads to run the simulation

    def __init__(self, model, params=None):
        self.model = model
        self.params = params
        self.build()
        self.connect()

    def build(self):
        #######################################################################
        # We reset NEST to delete information from previous simulations
        # and adjust the number of threads.

        nest.ResetKernel()
        nest.local_num_threads = self.n_threads

        #######################################################################
        # We create neurons and devices with specified parameters.

        self.neuron = nest.Create(self.model, self.n_neurons, self.params)
        self.noise = nest.Create('noise_generator')
        self.spike_recorder = nest.Create('spike_recorder')

    def connect(self):
        #######################################################################
        # We connect the noisy current to the neurons and the neurons to
        # the spike recorders.

        nest.Connect(self.noise, self.neuron, 'all_to_all')
        nest.Connect(self.neuron, self.spike_recorder, 'all_to_all')

    def output_rate(self, mean, std):
        # Rebuild for every trial: build() calls ResetKernel, so the network
        # must be recreated and reconnected from scratch each time.
        self.build()
        self.connect()

        #######################################################################
        # We adjust the parameters of the noise according to the current
        # values.
        # NOTE(review): stop=1000. is hardcoded and happens to equal t_sim;
        # confirm it should track t_sim if that class attribute is changed.

        self.noise.set(mean=mean, std=std, start=0.0, stop=1000., origin=0.)

        # We simulate the network and calculate the rate.

        nest.Simulate(self.t_sim)
        # events per neuron per second, averaged over the population
        rate = self.spike_recorder.n_events * 1000. / (1. * self.n_neurons * self.t_sim)
        return rate

    def compute_transfer(self, i_mean=(400.0, 900.0, 50.0),
                         i_std=(0.0, 600.0, 50.0)):
        #######################################################################
        # We loop through all possible combinations of `(I_mean, I_sigma)`
        # and measure the output rate of the neuron.

        self.i_range = numpy.arange(*i_mean)
        self.std_range = numpy.arange(*i_std)
        self.rate = numpy.zeros((self.i_range.size, self.std_range.size))
        nest.set_verbosity('M_WARNING')
        for n, i in enumerate(self.i_range):
            print('I = {0}'.format(i))
            for m, std in enumerate(self.std_range):
                self.rate[n, m] = self.output_rate(i, std)
# Run the full measurement for the chosen model and parameters.
transfer = IF_curve(model, params)
transfer.compute_transfer()

###############################################################################
# After the simulation is finished, we store the data into a file for
# later analysis.

with shelve.open(model + '_transfer.dat') as dat:
    dat['I_mean'] = transfer.i_range
    dat['I_std'] = transfer.std_range
    dat['rate'] = transfer.rate
|
sdiazpier/nest-simulator
|
pynest/examples/if_curve.py
|
Python
|
gpl-2.0
| 5,046
|
[
"NEURON"
] |
678709d8105a6d1963cff2190be471ecbf941c01e85c0775450129ee8538660d
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
# test_records = frappe.get_test_records('Maintenance Visit')
class TestMaintenanceVisit(unittest.TestCase):
    """Placeholder suite for the Maintenance Visit doctype; no tests yet."""
    pass
|
indictranstech/osmosis-erpnext
|
erpnext/support/doctype/maintenance_visit/test_maintenance_visit.py
|
Python
|
agpl-3.0
| 299
|
[
"VisIt"
] |
6db29d789403f234bc740802aa48c1e8e90278a61dbeddf37a91e49e7d6b65e4
|
__author__ = 'sibirrer'
#this file contains a class to make a moffat profile
class Moffat(object):
    """
    Evaluate a 2D Moffat light profile.

    (The previous docstring described a Gaussian with derivative/Hessian
    support -- a copy-paste error; this class only evaluates the profile.)
    """

    def function(self, x, y, amp, alpha, beta, center_x, center_y):
        """
        Return the Moffat profile
        amp * (1 + r^2 / alpha^2)^(-beta), with r^2 measured from
        (center_x, center_y).

        :param x: x-coordinate(s) of evaluation point
        :param y: y-coordinate(s) of evaluation point
        :param amp: amplitude (profile value at the center)
        :param alpha: width parameter
        :param beta: slope parameter
        :param center_x: profile center, x
        :param center_y: profile center, y
        """
        x_shift = x - center_x
        y_shift = y - center_y
        return amp * (1 + (x_shift**2 + y_shift**2) / alpha**2)**(-beta)
|
sibirrer/astrofunc
|
astrofunc/LightProfiles/moffat.py
|
Python
|
mit
| 486
|
[
"Gaussian"
] |
be7036277a7cf8a1b5ac550c5ab008e31b74371557f30b5d47304445ae93fd88
|
"""
Implements non-blocking search operation(s) in a separate non-modal window
that tracks progress while searches are running. For each search that finishes,
the main thread updates the progress bar to provide the user with an
indication of overall progress.
"""
from tkinter import *
from tkinter import ttk
import threading, queue
#import time
from searches.blast import blast_setup
from results import result_obj
blast_path = '/usr/local/ncbi/blast/bin'
tmp_dir = '/Users/cklinger/git/Goat/tmp'
class ProgressFrame(Frame):
    """Non-modal Tk frame that runs searches in a worker thread and shows progress."""

    def __init__(self, algorithm, search_list, callback=None,
                 callback_args=None, parent=None, no_win=True):
        Frame.__init__(self, parent)
        self.pack(expand=YES, fill=BOTH)
        self.algorithm = algorithm
        self.search_list = search_list  # list with objects and args
        self.num_todo = len(search_list)
        self.num_finished = 1  # don't index from zero, first search is number 1
        self.callback = callback
        self.callback_args = callback_args
        self.no_win = no_win
        # Make non-modal, i.e. un-closeable (closing is a no-op while running)
        self.parent = parent
        self.parent.protocol('WM_DELETE_WINDOW', lambda: None)
        # Add some information
        Label(self, text='Searching for queries using {}'.format(self.algorithm)).pack()
        # Code to add progress bar, update 'value' attr after each search
        self.p = ttk.Progressbar(self,  # parent
            orient = HORIZONTAL,
            length = 200,
            mode = 'determinate',  # specifies a set number of steps
            maximum = len(self.search_list))
        self.p.pack()
        # Add another label that can be modified on each search
        self.search_label = Label(self, text='Performing search {} of {}'.format(
            self.num_finished, self.num_todo), anchor='center', justify='center')
        self.search_label.pack(side=BOTTOM, expand=YES)

    def run(self):
        """start producer thread, consumer loop"""
        self.queue = queue.Queue()
        threading.Thread(target=self._run).start()
        self.thread_consumer()

    def _run(self):
        """Function called by the thread, runs each BLAST search"""
        robjs = []
        for sobj, qid, db, qobj, dbf, outpath, rdb, rid in self.search_list:
            # NOTE(review): only protein-vs-protein BLAST is handled here;
            # other combinations are silently skipped -- confirm intended.
            if sobj.algorithm == 'blast':
                if sobj.q_type == 'protein' and sobj.db_type == 'protein':
                    blast_search = blast_setup.BLASTp(blast_path, qobj,
                        dbf, outpath)
                    blast_search.run_from_stdin()
                    robj = result_obj.Result(rid, sobj.algorithm,
                        sobj.q_type, sobj.db_type, qid, db, sobj.name, outpath)
                    robjs.append(robj)
                    self.num_finished += 1
        self.queue.put(robjs)  # indicates success

    def thread_consumer(self):
        """Checks the queue regularly for new results"""
        # Even if there are no results to grab, update status bar each time
        self.p['value'] = self.num_finished
        self.search_label['text'] = 'Performing search {} of {}'.format(
            self.num_finished, self.num_todo)
        try:
            #print('trying')
            robjs = self.queue.get(block=False)
        except(queue.Empty):  # nothing to grab
            #print('got nothing')
            # poll again in 200 ms on the Tk event loop
            self.after(200, self.thread_consumer)
        # when finished
        else:
            #print("calling else block")
            if self.callback:
                self.callback(*robjs)
            if self.no_win:
                self.parent.destroy()
|
chris-klinger/Goat
|
gui/searches/threaded_search.py
|
Python
|
gpl-3.0
| 3,582
|
[
"BLAST"
] |
d42937928c6c51c7989791ab2ae3517919d3a2f69f770f6b59cd0a5027088bdb
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015, Adrián Gómez Pueyo and Alberto Castro
# This file is part of maxdft.
# maxdft is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# maxdft is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
# You should have received a copy of the GNU General Public License
# along with maxdft. If not, see <http://www.gnu.org/licenses/>.
import array
import math
import string
import sys
def DFT():
    """Read the DFT density from a file

    Reads the electronic density of the system, computed using the KS
    equations, from the static/density.y=0,z=0 file.  The first two
    lines are headers and are skipped; the density is taken from the
    second column of each remaining line and stored in an array ndft.
    Aborts if any value is NaN (usually caused by the minimization
    algorithms when the parameter variation gives high values for the
    IPF).  Returns the array.
    """
    filename = 'static/density.y=0,z=0'
    try:
        fread = open(filename, 'r')
    except IOError:
        print('Error opening the file ' + filename)
        # Exit non-zero: exit(0) would report success to the shell
        # even though the file could not be opened.
        sys.exit(1)
    ndft = array.array('d', [])
    with fread:
        # Skip the two header lines; density values are in column 2.
        for line in fread.readlines()[2:]:
            ndft.append(float(line.split()[1]))
    for value in ndft:
        if math.isnan(value):
            print('\n\nError reading the DFT density from the ' + filename
                  + ' file:')
            print(' Octopus is returning a NaN value for the '
                  + 'electronic density\n')
            sys.exit(2)
    return ndft
def SCH(kpar):
    """Read the SCH density from a file

    Reads the electronic density of the system, computed using the SCH
    equation, from the 'density' file (when kpar is None) or from
    'test<kpar>' otherwise.  The density is taken from the second
    column of every line and stored in an array nsch.  Aborts if any
    value is NaN (usually caused by the minimization algorithms when
    the parameter variation gives high values for the IPF).  Returns
    the array.
    """
    if kpar is None:
        filename = 'density'
    else:
        filename = 'test' + str(kpar)
    try:
        fread = open(filename, 'r')
    except IOError:
        print('Error opening the file ' + filename)
        # Exit non-zero: exit(0) would report success to the shell
        # even though the file could not be opened.
        sys.exit(1)
    nsch = array.array('d', [])
    with fread:
        # No header lines here; density values are in column 2.
        for line in fread:
            nsch.append(float(line.split()[1]))
    for value in nsch:
        if math.isnan(value):
            print('\n\nError reading the SCH density from the '
                  + filename + ' file:')
            print(' Octopus is returning a NaN value for the '
                  + 'electronic density\n')
            sys.exit(2)
    return nsch
|
albertocbarrigon/maxdft
|
src/read.py
|
Python
|
gpl-3.0
| 2,984
|
[
"Octopus"
] |
1f14c0c68adea62c99e6bc7a9f291dc9938669b2bc718fcd291fce8baf717071
|
# -*- coding: utf-8 -*-
{
"'Cancel' will indicate an asset log entry did not occur": "' 취소 ' 자산 로그 항목을 표시합니다 발생하지 않았습니다.",
"A location that specifies the geographic area for this region. This can be a location from the location hierarchy, or a 'group location', or a location that has a boundary for the area.": "위치 이 지역의 지리적 영역을 지정합니다. 이 위치 계층 위치일 수 있습니다, ' group ', 또는 해당 영역의 경계에 있는 위치.",
"Acronym of the organization's name, eg. IFRC.": 'acronym 조직의 이름, 예 ifrc.',
"Authenticate system's Twitter account": '인증할 사용자의 시스템 twitter 계정',
"Can't import tweepy": 'tweepy를 가져올 수 없습니다',
"Caution: doesn't respect the framework rules!": '주의: 프레임워크는 규칙에 대해 않습니다!',
"Format the list of attribute values & the RGB value to use for these as a JSON object, e.g.: {Red: '#FF0000', Green: '#00FF00', Yellow: '#FFFF00'}": "형식 속성 값 및 rgb 값을 사용하십시오 이러한 json 오브젝트 (예: {빨간색 목록으로 '#FF0000 ', 초록색으로 '#00FF00 ', yellow: '#FFFF00 '}",
"If selected, then this Asset's Location will be updated whenever the Person's Location is updated.": '선택한 경우, 이 자산의 위치를 사용자의 위치 갱신될 때마다 갱신됩니다.',
"If this configuration represents a region for the Regions menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": '이 구성은 region 메뉴의 영역을 나타내는 경우, 이름 메뉴에서 사용할 수 있습니다. 개인용 맵 구성에 대한 이름은 사용자의 이름으로 설정됩니다.',
"If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.": '이 필드를 채우지 않으면 다음 이 조직 최대 서명할 때 지정하는 사용자 도메인 필드에 도메인 일치하지 않는 한 이 조직의 직원은 지정됩니다.',
"If this is ticked, then this will become the user's Base Location & hence where the user is shown on the Map": '이 체크 인 경우 이 사용자의 기본 위치 및 따라서 사용자가 맵에 표시됩니다 됩니다',
"If you don't see the Hospital in the list, you can add a new one by clicking link 'Create Hospital'.": "목록에 있는 병원 보이지 않는 경우, 링크 병원 ' 추가 ' 를 눌러 새로 추가할 수 있습니다.",
"If you don't see the Office in the list, you can add a new one by clicking link 'Create Office'.": "목록에 있는 사무실 보이지 않는 경우 링크 ' 부재중 ' 추가를 클릭하여 새로 추가할 수 있습니다.",
"If you don't see the Organization in the list, you can add a new one by clicking link 'Create Organization'.": "목록에서 조직 표시되지 않으면, 링크 추가 ' 조직 ' 을 클릭하여 새 추가할 수 있습니다.",
"Instead of automatically syncing from other peers over the network, you can also sync from files, which is necessary where there's no network. You can use this page to import sync data from files and also export data to sync files. Click the link on the right to go to this page.": '대신 자동으로 네트워크를 통해 다른 피어에서 동기화, 파일, 필요한 곳에 네트워크 의 경우 동기화 수. 이 페이지에서 파일 동기화 데이터 반입 및 데이터 파일을 sync 내보낼 수 있습니다. 이 페이지로 이동할 수 있는 링크를 누르십시오.',
"Level is higher than parent's": '상위 레벨이 아닌 경우',
"Need a 'url' argument!": "' url ' 인수가 필요합니다!",
"Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": "선택사항입니다. geometry 컬럼의 이름. postgis 의 기본값은 ' the_geom'. 수",
"Parent level should be higher than this record's level. Parent level is": '상위 레벨 이상 이 레코드 레벨 이상이어야 합니다. 상위 레벨',
"Password fields don't match": '암호 필드가 일치하지 않습니다.',
"Phone number to donate to this organization's relief efforts.": '전화 번호 이 조직의 릴리프 위해 노력을 donate.',
"Please come back after sometime if that doesn't help.": '해당 되는 경우 후 sometime.',
"Quantity in %s's Inventory": '수량% s 명세에',
"Select a Room from the list or click 'Create Room'": "목록에서 미팅룸을 선택하십시오 ' 추가하십시오 미팅룸 '",
"Select a person in charge for status 'assigned'": "지정된 ' 상태 ' 에 대한 사용자 선택",
"Select this if all specific locations need a parent at the deepest level of the location hierarchy. For example, if 'district' is the smallest division in the hierarchy, then all specific locations would be required to have a district as a parent.": "이 경우 모든 특정 위치는 위치 계층 의 최상위 레벨 상위 선택하십시오. 예를 들어, ' 특별지방자치단체 ' 계층 작은 디비전으로, 모든 특정 위치를 특별지방자치단체 상위로 않아도 됩니다.",
"Select this if all specific locations need a parent location in the location hierarchy. This can assist in setting up a 'region' representing an affected area.": "이 경우 모든 특정 위치는 위치 계층 구조의 상위 위치를 선택하십시오. 영향받는 이 영역을 표시하는 ' region ' 설정하는 데 도움이 됩니다.",
"Sorry, things didn't get done on time.": '죄송합니다, 같은 시간에 수행되지 않았습니다.',
"Sorry, we couldn't find that page.": '죄송합니다. 이 페이지를 찾을 수 없습니다.',
"System's Twitter account updated": '시스템 twitter 갱신된 계정',
"The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.": "(doner (s) 이 프로젝트. 다중 값 ' 제어 ' 키를 선택할 수 있습니다.",
"The URL of the image file. If you don't upload an image file, then you must specify its location here.": '이미지 파일의 url. 이미지 파일 업로드 사용하지 않을 경우, 해당 위치를 지정해야 합니다.',
"To search by person name, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "사용자 이름으로 검색하려면, 첫 번째, 중간 또는 마지막 이름을 입력하십시오, 공백으로 구분됩니다. % 와일드 카드로 사용할 수 있습니다. ' 검색 ' 입력 모든 개인을 나열하십시오.",
"To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": "본문에 대한 검색, 본문의 id 태그를 입력하십시오. % 와일드 카드로 사용할 수 있습니다. ' 검색 ' 입력 본체를 모두 나열하십시오.",
"To search for a hospital, enter any of the names or IDs of the hospital, or the organization name or acronym, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "병원 에 대한 검색, 병원 의 이름 또는 id 중 하나를 입력하거나, 조직 이름 또는 약어, 공백으로 구분됩니다. % 와일드 카드로 사용할 수 있습니다. ' 검색 ' 입력 모든 병원 나열하십시오.",
"To search for a hospital, enter any of the names or IDs of the hospital, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "병원 에 대한 검색, 병원 의 이름 또는 id 를 공백으로 구분됩니다. % 와일드 카드로 사용할 수 있습니다. ' 검색 ' 입력 모든 병원 나열하십시오.",
"To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": "위치를 검색하려면 이름을 입력하십시오. % 와일드 카드로 사용할 수 있습니다. ' 검색 ' 입력 모든 위치 목록에 없습니다.",
"To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "사용자를 검색할 수 있는 경우, 첫 번째, 중간 또는 마지막 이름 및/또는 개인 id 번호 중 하나를 공백으로 구분하여 입력하십시오. % 와일드 카드로 사용할 수 있습니다. ' 검색 ' 입력 모든 개인을 나열하십시오.",
"To search for an assessment, enter any portion the ticket number of the assessment. You may use % as wildcard. Press 'Search' without input to list all assessments.": "평가에 대한 검색, 어떤 부분이 평가의 티켓 번호를 입력하십시오. % 와일드 카드로 사용할 수 있습니다. ' 검색 ' 입력 모든 평가를 나열하십시오.",
"Type the first few characters of one of the Person's names.": '이름 중 첫 몇글자를 입력하세요.',
"Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": '이미지 파일을 업로드하십시오. 이미지 파일 업로드 경우, url 필드에 해당 위치를 지정해야 합니다.',
"When syncing data with others, conflicts happen in cases when two (or more) parties want to sync information which both of them have modified, i.e. conflicting information. Sync module tries to resolve such conflicts automatically but in some cases it can't. In those cases, it is up to you to resolve those conflicts manually, click on the link on the right to go to this page.": '다른 때 데이터 동기화 충돌이 있는 경우 두 (또는 그 이상) 자가 이를 모두 수정, 즉 충돌 정보를 정보를 동기화할 때 발생합니다. 동기화 모듈은 충돌을 자동으로 해결할 수 시도하나 일부 경우에 이를 수 없습니다. 이러한 경우, 것은 사용자에게 이러한 충돌을 수동으로 해결할 수 없는 경우, 이 페이지로 이동할 수 있는 링크를 누르십시오.',
"You haven't made any calculations": '임의의 계산을 수행한 없음',
"couldn't be parsed so NetworkLinks not followed.": 'networklinks. 그 때문에 구문 분석할 수 없습니다.',
"includes a GroundOverlay or ScreenOverlay which aren't supported in OpenLayers yet, so it may not work properly.": '이 openlayers 아직 지원되지 않은 groundoverlay 또는 screenoverlay, 따라서 제대로 작동하지 않을 수 있습니다.',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '\\ " update\\ " \\ " field1=\'newvalue\'\\ " 와 같은 선택적 표현식입니다. 갱신하거나 조인의 결과를 삭제할 수 없습니다.',
'# of International Staff': '# 국제 직원',
'# of National Staff': '# 자국 인력을',
'%(msg)s\nIf the request type is "%(type)s", please enter the %(type)s on the next screen.': '%(msg)s\nif 요청 유형 "%(type)s", %(type)s 를 다음 화면에서 입력하십시오.',
'%(system_name)s - Verify Email': '%(system_name)s - 확인할 메일',
'%s rows deleted': '% s 행 삭제',
'%s rows updated': '% s 행',
'& then click on the map below to adjust the Lat/Lon fields': '& 맵핑 아래 lat/₩ 조정하려면 필드를 누르십시오',
'* Required Fields': '* 필수 필드',
'0-15 minutes': '0-15 분',
'1 Assessment': '1 평가',
'1 location, shorter time, can contain multiple Tasks': '1 위치, 짧은 시간에 여러 태스크를 포함할 수 있습니다',
'1-3 days': '1-3일',
'15-30 minutes': '15-30분',
'2 different options are provided here currently:': '2 다른 옵션을 현재 제공됩니다.',
'2x4 Car': '자동차 2x4',
'30-60 minutes': '30-60 분',
'4-7 days': '4-7 일',
'4x4 Car': '자동차 4x4',
'8-14 days': '8-14 일',
'A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class.': '개별 위치에 지정된 마커를 경우 필요한 기능을 클래스에 지정된 마커를 대체하기 위해 설정됩니다.',
'A Reference Document such as a file, URL or contact person to verify this data. You can type the 1st few characters of the document name to link to an existing document.': '참조 문서 같은 파일로, url 또는 이 데이터 검증하십시오. 문서 1 의 이름 몇 자를 기존 문서를 링크할 수 있습니다.',
'A brief description of the group (optional)': '간략한 설명 (선택적)',
'A file downloaded from a GPS containing a series of geographic points in XML format.': 'xml 파일 형식으로 지리적 위치 일련의 포함하는 gps 다운로드됩니다.',
'A file in GPX format taken from a GPS whose timestamps can be correlated with the timestamps on the photos to locate them on the map.': '이 파일은 시간소인이 사진이 있는 시간소인이 있는 맵에서 찾을 상관될 수 있는 gps gpx 형식으로 가져옵니다.',
'A library of digital resources, such as photos, documents and reports': '라이브러리 자원 (예: 디지털 사진, 문서 및 보고서',
'A location group can be used to define the extent of an affected area, if it does not fall within one administrative region.': '하나의 관리 region 내에 포함되지 않은 경우 위치 그룹 영향받는 영역의 범위를 정의하는 데 사용할 수 있습니다.',
'A location group is a set of locations (often, a set of administrative regions representing a combined area).': '위치 그룹 위치 세트 (종종 결합된 영역을 나타내는 관리 region 세트입니다).',
'A location group must have at least one member.': '위치 그룹에는 적어도 하나의 구성원이 있어야 합니다.',
'ABOUT THIS MODULE': '모듈 정보',
'ACCESS DATA': '액세스 데이터',
'ANY': '모두',
'API is documented here': 'api 여기에 설명되어 있습니다',
'ATC-20 Rapid Evaluation modified for New Zealand': 'atc-20 빠른 평가 뉴질랜드 수정된',
'Abbreviation': '약어',
'Ability to Fill Out Surveys': '체크아웃 기능을 서베이를 채울 수',
'Ability to customize the list of details tracked at a Shelter': '기능 shelter 추적되는 상세 목록을 사용자 정의할 수',
'Ability to customize the list of human resource tracked at a Shelter': '기능 shelter 추적되는 인적 자원 목록을 사용자 정의할 수',
'Ability to customize the list of important facilities needed at a Shelter': '기능은 shelter 필요한 중요한 기능의 목록을 사용자 수',
'Ability to view Results of Completed and/or partially filled out Surveys': '기능 완료 및/조사 결과 보기 또는 부분적으로 채워진 체크아웃할 수',
'About': '제품 정보',
'Access denied': '액세스 거부됨',
'Access to Shelter': 'shelter 에 액세스',
'Access to education services': '액세스 교육 서비스',
'Accessibility of Affected Location': '내게 필요한 옵션 받는 위치',
'Account Registered - Please Check Your Email': '계정 등록-전자 우편 확인',
'Acronym': '약어',
'Actionable by all targeted recipients': '가능한 모든 대상으로 받는',
'Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>': '실행 가능한 유일한 에서 연습 참여자; id 표시되어야 하는 연습<note>',
'Actioned?': '실행되지?',
'Actions taken as a result of this request.': '이 요청의 결과로 취한 조치.',
'Actions': '조치',
'Activate Events from Scenario templates for allocation of appropriate Resources (Human, Assets & Facilities).': '해당 자원 (인력, 자산 및 설비) 할당을 시나리오 템플리트 활성화를 이벤트.',
'Active Problems': '활성 문제',
'Activities matching Assessments:': '활동 평가 일치:',
'Activities of boys 13-17yrs before disaster': '활동 boys 재해 전에 13-17yrs',
'Activities of boys 13-17yrs now': '활동 boys 13-17yrs 이제',
'Activities of boys <12yrs before disaster': '활동 boys <12yrs 재해 전에',
'Activities of boys <12yrs now': 'boys <12yrs 활동 이제',
'Activities of children': '하위 활동',
'Activities of girls 13-17yrs before disaster': '활동 girls 13-17yrs 재해 전에',
'Activities of girls 13-17yrs now': 'girls 13-17yrs 활동 이제',
'Activities of girls <12yrs before disaster': '활동 girls <12yrs 재해 전에',
'Activities of girls <12yrs now': 'girls <12yrs 활동 이제',
'Activities': '액티비티',
'Activities:': '활동:',
'Activity Added': '활동 추가',
'Activity Deleted': '활동 삭제',
'Activity Details': '활동 세부사항',
'Activity Report': '활동 보고서',
'Activity Reports': '활동 보고서',
'Activity Type': '활동 유형',
'Activity Updated': '갱신된 활동',
'Activity': '활동',
'Add Activity Type': '추가 활동 유형',
'Add Address': '주소 추가',
'Add Alternative Item': '대체 항목 추가',
'Add Assessment Summary': '추가 평가 요약',
'Add Assessment': '평가 추가',
'Add Asset Log Entry - Change Label': '로그 항목-자산 변경할 레이블 추가',
'Add Availability': '추가 가용성',
'Add Baseline Type': '기준선 추가 유형',
'Add Baseline': '기준선 추가',
'Add Bundle': '번들 추가',
'Add Camp Service': '자녀를 서비스 추가',
'Add Camp Type': '자녀를 유형 추가',
'Add Camp': '자녀를 추가',
'Add Certificate for Course': 'certicate 코스 추가',
'Add Certification': '인증 추가',
'Add Competency': '능력 추가',
'Add Contact': '연락처 추가',
'Add Contact Information': '문의처 정보 추가',
'Add Credential': '권한 정보 추가',
'Add Credentials': '권한 정보 추가',
'Add Disaster Victims': '피해 희생 추가',
'Add Distribution.': '분배 추가.',
'Add Donor': '추가 공여',
'Add Flood Report': '플러드 보고서 추가',
'Add Group Member': '그룹 구성원 추가',
'Add Human Resource': '인적 자원 추가',
'Add Identity': 'id 추가',
'Add Image': '이미지 추가',
'Add Impact Type': '추가 영향 유형',
'Add Impact': '영향 추가',
'Add Item to Catalog': '카탈로그 항목 추가',
'Add Item to Commitment': '때,확약 항목 추가',
'Add Item to Inventory': '재고 항목 추가',
'Add Item to Request': '항목 추가 요청',
'Add Item to Shipment': '운송 항목에 대한 추가',
'Add Item': '항목 추가',
'Add Job Role': '작업 역할 추가',
'Add Key': '키 추가',
'Add Kit': '추가 kit',
'Add Level 1 Assessment': '레벨 1 평가 추가',
'Add Level 2 Assessment': '레벨 2 평가 추가',
'Add Log Entry': '로그 항목 추가',
'Add Member': '회원 추가',
'Add Membership': '멤버십 추가',
'Add Message': '메시지 추가',
'Add Mission': '추가 mission',
'Add Need Type': '필요한 추가 유형',
'Add Need': '필요한 추가',
'Add New Assessment Summary': '새 평가 요약',
'Add New Baseline Type': '새 기준선 유형 추가',
'Add New Baseline': '새 기준선 추가',
'Add New Budget': '새 예산 추가',
'Add New Bundle': '새 번들 추가',
'Add New Camp Service': '새 자녀를 서비스 추가',
'Add New Camp Type': '새 자녀를 유형 추가',
'Add New Camp': '새 자녀를 추가',
'Add New Cluster Subsector': '새 클러스터 subsector 추가',
'Add New Cluster': '새 클러스터 추가',
'Add New Commitment Item': '추가할 새 항목은 확약',
'Add New Document': '새 문서 추가',
'Add New Donor': '새 제공자 추가',
'Add New Entry': '새 항목 추가',
'Add New Event': '새 이벤트 추가',
'Add New Flood Report': '새 범람 보고서 추가하기',
'Add New Human Resource': '추가할 새 인적 자원',
'Add New Image': '새 이미지 추가하기',
'Add New Impact Type': '새 영향 유형 추가',
'Add New Impact': '새 영향 추가',
'Add New Item to Kit': '새 항목 추가 로 kit',
'Add New Key': '새 키 추가',
'Add New Level 1 Assessment': '새 레벨 1 평가 추가',
'Add New Level 2 Assessment': '새 레벨 2 평가 추가',
'Add New Member': '새 멤버 추가',
'Add New Membership': '새 구성원 추가',
'Add New Need Type': '새 하는 유형 추가',
'Add New Need': '새 추가 합니다',
'Add New Population Statistic': '새 인구 통계 추가',
'Add New Problem': '새 문제점 추가',
'Add New Rapid Assessment': '추가할 새 신속한 평가',
'Add New Received Item': '수신된 새 항목 추가',
'Add New Record': '새 레코드 추가',
'Add New Request Item': '새 품목 요청',
'Add New Request': '새 요청 추가',
'Add New River': '새 river 추가',
'Add New Role to User': '새 역할에 사용자 추가',
'Add New Scenario': '새 시나리오 추가',
'Add New Sent Item': '새 보낸 항목 추가',
'Add New Setting': '새 설정 추가',
'Add New Solution': '새 솔루션 추가',
'Add New Staff Type': '새 직원 유형 추가',
'Add New Subsector': '새 subsector 추가',
'Add New Survey Answer': '새 설문지 응답 추가',
'Add New Survey Question': '새 설문지 질문 추가',
'Add New Survey Series': '새 설문지 시리즈 추가',
'Add New Survey Template': '새 서베이 템플리트 추가',
'Add New Team': '새 팀 추가',
'Add New Ticket': '새 티켓 추가',
'Add New Track': '추가할 새 추적',
'Add New User to Role': '새 사용자 역할 추가',
'Add New': '새로 추가',
'Add Peer': '피어 추가',
'Add Person': '사용자 추가',
'Add Photo': '사진 추가',
'Add Population Statistic': '인구 통계 추가',
'Add Position': '위치 추가',
'Add Problem': '추가 문제점',
'Add Question': '질문 추가',
'Add Rapid Assessment': '빠른 평가 추가',
'Add Record': '레코드 추가',
'Add Reference Document': '참조 문서 추가',
'Add Report': '보고서 추가',
'Add Request': '요청 추가',
'Add Section': '섹션 추가',
'Add Setting': '설정 추가',
'Add Skill Equivalence': '기술 반복기에 추가',
'Add Skill Provision': '기술 프로비저닝하려면 추가',
'Add Solution': '솔루션 추가',
'Add Staff Type': '추가 직원 유형',
'Add Subscription': '등록 추가',
'Add Subsector': '추가 subsector',
'Add Survey Answer': '서베이 응답 추가',
'Add Survey Question': '서베이 질문 추가',
'Add Survey Series': '추가 조사 시리즈',
'Add Survey Template': '서베이 템플리트 추가',
'Add Team Member': '회원 추가',
'Add Team': '팀 추가',
'Add Ticket': '티켓 추가',
'Add Training': '교육 추가',
'Add Unit': '단위 추가',
'Add Volunteer Availability': '지원자 가용성 추가',
'Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead.': '같은 파일 참조 문서 추가, url 또는 이 데이터 검증하십시오. 참조 문서 입력하지 않으면, 대신 표시됩니다.',
'Add a Volunteer': '를 자발적으로 추가',
'Add a new certificate to the catalog.': '새 인증 카탈로그에 추가하십시오.',
'Add a new competency rating to the catalog.': '새 능력 등급 카탈로그에 추가하십시오.',
'Add a new course to the catalog.': '새 과정 카탈로그에 추가하십시오.',
'Add a new job role to the catalog.': '카탈로그에 새 작업 역할을 추가하십시오.',
'Add a new skill provision to the catalog.': '새로운 기술 프로비저닝 카탈로그에 추가하십시오.',
'Add a new skill to the catalog.': '새 항목을 추가하려면.',
'Add a new skill type to the catalog.': '새 항목 유형 카탈로그에 추가하십시오.',
'Add new Group': '새 그룹 추가',
'Add new Individual': '새 개별 추가',
'Add new project.': '새 프로젝트를 추가하십시오.',
'Add staff members': '스태프 구성원 추가',
'Add to Bundle': '번들에 추가',
'Add to budget': '에 예산 추가',
'Add volunteers': 'volunteers 추가',
'Add/Edit/Remove Layers': '추가/편집/계층 제거',
'Added to Group': '구성원 추가',
'Added to Team': '구성원 추가',
'Additional Beds / 24hrs': '추가 의료용/24hrs',
'Address Details': '주소 상세정보',
'Address Type': '주소 유형',
'Address added': '주소 추가',
'Address deleted': '주소 삭제',
'Address updated': '주소 갱신',
'Address': '주소',
'Addresses': '주소',
'Adequate food and water available': '적합한 식품 워터마크 사용',
'Adequate': '적절한',
'Admin Email': '관리자 전자 우편',
'Admin Name': 'Admin 이름',
'Administration': '관리',
'Adolescent (12-20)': 'adolescent (12-20)',
'Adolescent participating in coping activities': 'adolescent 활동에 참여하는 복사',
'Adult (21-50)': '성인 (21-50)',
'Adult ICU': '성인 icu',
'Adult Psychiatric': '성인 psychiatric',
'Adult female': '성인 여성',
'Adult male': '성인 남성',
'Adults in prisons': 'adults prisons 에서',
'Advanced:': '고급:',
'Advisory': '보안 권고문',
'After clicking on the button, a set of paired items will be shown one by one. Please select the one solution from each pair that you prefer over the other.': '이 단추를 누른 후, 쌍체 항목 세트를 하나씩 표시됩니다. 참고로, 다른 원하는 각 쌍에서 하나의 솔루션을 선택하십시오.',
'Age Group': '연령 그룹',
'Age group does not match actual age.': '연령 그룹 실제 나이 일치하지 않습니다.',
'Age group': '연령 그룹',
'Aggravating factors': 'aggravating 요소',
'Agriculture': '농업',
'Air Transport Service': 'air transport 서비스',
'Aircraft Crash': '항공기 충돌',
'Aircraft Hijacking': '항공기 하이잭이라고',
'Airport Closure': '공항 처리완료',
'Airspace Closure': 'airspace 마감',
'Alcohol': '알코올',
'Alert': '경보',
'All Inbound & Outbound Messages are stored here': '모든 인바운드 및 아웃바운드 메시지를 여기에 저장됩니다',
'All Resources': '모든 자원',
'All data provided by the Sahana Software Foundation from this site is licenced under a Creative Commons Attribution licence. However, not all data originates here. Please consult the source field of each entry.': '이 사이트에서 sahana software foundation 에서 제공하는 모든 데이터를 창의적 commons attribution 라이센스 하에서 licenced. 그러나, 모든 데이터 비롯됩니다. 각 항목의 소스 필드를 참조하십시오.',
'Allowed to push': '누름 수',
'Allows a Budget to be drawn up': '그릴 수 있게 예산',
'Allows authorized users to control which layers are available to the situation map.': '사용자가 허용하는 계층을 사용할 수 있는 상황 맵핑할 제어할 수 있습니다.',
'Alternative Item Details': '대체 항목 세부사항',
'Alternative Item added': '대체 항목 추가됨',
'Alternative Item deleted': '대체 항목 삭제',
'Alternative Item updated': '대체 항목 갱신',
'Alternative Item': '대체 품목',
'Alternative Items': '대체 항목',
'Alternative places for studying': '대체 연구하여 대한 작업공간',
'Ambulance Service': 'ambulance 서비스',
'An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities.': '흡입구 (시스템, 웨어하우스 관리 시스템, 상품 추적, 공급망 관리, 조달 및 기타 자산 및 자원 관리 기능을 제공합니다.',
'An item which can be used in place of another item': '다른 항목 대신 사용할 수 있는 항목',
'Analysis of Completed Surveys': '분석 완료 조사 중',
'Animal Die Off': '동물 off die',
'Animal Feed': '피드 동물',
'Antibiotics available': 'antibiotics 사용',
'Antibiotics needed per 24h': 'antibiotics 24h 당 필요한',
'Apparent Age': '피상 연령',
'Apparent Gender': '피상 성별',
'Application Deadline': '어플리케이션 최종 기한',
'Approve': '승인',
'Approved': '승인된 날짜',
'Approver': '승인자',
'Arctic Outflow': 'arctic outflow',
'Areas inspected': '영역 검사',
'Assessment Details': '평가 세부사항',
'Assessment Reported': '보고된 평가',
'Assessment Summaries': '평가 요약',
'Assessment Summary Details': '평가 요약 세부사항',
'Assessment Summary added': '추가된 평가 요약',
'Assessment Summary deleted': '삭제된 평가 요약',
'Assessment Summary updated': '갱신된 평가 요약',
'Assessment added': '평가 추가',
'Assessment admin level': '평가 관리 레벨',
'Assessment deleted': '평가 삭제',
'Assessment timeline': '시간선 평가',
'Assessment updated': '갱신된 평가',
'Assessment': '평가',
'Assessments Needs vs. Activities': '평가 vs. 하는 활동',
'Assessments and Activities': '평가 및 활동',
'Assessments': '평가',
'Assessments:': '평가:',
'Assessor': '평가자',
'Asset Details': '자산 세부사항',
'Asset Log Details': '자산 세부사항 로그',
'Asset Log Empty': '자산 빈 로그',
'Asset Log Entry Added - Change Label': '로그 항목-자산 변경할 레이블 추가',
'Asset Log Entry deleted': '자산 로그 항목 삭제',
'Asset Log Entry updated': '자산 로그 항목 갱신',
'Asset Log': '자산 로그',
'Asset Management': '자산 관리',
'Asset Number': '자산 번호',
'Asset added': '자산 추가됨',
'Asset deleted': '삭제된 자산',
'Asset removed': '제거된 자산',
'Asset updated': '자산 업데이트됨',
'Asset': '자산',
'Assets are resources which are not consumable but are expected back, so they need tracking.': '자산을 이용 않았으나 예상되는 경우 자원, 트래킹 합니다.',
'Assets': '자산',
'Assign Group': '그룹 지정',
'Assign Staff': '스태프 지정',
'Assign to Org.': '조직 할당하십시오.',
'Assign to Organization': '조직 지정',
'Assign to Person': '사용자 지정',
'Assign to Site': '사이트 지정',
'Assign': '지정',
'Assigned By': '지정한',
'Assigned To': '지정 대상',
'Assigned to Organization': '지정된 조직',
'Assigned to Person': '지정된 사용자',
'Assigned to Site': '지정된 사이트',
'Assigned to': '지정 대상',
'Assigned': '지정됨',
'At/Visited Location (not virtual)': '/방문한 위치 (가상)',
'Attend to information sources as described in <instruction>': '참석 정보가 소스에 에 설명된 대로<instruction>',
'Attribution': 'attribution',
'Author': '작성자',
'Availability': '가용성',
'Available Alternative Inventories': '사용 명세를 대체',
'Available Beds': '사용 가능한 의료용',
'Available Inventories': '사용 가능한 자원',
'Available Messages': '사용 가능한 메시지',
'Available Records': '사용 가능한 레코드',
'Available databases and tables': '데이터베이스 및 테이블 사용',
'Available for Location': '사용 위치',
'Available from': '사용 가능 원본',
'Available in Viewer?': '사용 표시기에서?',
'Available until': '사용 가능한 최종 시간',
'Avalanche': 'avalanche',
'Avoid the subject event as per the <instruction>': '주제 이벤트 대로 당 피하기<instruction>',
'Background Color for Text blocks': '텍스트 블록의 배경 색상',
'Background Color': '배경색',
'Bahai': '바하이',
'Baldness': '탈모',
'Banana': '바나나',
'Bank/micro finance': '은행/마이크로 파이낸스',
'Barricades are needed': 'barricades 필요',
'Base Layer?': '기본 ssl?',
'Base Location': '기본 위치',
'Base Site Set': '기본 사이트 설정',
'Baseline Data': '기준선 데이터',
'Baseline Number of Beds': '기준선 번호 의료용 중',
'Baseline Type Details': '기준선 유형 세부사항',
'Baseline Type added': '기준선 유형 추가',
'Baseline Type deleted': '기준선 유형 삭제',
'Baseline Type updated': '기준선 유형 갱신',
'Baseline Type': '기준선 유형',
'Baseline Types': '기준선 유형',
'Baseline added': '기준선 추가',
'Baseline deleted': '기준선 삭제',
'Baseline number of beds of that type in this unit.': '이 유형의 의료용 기준선 번호.',
'Baseline updated': '기준선 갱신',
'Baselines Details': '기준선 세부사항',
'Baselines': '기준선',
'Basic Assessment Reported': '기본 평가에서 보고된',
'Basic Assessment': '기본 평가',
'Basic Details': '기본 세부사항',
'Basic reports on the Shelter and drill-down by region': '기본, shelter 및 drill-down region 에 대한 보고서',
'Baud rate to use for your modem - The default is safe for most cases': '전송 속도를 사용자 모뎀의-기본 사용할 대부분의 스레드세이프인지',
'Baud': '보오율',
'Beam': '빔',
'Bed Capacity per Unit': 'bed 용량 단위',
'Bed Capacity': 'bed 용량',
'Bed Type': 'bed 유형',
'Bed type already registered': 'bed 유형이 이미 등록되었습니다.',
'Below ground level': '아래 접지선 레벨',
'Beneficiary Type': '수혜자입니다 유형',
'Biological Hazard': '생물학 위험',
'Biscuits': 'biscuits',
'Blizzard': 'blizzard',
'Blood Type (AB0)': '혈액 유형 (AB0)',
'Blowing Snow': 'blowing 눈',
'Boat': 'boat',
'Bodies found': '본문을 찾을 수 없음',
'Bodies recovered': '복구된 단체',
'Body Recovery Request': '본문 복구 요청',
'Body Recovery Requests': '본문 복구 요청',
'Body': 'body',
'Bomb Explosion': '폭발 bomb',
'Bomb Threat': 'bomb 위협',
'Bomb': 'bomb',
'Border Color for Text blocks': '경계 색상 텍스트 블록',
'Brand Details': '브랜드 세부사항',
'Brand added': '브랜드 추가',
'Brand deleted': '브랜드 삭제',
'Brand updated': '갱신된 브랜드',
'Brand': '브랜드',
'Brands': '브랜드',
'Bricks': 'bricks',
'Bridge Closed': '브릿지 닫힘',
'Bucket': '버킷',
'Buddhist': '불교식 달력',
'Budget Details': '예산 세부사항',
'Budget Updated': '갱신된 예산',
'Budget added': '예산 추가',
'Budget deleted': '예산 삭제',
'Budget updated': '갱신된 예산',
'Budget': '예산',
'Budgeting Module': '모듈 예산',
'Budgets': '예산',
'Buffer': '버퍼',
'Bug': '버그',
'Building Assessments': '빌드 평가',
'Building Collapsed': '빌드 접힌',
'Building Name': '빌딩 이름',
'Building Safety Assessments': '빌드 안전 평가',
'Building Short Name/Business Name': '빌드 짧은 이름/비즈니스 이름',
'Building or storey leaning': '빌드 또는 storey leaning',
'Built using the Template agreed by a group of NGOs working together as the': '템플리트 동의된 ngos 그룹에서 함께 사용하여 작업',
'Bulk Uploader': '벌크 uploader',
'Bundle Contents': '번들 컨텐츠',
'Bundle Details': 'Bundle 세부사항',
'Bundle Updated': '번들 갱신',
'Bundle added': '번들 추가',
'Bundle deleted': '번들 삭제',
'Bundle updated': '번들 갱신',
'Bundle': '번들',
'Bundles': '번들',
'Burn ICU': '충전하지 icu',
'Burn': '소모시키다',
'Burned/charred': '하드코드된/charred',
'By Facility': '기능에 의해',
'By Inventory': '자원 명세',
'CBA Women': 'cba 여성',
'CSS file %s not writable - unable to apply theme!': 'css 파일% s not installed — unable 테마를 적용할 쓰기-!',
'Calculate': '계산',
'Camp Coordination/Management': '자녀를 조정/관리',
'Camp Details': '자녀를 세부사항',
'Camp Service Details': '서비스 세부사항 캠프',
'Camp Service added': '자녀를 서비스 추가',
'Camp Service deleted': '자녀를 서비스 삭제',
'Camp Service updated': '자녀를 서비스 갱신',
'Camp Service': '자녀를 서비스',
'Camp Services': '자녀를 서비스',
'Camp Type Details': '자녀를 유형 세부사항',
'Camp Type added': '자녀를 유형 추가',
'Camp Type deleted': '자녀를 유형 삭제',
'Camp Type updated': '자녀를 유형 갱신',
'Camp Type': '자녀를 유형',
'Camp Types and Services': '자녀를 유형 및 서비스',
'Camp Types': '자녀를 유형',
'Camp added': '추가된 캠프',
'Camp deleted': '캠프 삭제됨',
'Camp updated': '갱신된 캠프',
'Camp': '캠프',
'Camps': 'camps',
'Can only disable 1 record at a time!': '한 번에 1 레코드 사용 불가능하게 할 수 있습니다.',
'Cancel Log Entry': '로그 항목 취소',
'Cancel Shipment': '선적 취소',
'Cancel': 'CANCEL(취소)',
'Canceled': '취소됨',
'Candidate Matches for Body %s': '후보자가 신체 %s 에 일치합니다',
'Canned Fish': '통조림에 든 생선',
'Cannot be empty': '비어있을 수 없음',
'Cannot disable your own account!': '사용자 고유 계정을 사용 불가능하게 할 수 없습니다.',
'Capacity (Max Persons)': '용량 (max 명)',
'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)': 'capture 정보 피해 희생 (tourists, passengers, 제품군에서 등) )',
'Capture Information on each disaster victim': 'capture 정보 각 피해 희생 (victim)',
'Capturing the projects each organization is providing and where': '각 프로젝트 제공하는 조직 및 캡처',
'Cardiology': 'cardiology',
'Cassava': 'cassava',
'Casual Labor': '일반 작업',
'Casualties': 'casualties',
'Catalog Details': '카탈로그 세부사항',
'Catalog Item added': '카탈로그 항목 추가',
'Catalog Item deleted': '카탈로그 항목 삭제',
'Catalog Item updated': '카탈로그 항목 갱신',
'Catalog Items': '카탈로그 항목',
'Catalog added': '카탈로그 추가',
'Catalog deleted': '카탈로그 삭제',
'Catalog updated': '카탈로그 갱신',
'Catalog': '카탈로그',
'Catalogs': '카탈로그',
'Categories': '범주',
'Category': '카테고리',
'Ceilings, light fixtures': 'ceilings, 표시등이 fixtures',
'Central point to record details on People': '중앙 사용자 레코드에 대한 자세한 내용은',
'Certificate Catalog': '인증서 카탈로그',
'Certificate Details': '인증서 세부사항',
'Certificate Status': '인증서 상태',
'Certificate added': '인증서 추가',
'Certificate deleted': '인증서가 삭제됨',
'Certificate updated': '인증서 갱신됨',
'Certificate': '인증',
'Certificates': '인증서',
'Certification Details': '인증 세부사항',
'Certification added': '인증 추가',
'Certification deleted': '인증 삭제',
'Certification updated': '인증 갱신',
'Certification': '인증',
'Certifications': '인증',
'Certifying Organization': '조직 인증',
'Change Password': '암호 변경',
'Check Request': '요청 확인',
'Check for errors in the URL, maybe the address was mistyped.': 'url 에 오류, maybe 주소를 잘못 확인하십시오.',
'Check if the URL is pointing to a directory instead of a webpage.': 'url 디렉토리 대신 웹 가리키는지 확인하십시오.',
'Check outbox for the message status': '메시지 상태를 outbox 확인',
'Check to delete': '삭제하려면 선택하십시오.',
'Checked': '확인',
'Checklist created': '체크리스트 작성',
'Checklist deleted': '삭제할 체크리스트',
'Checklist of Operations': '운영 점검 목록',
'Checklist updated': '갱신 점검',
'Checklist': '체크리스트',
'Chemical Hazard': '화학적 위험',
'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack': '화학적, 생물학적, 방사능, 핵무기 또는 고성능 폭발물 위협 또는 공격',
'Chicken': '닭고기',
'Child (2-11)': '하위 (2-11)',
'Child (< 18 yrs)': '하위 (< 18 세의)',
'Child Abduction Emergency': '하위 abduction 비상',
'Child headed households (<18 yrs)': '하위 머리 households (<18 세)',
'Child': '하위',
'Children (2-5 years)': '하위 (2-5 년)',
'Children (5-15 years)': '하위 (5-15 년)',
'Children (< 2 years)': '하위 (< 2 년)',
'Children in adult prisons': '하위 prisons 에 성인',
'Children in boarding schools': '하위 boarding 학교 에서',
'Children in homes for disabled children': '하위 홈은 의 하위에 대한 사용',
'Children in juvenile detention': '하위 juvenile detention)',
'Children in orphanages': '하위 orphanages 에',
'Children living on their own (without adults)': '하위 자신의 활성 (adults)',
'Children not enrolled in new school': '새 하위 학교 등록되어',
'Children orphaned by the disaster': '하위 피해 의해 분리되었으며',
'Children separated from their parents/caregivers': '하위에 상위/caregivers 구분됩니다',
'Children that have been sent to safe places': '안전한 위치에 전송된 하위',
'Children who have disappeared since the disaster': '누가 피해 이후 사라진 하위',
'Chinese (Taiwan)': '대만어',
'Cholera Treatment Capability': 'cholera 처리 기능',
'Cholera Treatment Center': 'cholera 진료 센터',
'Cholera Treatment': 'cholera 처리',
'Choose a new posting based on the new evaluation and team judgement. Severe conditions affecting the whole building are grounds for an UNSAFE posting. Localised Severe and overall Moderate conditions may require a RESTRICTED USE. Place INSPECTED placard at main entrance. Post all other placards at every significant entrance.': '새 평가 및 팀 판단에 따라 새 게시를 선택하십시오. 전체 빌드 영향을 미치는 심각한 조건을 통지일 안전하지 의 접지. 로컬화된 심각한 전반적인 중간 제한 조건을 사용해야 할 수도 있습니다. 작업공간 검사된 기본 시작 시 placard. post 모든 중요한 진입점을 전혀 다른 placards.',
'Christian': '서기',
'Church': '교회',
'City': 'city',
'Civil Emergency': 'civil 비상',
'Cladding, glazing': 'cladding, glazing',
'Click on the link %(url)s to reset your password': '링크를 누르십시오. %(url)s 사용자 암호 재설정',
'Click on the link %(url)s to verify your email': '링크를 누르십시오. %(url)s 사용자의 전자 검증하십시오',
'Clinical Laboratory': '임상 연구소',
'Clinical Operations': '임상 조작',
'Clinical Status': '임상 상태',
'Closed': '닫힘',
'Clothing': '의류',
'Cluster Details': '클러스터 세부사항',
'Cluster Distance': '클러스터 거리',
'Cluster Subsector Details': '클러스터 subsector 세부사항',
'Cluster Subsector added': '클러스터 하부영역 추가',
'Cluster Subsector deleted': '클러스터 하부영역 삭제',
'Cluster Subsector updated': '클러스터 subsector 갱신',
'Cluster Subsector': '클러스터 하부영역',
'Cluster Subsectors': '클러스터 subsectors',
'Cluster Threshold': '클러스터 임계값',
'Cluster added': '클러스터 추가',
'Cluster deleted': '클러스터 삭제',
'Cluster updated': '클러스터 갱신',
'Cluster(s)': '클러스터(들)',
'Clusters': '클러스터',
'Code': '코드',
'Cold Wave': '콜드 물결선',
'Collapse, partial collapse, off foundation': '접기, 부분 접기, foundation',
'Collective center': '콜렉티브에 center',
'Color for Underline of Subheadings': 'color 의 하위 underline 대한',
'Color of Buttons when hovering': '단추를 color 때 풍선',
'Color of bottom of Buttons when not pressed': '아래 단추 중 color 않을 때 눌렀습니다.',
'Color of bottom of Buttons when pressed': '아래 단추 중 color 때',
'Color of dropdown menus': '색상 드롭 다운 메뉴',
'Color of selected Input fields': 'color 선택한 입력 필드',
'Color of selected menu items': 'color 선택된 메뉴 항목',
'Columns, pilasters, corbels': '컬럼, pilasters, corbels',
'Combined Method': '결합 메소드',
'Come back later. Everyone visiting this site is probably experiencing the same problem as you.': '나중에 제공됩니다. 이 사이트를 방문하여 모든 는 동일한 문제점을 경험하는 것처럼.',
'Come back later.': '나중에 제공됩니다.',
'Commercial/Offices': 'commercial/사무실',
'Commit Date': '확약 날짜',
'Commit from %s': '커미트% s',
'Commit': '확약',
'Commit. Status': '확약 STATUS',
'Commiting a changed spreadsheet to the database': '데이터베이스에 변경된 스프레드시트 확약',
'Commitment Added': '추가 확약',
'Commitment Canceled': '확약 취소됨',
'Commitment Details': '위탁 세부사항',
'Commitment Item Details': '확약 항목 세부사항',
'Commitment Item added': '확약 항목 추가됨',
'Commitment Item deleted': '확약 항목 삭제',
'Commitment Item updated': '확약 항목 갱신',
'Commitment Items': '확약 항목',
'Commitment Status': '확약 상태',
'Commitment Updated': '확약 갱신',
'Commitment': '약정',
'Commitments': '약정',
'Committed By': '커미트된 의해',
'Committed': '커미트됨',
'Committing Inventory': '자원 확약',
'Communication problems': '통신 문제점',
'Community Centre': '커뮤니티 centre',
'Community Health Center': '커뮤니티 health center',
'Community Member': '커뮤니티 구성원',
'Competencies': '능력',
'Competency Details': '자격 세부사항',
'Competency Rating Catalog': '능력 등급 카탈로그',
'Competency Rating Details': '능력 평가 세부사항',
'Competency Rating added': '능력 등급 추가됩니다',
'Competency Rating deleted': '능력 등급 삭제됨',
'Competency Rating updated': '능력 등급 갱신',
'Competency Ratings': '정격 능력',
'Competency added': '능력 추가',
'Competency deleted': '능력 삭제',
'Competency updated': '갱신된 능력',
'Competency': '능력',
'Complete': 'COMPLETE(완료)',
'Completed': '완료됨',
'Compose': '작성',
'Compromised': '손상됨',
'Concrete frame': '콘크리트 프레임',
'Concrete shear wall': '콘크리트 절단 벽',
'Condition': '조건',
'Configurations': '구성',
'Configure Run-time Settings': '런타임 구성 설정',
'Confirm Shipment Received': '수신된 운송물 확인',
'Confirmed': '확인됨',
'Confirming Organization': '조직 확인',
'Conflict Details': '충돌 세부사항',
'Conflict Resolution': '충돌 해결',
'Consignment Note': '상품인지를 주',
'Constraints Only': '제한조건은',
'Consumable': '소비재',
'Contact Data': '데이터 문의하십시오.',
'Contact Details': '연락처 세부사항',
'Contact Info': '연락처 정보',
'Contact Information Added': '문의처 정보 추가',
'Contact Information Deleted': '정보는 삭제된 담당자',
'Contact Information Updated': '갱신된 접속 정보를',
'Contact Information': '연락처 정보',
'Contact Method': '연락 방법',
'Contact Name': '담당자 이름',
'Contact Person': '개인 연락처',
'Contact Phone': '담당자 전화',
'Contact details': '연락처 세부사항',
'Contact information added': '추가된 연락처 정보',
'Contact information deleted': '정보는 삭제된 담당자',
'Contact information updated': '갱신된 접속 정보를',
'Contact us': '문의',
'Contact': '연락처',
'Contacts': '연락처',
'Contents': '내용',
'Contributor': '기고자',
'Conversion Tool': '변환 도구',
'Cooking NFIs': '요리용 NFI',
'Cooking Oil': '요리용 오일',
'Coordinate Conversion': '좌표 변환',
'Coping Activities': '활동 복사',
'Copy': '복사',
'Corn': '옥수수',
'Cost Type': '비용 유형',
'Cost per Megabyte': '비용 한계',
'Cost per Minute': '분당 비용',
'Country of Residence': '거주 국가',
'Country': '국가',
'County': 'County(US 전용)',
'Course Catalog': '과정 카탈로그',
'Course Certificate Details': 'certicate 과정 세부사항',
'Course Certificate added': 'certicate 코스 추가',
'Course Certificate deleted': '코스 삭제 certicate',
'Course Certificate updated': '물론 certicate 갱신',
'Course Certificates': '과정 인증서',
'Course Details': '과정 세부사항',
'Course added': '과정 추가',
'Course deleted': '코스 삭제',
'Course updated': '과정 갱신',
'Course': '과정',
'Courses': '과정',
'Create & manage Distribution groups to receive Alerts': '작성 및 관리 경고를 수신하도록 분배 그룹',
'Create Activity Report': '활동 보고서 추가',
'Create Activity Type': '추가 활동 유형',
'Create Activity': '단위업무 추가',
'Create Assessment': '새 평가 추가',
'Create Asset': '자산 추가',
'Create Bed Type': '추가 bed 유형',
'Create Brand': '브랜드 추가',
'Create Budget': '예산 추가',
'Create Catalog Item': '카탈로그 항목 추가',
'Create Catalog': '카탈로그 추가',
'Create Certificate': '인증 추가',
'Create Checklist': '체크리스트 작성',
'Create Cholera Treatment Capability Information': 'cholera treatment 기능 정보 추가',
'Create Cluster Subsector': '클러스터 subsector 추가',
'Create Cluster': '클러스터 추가',
'Create Competency Rating': '능력 등급 추가',
'Create Contact': '연락처 추가',
'Create Course': '과정 추가',
'Create Dead Body Report': '데드 본문 보고서 추가',
'Create Event': '새 이벤트 작성',
'Create Facility': '기능 추가',
'Create Feature Layer': '추가 기능)',
'Create Group Entry': '그룹 항목 작성',
'Create Group': '그룹 추가',
'Create Hospital': '추가 병원',
'Create Identification Report': '식별 보고서 추가',
'Create Impact Assessment': '영향 평가 작성',
'Create Incident Report': '인시던트 보고서 추가',
'Create Incident': '추가 인시던트',
'Create Item Category': '항목에 카테고리 추가',
'Create Item Pack': '항목 팩 추가',
'Create Item': '새 항목 추가',
'Create Kit': '새 상품 추가',
'Create Layer': '계층 추가',
'Create Location': '위치 추가',
'Create Map Configuration': '맵 구성 추가',
'Create Marker': '마커 추가',
'Create Member': '회원 추가',
'Create Mobile Impact Assessment': '모바일 영향 평가 작성',
'Create Office': '추가 사무실',
'Create Organization': '조직 추가',
'Create Personal Effects': '개인 효과 추가',
'Create Project': '새 프로젝트 추가',
'Create Project': '프로젝트 추가',
'Create Projection': '추가 투영',
'Create Rapid Assessment': '신속한 평가 생성하기',
'Create Report': '새 보고서 추가',
'Create Request': '요청 생성하기',
'Create Resource': '자원 추가',
'Create River': '추가 강',
'Create Role': '역할 추가',
'Create Room': '강의실 추가',
'Create Scenario': '새 시나리오 작성',
'Create Sector': '섹터를 추가',
'Create Service Profile': '서비스 프로파일 추가',
'Create Shelter Service': '추가 shelter 서비스',
'Create Shelter Type': 'shelter 유형 추가',
'Create Shelter': '추가 shelter',
'Create Skill Type': '추가 기술 항목 유형',
'Create Skill': '스킬 추가',
'Create Staff Member': '스태프 구성원 추가',
'Create Status': '상태 추가',
'Create Task': '작업 생성하기',
'Create Task': '태스크 추가',
'Create Theme': '테마 추가',
'Create User': '사용자 추가',
'Create Volunteer': '지원자 추가',
'Create Warehouse': '웨어하우스 추가',
'Create a Person': '개인 추가',
'Create a group entry in the registry.': '레지스트리에 있는 그룹 항목을 작성하십시오.',
'Create, enter, and manage surveys.': '작성, 입력, 관리하는 조사합니다.',
'Creation of Surveys': '설문 생성하기',
'Credential Details': '신임 세부사항',
'Credential added': 'Credential 추가',
'Credential deleted': 'Credential 삭제',
'Credential updated': '신임 갱신',
'Credentialling Organization': 'credentialling 조직',
'Credentials': '신임',
'Credit Card': '신용 카드',
'Crime': '범죄',
'Criteria': '기준',
'Currency': '통화',
'Current Entries': '현재 항목',
'Current Group Members': '현재 그룹 구성원',
'Current Identities': '현재 id',
'Current Location': '현재 위치',
'Current Log Entries': '현재 로그 항목',
'Current Memberships': '현재 멤버쉽',
'Current Records': '현재 레코드',
'Current Registrations': '현재 등록',
'Current Status': '현재 상태',
'Current Team Members': '현재 팀 구성원',
'Current Twitter account': '현재 twitter 계정',
'Current community priorities': '현재 커뮤니티 우선순위',
'Current general needs': '현재 일반 합니다',
'Current greatest needs of vulnerable groups': '현재 가장 필요한 취약한 그룹',
'Current health problems': '현재 성능 문제점',
'Current number of patients': '현재 환자 중',
'Current problems, categories': '현재 문제점, 카테고리',
'Current problems, details': '현재 문제점, 세부사항',
'Current request': '현재 요청',
'Current response': '현재 응답',
'Current session': '현재 세션',
'Currently no Certifications registered': '현재 등록된 인증서가 없습니다',
'Currently no Competencies registered': '현재 등록된 능력 항목이 없습니다',
'Currently no Course Certificates registered': '현재 등록된 교육과정 인증서가 없습니다',
'Currently no Credentials registered': '현재 등록된 신용(신분, 자격) 증명서가 없습니다',
'Currently no Missions registered': '현재 등록된 임무가 없습니다',
'Currently no Skill Equivalences registered': '현재 등록된 기술 종류가 없습니다',
'Currently no Trainings registered': 'trainings 현재 등록된',
'Currently no entries in the catalog': '현재 카탈로그에 항목이 없습니다',
'DNA Profile': '프로파일 dna',
'DNA Profiling': 'dna 프로파일링',
'Dam Overflow': 'dam 오버플로우',
'Damage': '손상',
'Dangerous Person': '위험한 사람',
'Dashboard': '대시보드',
'Data uploaded': '데이터 업로드',
'Data': '데이터',
'Database': '데이터베이스',
'Date & Time': '날짜 및 시간',
'Date Available': '운송 가능 날짜',
'Date Received': '수령 날짜',
'Date Requested': '요청된 날짜',
'Date Required': '요청 날짜',
'Date Sent': '날짜 송신',
'Date Until': '날짜',
'Date and Time': '날짜 및 시간',
'Date and time this report relates to.': '이 보고서는 날짜 및 시간 관련시킵니다.',
'Date of Birth': '생일',
'Date of Latest Information on Beneficiaries Reached': '날짜 받아야 에 대한 최신 정보',
'Date of Report': '보고서 날짜',
'Date/Time of Find': '날짜/시간 찾기',
'Date/Time when found': '날짜/시간 때',
'Date/Time when last seen': '날짜/시간 때 마지막으로 표시된',
'Date/Time': '날짜/시간',
'Dead Body Details': '데드 본문 세부사항',
'Dead Body Reports': '데드 본문 보고서',
'Dead Body': '데드 본문',
'Dead body report added': '데드 본문 보고서 추가',
'Dead body report deleted': '데드 본문 보고서 삭제',
'Dead body report updated': '데드 본문 보고서 갱신',
'Deaths in the past 24h': 'deaths 지난 24h',
'Decimal Degrees': '10진수(도)',
'Decision': '결정',
'Decomposed': '분해될',
'Default Height of the map window.': '기본 맵 창의 높이.',
'Default Map': '기본 맵',
'Default Marker': '디폴트 마커',
'Default Width of the map window.': '맵 창의 기본 너비.',
'Default synchronization policy': '기본 동기화 정책',
'Defecation area for animals': 'defecation 영역에 대한 동물',
'Define Scenarios for allocation of appropriate Resources (Human, Assets & Facilities).': '해당 자원 (인력, 자산 및 설비) 의 할당 시나리오를 정의하십시오.',
'Defines the icon used for display of features on handheld GPS.': '휴대용 gps 의 기능을 표시하기 위해 사용되는 아이콘을 정의합니다.',
'Defines the icon used for display of features on interactive map & KML exports.': '대화식 맵 및 kml 내보내기 기능을 표시하기 위해 사용되는 아이콘을 정의합니다.',
'Defines the marker used for display & the attributes visible in the popup.': '표시 및 볼 수 있는 팝업에서 속성에 대해 사용된 마커를 정의합니다.',
'Degrees must be a number between -180 and 180': '도 사이의-180 및 180 숫자여야 합니다.',
'Dehydration': '디하이드레이션',
'Delete Alternative Item': '대안 항목 삭제',
'Delete Assessment Summary': '평가 요약을 삭제',
'Delete Assessment': '평가 삭제',
'Delete Asset Log Entry': '자산 삭제 로그 항목',
'Delete Asset': '자산 삭제',
'Delete Baseline Type': '삭제할 기준선 유형',
'Delete Baseline': '기준선 삭제',
'Delete Brand': '브랜드 삭제',
'Delete Budget': '예산 삭제',
'Delete Bundle': '번들 삭제',
'Delete Catalog Item': '목록 항목 삭제',
'Delete Catalog': '카탈로그 삭제',
'Delete Certificate': '인증서 삭제',
'Delete Certification': '인증 삭제',
'Delete Cluster Subsector': '클러스터 삭제 subsector',
'Delete Cluster': '클러스터 삭제',
'Delete Commitment Item': '삭제 확약 항목',
'Delete Commitment': '삭제 확약',
'Delete Competency Rating': '삭제할 능력 등급',
'Delete Competency': '능력 삭제',
'Delete Contact Information': '연락처 정보 삭제',
'Delete Course Certificate': '과정 certicate 삭제',
'Delete Course': '코스 삭제',
'Delete Credential': '권한 정보 삭제',
'Delete Document': '문서 삭제',
'Delete Donor': 'doner 삭제',
'Delete Entry': '항목 삭제',
'Delete Event': '이벤트 삭제',
'Delete Feature Layer': '삭제 기능을 layer',
'Delete Group': '그룹 삭제',
'Delete Hospital': '삭제할 병원',
'Delete Image': '이미지 삭제',
'Delete Impact Type': '삭제 영향 유형',
'Delete Impact': '영향 삭제',
'Delete Incident Report': '인시던트 보고서 삭제',
'Delete Item Category': '항목 카테고리 삭제',
'Delete Item Pack': '항목 팩 삭제',
'Delete Item': '항목 삭제',
'Delete Job Role': '작업 역할 삭제',
'Delete Key': '키 삭제',
'Delete Kit': 'delete kit',
'Delete Layer': '레이어 삭제',
'Delete Level 1 Assessment': '레벨 1 평가 삭제',
'Delete Level 2 Assessment': '레벨 2 평가 삭제',
'Delete Location': '위치 삭제',
'Delete Map Configuration': '맵 구성 삭제',
'Delete Marker': '마커 삭제',
'Delete Membership': '멤버쉽 삭제',
'Delete Message': '메시지 삭제',
'Delete Mission': '삭제할 임무',
'Delete Need Type': '삭제 하는 유형',
'Delete Need': '삭제 합니다',
'Delete Office': '삭제할 사무실',
'Delete Organization': '조직 삭제',
'Delete Peer': '피어 삭제',
'Delete Person': '작업자 삭제',
'Delete Photo': '사진 삭제',
'Delete Population Statistic': '인구 통계 삭제',
'Delete Position': '삭제 위치',
'Delete Project': '프로젝트 삭제',
'Delete Projection': '프로젝션 삭제',
'Delete Rapid Assessment': '빠른 평가 삭제',
'Delete Received Item': '수신된 삭제 항목',
'Delete Received Shipment': '수신된 shipment 삭제',
'Delete Record': '레코드 삭제',
'Delete Report': '보고서 삭제',
'Delete Request Item': '삭제 요청을 항목',
'Delete Request': '요청 삭제',
'Delete Resource': '자원 삭제',
'Delete Room': '강의실 삭제',
'Delete Scenario': '시나리오 삭제',
'Delete Section': '섹션 삭제',
'Delete Sector': '삭제할 섹터',
'Delete Sent Item': '삭제할 보낸 항목',
'Delete Sent Shipment': '송신된 shipment 삭제',
'Delete Service Profile': '서비스 프로파일 삭제',
'Delete Setting': '설정 삭제',
'Delete Skill Equivalence': '기술 equivalence 삭제',
'Delete Skill Provision': '삭제할 기술 제공',
'Delete Skill Type': '삭제할 항목 유형',
'Delete Skill': '스킬 삭제',
'Delete Staff Type': 'delete 직원 유형',
'Delete Status': '삭제 상태',
'Delete Subscription': '등록 삭제',
'Delete Subsector': '삭제 subsector',
'Delete Survey Answer': '삭제할 서베이 응답',
'Delete Survey Question': '서베이 질문 삭제',
'Delete Survey Series': '삭제할 서베이 시리즈',
'Delete Survey Template': '서베이 템플리트 삭제',
'Delete Training': '연계 삭제',
'Delete Unit': '단위 삭제',
'Delete User': '사용자 삭제',
'Delete Volunteer': 'delete 지원자',
'Delete from Server?': '서버에서?',
'Delete': '삭제',
'Delphi Decision Maker': 'delphi 결정',
'Demographic': '데모그래픽',
'Demonstrations': '데모',
'Dental Examination': 'dental 검사',
'Dental Profile': 'dental 프로파일',
'Describe the condition of the roads to your hospital.': '도로, 귀하의 병원 조건을 설명합니다.',
"Describe the procedure which this record relates to (e.g. 'medical examination')": '이 레코드 (예: \\ " 의학 examination\\ " 과) 프로시저를 설명합니다',
'Description of Contacts': '문의처 설명',
'Description of defecation area': 'defecation 영역의 설명',
'Description of drinking water source': '설명 식수 (소스)',
'Description of sanitary water source': '설명 오수관 (소스)',
'Description of water source before the disaster': '물 소스 설명 피해 전',
'Desire to remain with family': '원하는 계열과의 남아',
'Destination': '대상',
'Destroyed': '파괴',
'Details field is required!': '세부사항 필드는 필수입니다!',
'Details': '세부사항',
'Diaphragms, horizontal bracing': 'diaphragms, 수평 bracing',
'Dignitary Visit': 'dignitary 방문하십시오.',
'Direction': '방향',
'Disable': '사용 불가능',
'Disabled participating in coping activities': '사용 활동을 복사하는 참여',
'Disabled': '사용 불가능',
'Disabled?': '사용?',
'Disaster Victim Identification': '재해 victim 식별',
'Disaster Victim Registry': '재해 victim 레지스트리',
'Disaster clean-up/repairs': '피해 up/repairs 정리',
'Discharges/24hrs': '소모됨/24hrs',
'Discussion Forum on item': '토론 포럼 항목',
'Discussion Forum': '토론 포럼',
'Disease vectors': '질병 벡터',
'Dispensary': 'dispensary',
'Displaced Populations': '프로덕트를 모집단의',
'Displaced': '프로덕트를',
'Display Polygons?': '다각형 표시?',
'Display Routes?': '표시할 라우트?',
'Display Tracks?': '표시 추적합니다?',
'Display Waypoints?': '표시 waypoints?',
'Distance between defecation area and water source': '거리 defecation 영역 및 물 소스 사이의',
'Distance from %s:': '% 의 거리:',
'Distance(Kms)': '거리 (kms)',
'Distribution groups': '분배 그룹',
'Distribution': '배포판',
'District': '특별지방자치단체',
'Do you really want to delete these records?': '이 레코드를 삭제하시겠습니까?',
'Do you want to cancel this received shipment? The items will be removed from the Inventory. This action CANNOT be undone!': '이 받은 shipment 취소하시겠습니까? 인벤토리에서 항목이 제거됩니다. 이 조치는 실행 취소할 수 없습니다.',
'Do you want to cancel this sent shipment? The items will be returned to the Inventory. This action CANNOT be undone!': '이 shipment 보낸 취소하시겠습니까? 이 항목은 명세로 리턴됩니다. 이 조치는 실행 취소할 수 없습니다.',
'Do you want to receive this shipment?': '이 shipment?',
'Do you want to send these Committed items?': '이 확약된 항목을 보내시겠습니까?',
'Do you want to send this shipment?': '이 shipment 보내시겠습니까?',
'Document Details': '문서 세부사항',
'Document Scan': '문서 스캔',
'Document added': '문서 추가',
'Document deleted': '문서가 삭제됨',
'Document updated': '문서가 갱신됨',
'Documents and Photos': '문서 및 사진',
'Documents': '문서',
'Does this facility provide a cholera treatment center?': '이 시설이 콜레라 진료 센터를 제공합니까?',
'Doing nothing (no structured activity)': '지원 불필요 (조직적 활동 없음)',
'Dollars': '달러',
'Domain': '도메인(domain)',
'Domestic chores': '국내 chores',
'Donated': '기부',
'Donation Certificate': 'donation 인증',
'Donation Phone #': 'donation 전화번호 #',
'Donor Details': '제공자 세부사항',
'Donor added': '추가 공여',
'Donor deleted': 'doner 삭제됨',
'Donor updated': 'doner 갱신된',
'Donor': 'doner',
'Donors Report': 'donors 보고서',
'Door frame': '프레임 도어를',
'Download PDF': 'PDF 다운로드',
'Draft': 'DRAFT(초안)',
'Drainage': '드레인',
'Drawing up a Budget for Staff & Equipment across various Locations.': '직원 및 장비와 예산을 다양한 위치에 그림.',
'Drill Down by Group': '드릴 다운 그룹별',
'Drill Down by Incident': '드릴 다운 의해 인시던트',
'Drill Down by Shelter': '드릴 다운 shelter 의해',
'Driving License': '운전 면허증',
'Dug Well': 'dug 아니라',
'Duplicate?': '중복?',
'Duration': 'DURATION',
'Dust Storm': '심한 먼지',
'EMS Reason': 'ems 이유',
'EMS Status': 'ems 상태',
'ER Status Reason': 'ER 상태 원인',
'ER Status': 'ER 상태',
'Early Recovery': '빠른 복구',
'Earthquake': '지진',
'Edit Activity': '활동 편집',
'Edit Address': '주소 편집',
'Edit Alternative Item': '대체 항목 편집',
'Edit Application': '애플리케이션 편집',
'Edit Assessment Summary': '평가 요약 편집',
'Edit Assessment': '평가 편집',
'Edit Asset Log Entry': '자산 로그 항목 편집',
'Edit Asset': '자산 편집',
'Edit Baseline Type': '편집할 기준선 유형',
'Edit Baseline': '기준선 편집',
'Edit Brand': '브랜드 편집',
'Edit Budget': '예산 편집',
'Edit Bundle': '번들 편집',
'Edit Camp Service': '자녀를 서비스 편집',
'Edit Camp Type': '자녀를 유형 편집',
'Edit Camp': '자녀를 편집',
'Edit Catalog Item': '카탈로그 항목 편집',
'Edit Catalog': '카탈로그 편집',
'Edit Certificate': '인증서 편집',
'Edit Certification': '인증 편집',
'Edit Cluster Subsector': '클러스터 subsector 편집',
'Edit Cluster': '클러스터 편집',
'Edit Commitment Item': '편집 확약 항목',
'Edit Commitment': '편집 확약',
'Edit Competency Rating': '능력 등급 편집',
'Edit Competency': '자격 편집',
'Edit Contact Information': '연락처 정보 편집',
'Edit Contact': '연락처 편집',
'Edit Contents': '컨텐츠 편집',
'Edit Course Certificate': 'certicate 코스 편집',
'Edit Course': '코스 편집',
'Edit Credential': '신임 편집',
'Edit Dead Body Details': '데드 본문 세부사항 편집',
'Edit Description': '설명 편집',
'Edit Details': '세부사항 편집',
'Edit Disaster Victims': '피해 희생 편집',
'Edit Document': '문서 편집',
'Edit Donor': '편집 제공자',
'Edit Email Settings': '이메일 설정 편집',
'Edit Entry': '항목 편집',
'Edit Event': '이벤트 편집',
'Edit Facility': '기능 편집',
'Edit Feature Layer': '편집 기능)',
'Edit Flood Report': '플러드 보고서 편집',
'Edit Gateway Settings': '게이트웨이 설정 편집',
'Edit Group': '그룹 편집',
'Edit Hospital': '병원 편집',
'Edit Human Resource': '인적 자원 편집',
'Edit Identification Report': '식별 보고서 편집',
'Edit Identity': 'id 편집',
'Edit Image Details': '이미지 세부사항 편집',
'Edit Impact Type': '편집 유형 영향',
'Edit Impact': '영향 편집',
'Edit Incident Report': '인시던트 보고서 편집',
'Edit Inventory Item': '재고 항목 편집',
'Edit Item Category': '항목 카테고리 편집',
'Edit Item Pack': '항목 팩 편집',
'Edit Item': '항목 편집',
'Edit Job Role': '작업 역할 편집',
'Edit Key': '키 편집',
'Edit Kit': '키트 편집',
'Edit Layer': '계층 편집',
'Edit Level %d Locations?': '레벨% d 위치 편집?',
'Edit Level 1 Assessment': '레벨 1 평가 편집',
'Edit Level 2 Assessment': '레벨 2 평가 편집',
'Edit Location': '위치 편집',
'Edit Log Entry': '로그 항목 편집',
'Edit Map Configuration': '맵 구성 편집',
'Edit Map Services': '맵 서비스 편집',
'Edit Marker': '마커 편집',
'Edit Membership': '멤버십 편집',
'Edit Message': '메시지 편집',
'Edit Messaging Settings': '메시징 설정 편집',
'Edit Mission': '임무 편집',
'Edit Modem Settings': '모뎀 설정 편집',
'Edit Need Type': '필요한 유형 편집',
'Edit Need': '필요한 편집',
'Edit Office': '부재 편집',
'Edit Options': '편집 옵션',
'Edit Organization': '조직 편집',
'Edit Parameters': '매개변수 편집',
'Edit Peer Details': '피어 세부사항 편집',
'Edit Person Details': '편집할 사용자 세부사항',
'Edit Personal Effects Details': '개인 효과 세부사항 편집',
'Edit Photo': '사진 편집',
'Edit Population Statistic': '인구 통계 편집',
'Edit Position': '직위 편집',
'Edit Problem': '문제점 편집',
'Edit Project': '편집 프로젝트',
'Edit Projection': '프로젝션 편집',
'Edit Rapid Assessment': '빠른 평가 편집',
'Edit Received Item': '수신된 항목 편집',
'Edit Received Shipment': '수신된 shipment 편집',
'Edit Record': '레코드 편집',
'Edit Registration Details': '등록 세부사항 편집',
'Edit Registration': '등록 편집',
'Edit Request Item': '편집 항목 요청',
'Edit Request': '요청 편집',
'Edit Resource': '자원 편집',
'Edit River': '편집 강',
'Edit Role': '역할 편집',
'Edit Room': '미팅룸 편집',
'Edit Scenario': '시나리오 편집',
'Edit Sector': '편집 부문',
'Edit Sent Item': '보낸 항목 편집',
'Edit Setting': '설정 편집',
'Edit Settings': '설정 편집',
'Edit Shelter Service': 'shelter 서비스 편집',
'Edit Shelter Type': '편집 shelter 유형',
'Edit Shelter': '편집 shelter',
'Edit Skill Equivalence': '기술 equivalence 편집',
'Edit Skill Provision': '기술 provision 편집',
'Edit Skill Type': '편집 기술 유형',
'Edit Skill': '기술 항목 편집',
'Edit Solution': '솔루션 편집',
'Edit Staff Type': '편집 직원 유형',
'Edit Subscription': '등록 편집',
'Edit Subsector': '편집 subsector',
'Edit Survey Answer': '서베이 응답 편집',
'Edit Survey Question': '서베이 질문 편집',
'Edit Survey Series': '서베이 시리즈 편집',
'Edit Survey Template': '서베이 템플리트 편집',
'Edit Task': '태스크 편집',
'Edit Team': '팀 편집',
'Edit Theme': '테마 편집',
'Edit Themes': '테마 편집',
'Edit Ticket': '티켓 편집',
'Edit Track': '트랙 편집',
'Edit Training': '교육 편집',
'Edit Tropo Settings': 'tropo 설정 편집',
'Edit User': '사용자 편집',
'Edit Volunteer Availability': '지원자 가용성 편집',
'Edit Volunteer Details': '지원자 세부사항 편집',
'Edit Warehouse': '웨어하우스 편집',
'Edit current record': '현재 레코드 편집',
'Edit message': '메시지 편집',
'Edit': '편집',
'Editable?': '편집 가능 여부',
'Education materials received': '교육 자료 수신',
'Education materials, source': '교육 자료, 소스',
'Education': '교육',
'Effects Inventory': '자원 명세 효과',
'Eggs': '계란',
'Either a shelter or a location must be specified': '는 shelter 또는 위치를 지정해야 합니다.',
'Either file upload or document URL required.': '파일 업로드 또는 필요한 문서의 url.',
'Either file upload or image URL required.': '파일 업로드 또는 필요한 이미지 url.',
'Elderly person headed households (>60 yrs)': '친인척 개인 households (>60 세) 방향',
'Electrical': '전기',
'Electrical, gas, sewerage, water, hazmats': '전기, 가스, sewerage, 물, hazmats',
'Elevators': '엘리베이터',
'Email Address': '이메일 주소',
'Email Settings': '이메일 설정',
'Email settings updated': '이메일 설정 갱신',
'Email': '이메일',
'Embalming': 'embalming',
'Embassy': 'embassy',
'Emergency Capacity Building project': '용량 비상 프로젝트 빌드',
'Emergency Department': '긴급 department',
'Emergency Shelter': '비상 shelter',
'Emergency Support Facility': '긴급 지원 기능',
'Emergency Support Service': '긴급 지원 서비스',
'Emergency Telecommunications': 'telecommunications 비상',
'Enable/Disable Layers': '계층 사용/사용 안함',
'Enabled': '사용 가능',
'End Date': '종료 날짜',
'End date should be after start date': '종료 날짜는 시작 날짜 이후여야 합니다',
'End date': '종료 날짜',
'End of Period': '기간의 끝',
'Enter Coordinates:': '좌표를 입력하십시오.',
'Enter a GPS Coord': 'gps 좌표 입력',
'Enter a name for the spreadsheet you are uploading (mandatory).': '스프레드시트 대한 이름 (업로드하는) 입력하십시오.',
'Enter a new support request.': '새 지원 요청을 입력하십시오.',
'Enter a unique label!': '고유한 레이블을 입력하십시오!',
'Enter a valid date before': '올바른 날짜 입력',
'Enter a valid email': '올바른 전자 우편.',
'Enter a valid future date': '올바른 미래 날짜를 입력하십시오',
'Enter some characters to bring up a list of possible matches': '일부 문자를 일치사항이 목록을 표시하십시오.',
'Enter some characters to bring up a list of possible matches.': '일부 문자를 일치사항이 목록을 표시하십시오.',
'Enter tags separated by commas.': '쉼표로 분리된 태그를 입력하십시오.',
'Enter the same password as above': '위와 동일한 암호를 입력하십시오.',
'Entered': '입력됨',
'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': '전화번호를 입력하는 선택적이지만, 그렇게 sms 메시지를 수신하도록 등록할 수 있습니다.',
'Entry deleted': '항목 삭제됨',
'Environment': '환경',
'Equipment': '장비',
'Error encountered while applying the theme.': '오류가 있는 테마를 적용하여 발견했습니다.',
'Error in message': '오류 메시지',
"Error logs for '%(app)s'": '오류 로그를 "%(app)s"',
'Errors': '오류',
'Est. Delivery Date': 'est. 전달 날짜',
'Estimated # of households who are affected by the emergency': '긴급 재난 상황에 영향을 받는 예상 가구수',
'Estimated # of people who are affected by the emergency': '긴급 재난 상황에 영향을 받는 예상 인원',
'Estimated Overall Building Damage': '예상되는 전체 빌딩 손실',
'Estimated total number of people in institutions': '총 예상 기관 사용자 수',
'Euros': 'euros',
'Evacuating': 'evacuating',
'Evaluate the information in this message. (This value SHOULD NOT be used in public warning applications.)': '이 메시지의 정보를 평가하십시오. (이 값은 공용 경고 응용프로그램에서 사용해야 합니다. )',
'Event Details': '이벤트 세부사항',
'Event added': '이벤트 추가',
'Event deleted': '이벤트 삭제',
'Event updated': '이벤트가 업데이트되었습니다.',
'Event': '이벤트',
'Events': '이벤트',
'Example': '예제',
'Exceeded': '초과됨',
'Excellent': '최상',
'Exclude contents': '제외할 내용',
'Excreta disposal': 'excreta 폐기',
'Execute a pre-planned activity identified in <instruction>': '식별된 사전 계획된 활동 실행<instruction>',
'Exercise': '연습',
'Exercise?': '연습?',
'Exercises mean all screens have a watermark & all notifications have a prefix.': '모든 연습을 화면을 워터마크를 모든 통지를 접두부가 있어야 합니다.',
'Existing Placard Type': '기존 placard 유형',
'Existing food stocks': '식음료권 기존 주식을',
'Existing location cannot be converted into a group.': '기존 위치 그룹 변환할 수 없습니다.',
'Exits': '엑시트',
'Experience': '경력',
'Expiry Date': '만료 날짜',
'Explosive Hazard': '폭발 위험',
'Export Data': '데이터 내보내기',
'Export Database as CSV': '데이터베이스 반출 csv',
'Export in GPX format': 'gpx 형식으로 내보내기',
'Export in KML format': 'kml 형식으로 내보내기',
'Export in OSM format': '내보내기 osm 형식으로',
'Export in PDF format': 'PDF 형식으로 반출',
'Export in RSS format': '내보내기 rss 형식으로',
'Export in XLS format': 'xls 형식으로 내보내기',
'Export': '내보내기',
'Exterior Only': '외부 전용',
'Exterior and Interior': '외부 및 내부',
'Eye Color': '눈 색상',
'Facebook': '페이스북',
'Facial hair, color': '얼굴 털, 색상',
'Facial hair, type': '얼굴 털. 모양',
'Facial hear, length': 'facial 듣고, 길이',
'Facilities': '기능',
'Facility Details': '기능 세부사항',
'Facility Operations': '기능 조작',
'Facility Status': '기능 상태',
'Facility Type': '설비 유형',
'Facility added': '기능 추가',
'Facility or Location': '기능 또는 위치',
'Facility removed': '제거된 기능',
'Facility updated': '기능 갱신',
'Fail': '실패',
'Failed!': '실패!',
'Fair': '양호',
'Falling Object Hazard': '오브젝트 hazard 폴백하기',
'Families/HH': '제품군에서/hh',
'Family tarpaulins received': 'tarpaulins 받은 제품군',
'Family tarpaulins, source': '제품군 tarpaulins, 소스',
'Family': '제품군',
'Family/friends': '제품군/friends',
'Farmland/fishing material assistance, Rank': 'farmland/어업 자재 지원, rank',
'Fax': '팩스',
'Feature Layer Details': '기능 계층 세부사항',
'Feature Layer added': '기능 계층 추가',
'Feature Layer deleted': '기능 계층 삭제',
'Feature Layer updated': 'feature layer 갱신된',
'Feature Layers': '기능 계층',
'Feature Namespace': '기능 이름',
'Feature Request': '기능 요청',
'Feature Type': '기능 유형',
'Features Include': '기능 포함',
'Female headed households': '여성 머리 households',
'Few': '몇 가지',
'Field Hospital': '필드 병원',
'Field': '필드',
'File': '파일',
'Fill in Latitude': '위도를 입력하십시오',
'Fill in Longitude': '경도를 입력하십시오',
'Filter Field': '필터 필드',
'Filter Value': '필터 값',
'Filter': '필터',
'Find Dead Body Report': '데드 본문 보고서 찾기',
'Find Hospital': '병원 찾기',
'Find Person Record': '개인 정보 찾기',
'Find Volunteers': '자원봉사자 찾기',
'Find a Person Record': '개인 레코드 찾기',
'Find': '찾기',
'Finder': '파인더',
'Fingerprint': '지문',
'Fingerprinting': '지문 분석',
'Fingerprints': '지문',
'Finished Jobs': '완료된 작업',
'Fire suppression and rescue': '화재 진압 및 이동',
'Fire': '화재',
'First Name': '이름',
'First name': '이름',
'Fishing': '피싱',
'Flash Flood': '분류성 홍수',
'Flash Freeze': '급속 동결',
'Flexible Impact Assessments': '유연한 영향 평가',
'Flood Alerts show water levels in various parts of the country': '홍수 경보 표시 표시점 레벨을 국가 의 다양한 파트에서',
'Flood Alerts': '홍수 경보',
'Flood Report Details': '플러드 보고서 세부사항',
'Flood Report added': '플러드 보고서 추가',
'Flood Report deleted': '플러드 보고서 삭제',
'Flood Report updated': '플러드 보고서 갱신',
'Flood Report': '플러드 보고서',
'Flood Reports': '플러드 보고서',
'Flood': '홍수',
'Flow Status': '플로우 상태',
'Fog': 'fog',
'Food Supply': '식품 공급',
'Food assistance': '식품 지원',
'Food': '음식',
'Footer file %s missing!': '바닥글 파일% 가 누락되었습니다!',
'Footer': '바닥글',
'For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.': '국가에 대한 이 ISO2 코드, town 수, 장소 locode 이 됩니다.',
'For each sync partner, there is a default sync job that runs after a specified interval of time. You can also set up more sync jobs which could be customized on your needs. Click the link on the right to get started.': '각 동기화 partner, 지정된 시간 간격 후에 실행되는 기본 동기화 작업입니다. 또한 필요에 따라 사용자 정의할 수 많은 동기화 작업을 설정할 수 있습니다. 시작하려면 오른쪽에 있는 링크를 누르십시오.',
'For enhanced security, you are recommended to enter a username and password, and notify administrators of other machines in your organization to add this username and password against your UUID in Synchronization -> Sync Partners': '향상된 보안, 사용자 이름 및 암호를 입력하십시오 것이 권장됩니다, 관리자 및 조직 내의 다른 시스템의 사용자 이름 및 암호를 사용하여 uuid 대해 동기화 -> sync 파트너를 추가할 알림',
'For live help from the Sahana community on using this application, go to': '이 sahana 커뮤니티에서 이 응용프로그램 사용에 대한 실시간 도움말, 로 이동하십시오',
'For messages that support alert network internal functions': '경보 네트워크를 내부 기능을 지원하는 메시지',
'For more details on the Sahana Eden system, see the': '이 sahana eden 시스템 에 대한 자세한 내용은 를 참조하십시오',
'For more information, see': '자세한 정보는 의 내용을 참조하십시오.',
'For': '예를 들어,',
'Forest Fire': 'forest 화재',
'Formal camp': '정규 캠프',
'Format': '형식',
'Forms': '양식',
'Found': '발견됨',
'Foundations': 'foundations',
'Freezing Drizzle': 'drizzle 보류',
'Freezing Rain': '얼어붙은 비',
'Freezing Spray': '분무가 보류',
'French': '프랑스어',
'Friday': '금요일',
'From Inventory': '이전 재고',
'From Location': '원래 위치',
'From Organization': '원래 조직',
'From': '시작',
'Frost': 'frost',
'Fulfil. Status': '사건. STATUS',
'Fulfillment Status': '이행 상태',
'Full beard': '전체 beard',
'Full': '가득참',
'Fullscreen Map': '전체 화면 맵',
'Functions available': '사용 가능한 기능',
'Funding Organization': '자금 조직',
'Funeral': 'funeral',
'Further Action Recommended': '권장 조치',
'GIS Reports of Shelter': 'gis shelter 의 보고서',
'GIS integration to view location details of the Shelter': 'gis shelter 통합, 위치 세부사항 보기',
'GPS Marker': 'gps 마커',
'GPS Track File': 'gps 추적 파일',
'GPS Track': 'gps 추적',
'GPX Track': 'gpx 트랙',
'GRN Status': 'grn 상태',
'GRN': 'grn',
'Gale Wind': 'gale 바람',
'Gap Analysis Map': '맵 차이 분석',
'Gap Analysis Report': '차이 분석 보고서',
'Gap Analysis': '갭 분석',
'Gap Map': '맵 차이',
'Gap Report': '공백 보고서',
'Gateway Settings': '게이트웨이 설정',
'Gateway settings updated': '게이트웨이 설정 갱신',
'Gateway': '게이트웨이',
'Gender': '성별',
'General Comment': '일반적인 의견',
'General Medical/Surgical': '일반 의료/수술',
'General emergency and public safety': '일반적인 긴급 및 공공 안전',
'General information on demographics': '인구통계에 대한 일반적 정보',
'General': '일반',
'Generator': '생성기',
'Geocode': '지오코드',
'Geocoder Selection': '지오코더 선택',
'Geometry Name': 'geometry 이름',
'Geophysical (inc. landslide)': 'geophysical (inc. landslide)',
'Geotechnical Hazards': 'geotechnical 위험',
'Geotechnical': 'geotechnical',
'Geraldo module not available within the running Python - this needs installing for PDF output!': 'geraldo 모듈 사용 중인 python-이 내에서 pdf 출력 installing 합니다!',
'Get incoming recovery requests as RSS feed': '수신 복구 요청 get rss 피드',
'Give a brief description of the image, e.g. what can be seen where on the picture (optional).': '이미지의 간단한 설명을, 는 내용에 그림 (선택적) 에서 볼 수 있습니다.',
'Give information about where and when you have seen them': '위치에 대한 및 때 본 정보를 제공합니다',
'Global Messaging Settings': '글로벌 전달 설정',
'Go to Request': '요청 이동하십시오',
'Good Condition': '양호한지',
'Good': '양호',
'Goods Received Note': 'goods 참고 수신된',
'Government UID': '정부 uid',
'Government building': '정부 빌드',
'Government': '정부',
'Grade': '등급',
'Greek': '그리스어',
'Green': '초록색',
'Ground movement, fissures': '육상 이동, fissures',
'Ground movement, settlement, slips': '육상 이동, 결제, 전표',
'Group Description': '그룹 설명',
'Group Details': '그룹 세부사항',
'Group Member added': '그룹 구성원 추가',
'Group Members': '그룹 구성원',
'Group Memberships': '그룹 멤버쉽',
'Group Name': '그룹 이름',
'Group Title': '그룹 제목',
'Group Type': '그룹 유형',
'Group added': '추가된 그룹',
'Group deleted': '그룹 삭제',
'Group description': '그룹 설명',
'Group updated': '그룹 갱신',
'Group': '그룹',
'Groups removed': '그룹 제거',
'Groups': '그룹',
'Guest': '게스트',
'HR Manager': 'HR 관리자',
'Hail': '우박',
'Hair Color': '머리 색상',
'Hair Length': '십자선 길이',
'Hair Style': '십자선 스타일',
'Has data from this Reference Document been entered into Sahana?': '(이 참조 문서의 데이터를 sahana 입력됩니다?',
'Has the Certificate for receipt of the shipment been given to the sender?': '있는 운송물 수령 인증 송신자에게 제공되었습니다?',
'Has the GRN (Goods Received Note) been completed?': '있는 grn (goods 수신했습니다) 완료되었습니다?',
'Hazard Pay': '지불 위험',
'Hazardous Material': '위험물',
'Hazardous Road Conditions': '위험 조건.',
'Header Background': '헤더 배경',
'Header background file %s missing!': '헤더 백그라운드 파일% 가 누락되었습니다!',
'Headquarters': '본사',
'Health care assistance, Rank': '의료 지원, rank',
'Health center with beds': 'health center 를 의료용',
'Health center without beds': 'health center 의료용 없이',
'Health center': 'Health Center',
'Health services status': '상태 서비스 상태',
'Health': '시스템 상태',
'Healthcare Worker': 'healthcare worker',
'Heat Wave': '열 물결선',
'Heat and Humidity': '열 및 습도',
'Height (cm)': '높이 (cm)',
'Height (m)': '높이 (m)',
'Height': '높이',
'Help': '도움말',
'Helps to monitor status of hospitals': '도움이 병원 상태 모니터',
'Helps to report and search for missing persons': '데 보고서 및 누락된 사람이 검색하려면',
'Here are the solution items related to the problem.': '여기에 문제와 관련된 솔루션을 항목입니다.',
'Heritage Listed': 'heritage 나열됩니다',
'Hierarchy Level 0 Name (i.e. Country)': '계층 구조 레벨 0 이름 (국가)',
'Hierarchy Level 1 Name (e.g. State or Province)': '계층 구조 레벨 1 이름 (예: 도)',
'Hierarchy Level 2 Name (e.g. District or County)': '계층 구조 레벨 2 이름 (예: 지역 또는 국가)',
'Hierarchy Level 3 Name (e.g. City / Town / Village)': '계층 구조 레벨 3 이름 (예: 구/군/읍/village)',
'Hierarchy Level 4 Name (e.g. Neighbourhood)': '계층 구조 레벨 4 이름 (예: 근방)',
'Hierarchy Level 5 Name': '계층 구조 레벨 5 이름',
'High Water': '최고 사용 표시점',
'High': '높음',
'History': '내력',
'Hit the back button on your browser to try again.': '브라우저의 뒤로 단추를 사용하여 다시 시도하십시오.',
'Holiday Address': '공휴일 주소',
'Home Address': '집 주소',
'Home Country': '자택 국가',
'Home Crime': '범죄 홈',
'Home': '홈',
'Hospital Details': '병원 세부사항',
'Hospital Status Report': '병원 상태 보고서',
'Hospital information added': '병원 정보 추가',
'Hospital information deleted': '병원 정보 삭제',
'Hospital information updated': '병원 정보 갱신',
'Hospital status assessment.': '병원 상태 평가.',
'Hospital': '병원',
'Hospitals': '병원',
'Hot Spot': '핫 스팟',
'Hour': '시간',
'Hours': '시간',
'Household kits received': '가족 킷을 받은',
'Household kits, source': '가족 킷, 소스',
'How does it work?': '작동 방법?',
'How is this person affected by the disaster? (Select all that apply)': '이 방법을 사람은 피해 받지? (해당되는 사항을 모두 선택하십시오)',
'How long will the food last?': '얼마 동안, 식품 마지막?',
'How many Boys (0-17 yrs) are Dead due to the crisis': '얼마나 많은 boys (0-17 세) 인해 위기 에 폐기된 경우',
'How many Boys (0-17 yrs) are Injured due to the crisis': '얼마나 많은 boys (0-17 세) 인해 위기 에 injured.',
'How many Boys (0-17 yrs) are Missing due to the crisis': '얼마나 많은 boys (0-17 세) 인해 위기 누락되었습니다',
'How many Girls (0-17 yrs) are Dead due to the crisis': '얼마나 많은 girls (0-17 세) 인해 위기 에 폐기된 경우',
'How many Girls (0-17 yrs) are Injured due to the crisis': '얼마나 많은 girls (0-17 세) 인해 위기 에 injured.',
'How many Girls (0-17 yrs) are Missing due to the crisis': '얼마나 많은 girls (0-17 세) 인해 위기 에 누락되었습니다.',
'How many Men (18 yrs+) are Dead due to the crisis': '얼마나 많은 남성 (18 yrs+) 인해 위기 에 폐기된 경우',
'How many Men (18 yrs+) are Injured due to the crisis': '얼마나 많은 남성 (18 yrs+) 인해 위기 에 injured.',
'How many Men (18 yrs+) are Missing due to the crisis': '얼마나 많은 남성 (18 yrs+) 로 인해 위기 누락되었습니다',
'How many Women (18 yrs+) are Dead due to the crisis': '얼마나 많은 여성 (18 yrs+) 인해 위기 에 폐기된 경우',
'How many Women (18 yrs+) are Injured due to the crisis': '얼마나 많은 여성 (18 yrs+) 인해 위기 에 injured.',
'How many Women (18 yrs+) are Missing due to the crisis': '얼마나 많은 여성 (18 yrs+) 로 인해 위기 누락되었습니다',
'How many days will the supplies last?': '일, 공급품 마지막?',
'How many new cases have been admitted to this facility in the past 24h?': '얼마나 많은 새 유스 지난 24h 이 기능을 admitted?',
'How many of the patients with the disease died in the past 24h at this facility?': '얼마나 많은 환자 중 질병 이 기능에 지난 24h 중지되었습니다?',
'How many patients with the disease are currently hospitalized at this facility?': '얼마나 많은 환자를 질병 과 현재 이 기능에 hospitalized?',
'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': '얼마나 많은 세부사항이 표시됩니다. 높은 확대/축소 레벨 세부사항 많은 것을, 광역. 낮은 확대/축소 레벨로 광역 보지 않습니다 세부사항 상위 레벨.',
'Human Resource Details': '인적 자원 세부사항',
'Human Resource Management': '인적 자원 관리',
'Human Resource added': '추가된 인적 자원',
'Human Resource removed': '인적 자원 제거',
'Human Resource updated': '인적 자원 갱신',
'Human Resource': '인적 자원',
'Human Resources Management': '인적 자원 관리',
'Hurricane Force Wind': '허리케인 강제 바람',
'Hurricane': '허리케인',
'Hygiene NFIs': '조치 nfis',
'Hygiene kits received': '예방 킷을 받은',
'Hygiene kits, source': '예방 kits, 소스',
'Hygiene practice': '조치 사례',
'Hygiene problems': '문제점 예방',
'Hygiene': '조치',
'I am available in the following area(s)': '다음 영역 (s) 에서 사용 가능한 am',
'ID Tag Number': 'ID 태그 번호',
'ID Tag': 'ID 태그',
'ID type': 'ID 유형',
'Ice Pressure': 'ice 압력',
'Identification Report': '보고서 식별',
'Identification Reports': '보고서 식별',
'Identification Status': '상태 식별',
'Identified as': '식별된',
'Identified by': '식별됩니다',
'Identity Details': '항목 세부사항',
'Identity added': 'id 추가',
'Identity deleted': '삭제된 id',
'Identity updated': '항목이 갱신 되었습니다',
'If a ticket was issued then please provide the Ticket ID.': '티켓 발행된 경우, 티켓 id 를 제공하십시오.',
'If a user verifies that they own an Email Address with this domain, the Approver field is used to determine whether & by whom further approval is required.': '사용자가 이 도메인 전자 우편 주소, 승인자 필드 자신의 여부 및 담당자에 의해 추가 승인이 필요한지 여부를 판별하는 데 사용됩니다.',
'If it is a URL leading to HTML, then this will downloaded.': 'html 이끄는 url 이면 이 다운로드되지 않습니다.',
'If neither are defined, then the Default Marker is used.': '모두 정의된 경우, 다음 기본 마커가 사용됩니다.',
'If no marker defined then the system default marker is used': '그런 경우, 시스템 디폴트 마커 정의된 마커 사용됩니다.',
'If no, specify why': '아니오인 경우, 지정',
'If none are selected, then all are searched.': '아무 것도 선택되지 않은 경우, 모든 검색됩니다.',
'If the location is a geographic area, then state at what level here.': '위치는 지리 영역에서 경우 레벨을 여기에 상태.',
'If the request type is "Other", please enter request details here.': '요청 유형이 \\ " other\\ " 입니다, 요청 세부사항을 입력하십시오.',
'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organization': '이 필드가 채워진 경우 도메인 지정된 사용자를 자동으로 이 조직의 직원은 지정됩니다',
'If this is set to True then mails will be deleted from the server after downloading.': '이 true 로 설정된 경우 메일 서버에서 다운로드한 후에 삭제됩니다.',
'If this record should be restricted then select which role is required to access the record here.': '이 레코드 제한되어야 하는 그런 역할이 레코드 여기에 액세스하는 데 필요합니다.',
'If this record should be restricted then select which role(s) are permitted to access the record here.': '이 레코드 제한해야 하는 경우 다음 역할 레코드 여기에 액세스할 수 있습니다.',
'If yes, specify what and by whom': '예, 및 지정하십시오.',
'If yes, which and how': '예, 및',
'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.': '참조 문서 입력하지 않은 경우, 전자 이 데이터를 확인할 수 있도록 표시됩니다.',
'If you know what the Geonames ID of this location is then you can enter it here.': '어떤 geonames id 이 위치의 아는 경우, 여기에 입력할 수 있습니다.',
'If you know what the OSM ID of this location is then you can enter it here.': '어떤 osm id 이 위치를 아는 경우 여기에 입력할 수 있습니다.',
'If you need to add a new document then you can click here to attach one.': '새 문서를 추가해야 할 경우에는 여기에 하나 첨부할 수 있습니다.',
'If you want several values, then separate with': '여러 값, 구분하십시오',
'If you would like to help, then please': '도움말을 보려는 경우',
'Illegal Immigrant': '잘못된 immigrant',
'Image Details': '이미지 세부사항',
'Image Tags': '이미지 태그',
'Image Type': '이미지 유형',
'Image Upload': '이미지 업로드',
'Image added': '이미지 추가',
'Image deleted': '이미지 삭제',
'Image updated': '이미지 갱신',
'Image': '이미지',
'Imagery': '이미지',
'Impact Assessments': '영향 평가',
'Impact Details': '영향 세부사항',
'Impact Type Details': '영향 유형 세부사항',
'Impact Type added': '영향 유형 추가',
'Impact Type deleted': '영향 유형 삭제',
'Impact Type updated': '영향 유형 갱신',
'Impact Type': '영향 유형',
'Impact Types': '임팩트 유형',
'Impact added': '영향 추가됩니다',
'Impact deleted': '영향 삭제',
'Impact updated': '갱신된 영향',
'Impacts': '영향',
'Import & Export Data': '데이타 가져오기 및 내보내기',
'Import Data': '데이터 가져오기',
'Import Jobs': '가져오기 작업',
'Import and Export': '가져오기 및 내보내기',
'Import from Ushahidi Instance': 'ushahidi 인스턴스 가져오기',
'Import if Master': '가져올 마스터 경우',
'Import multiple tables as CSV': '가져오기 여러 테이블을 csv 로',
'Import': '가져오기',
'Import/Export': '가져오기/내보내기',
'Important': '중요',
'Importantly where there are no aid services being provided': '지원 서비스가 제공되지 않는 곳에서는 중요하게',
'Importing data from spreadsheets': '스프레드시트에서 데이터 가져오기',
'Improper decontamination': '부적절한 decontamination',
'Improper handling of dead bodies': '부적절한 취급 데드 본문의',
'In Catalogs': '카탈로그에',
'In Inventories': '재고가 있는',
'In Process': '진행 중',
'In Progress': '진행 중',
'In Window layout the map maximises to fill the window, so no need to set a large value here.': '창 레이아웃 maximises 맵 창을 채울 수 없으므로 하는 큰 값을 설정하십시오.',
'Inbound Mail Settings': '인바운드 메일 설정',
'Incident Categories': '인시던트 범주',
'Incident Report Details': '인시던트 세부사항 보고서',
'Incident Report added': '추가된 보고하십시오.',
'Incident Report deleted': '인시던트 보고서 삭제',
'Incident Report updated': '갱신된 보고하십시오.',
'Incident Report': '인시던트 보고서',
'Incident Reporting System': '인시던트 보고 시스템',
'Incident Reporting': '인시던트 보고',
'Incident Reports': '인시던트 보고서',
'Incident': '인시던트',
'Incidents': '인시던트',
'Incoming Shipment canceled': '들어오는 shipment 취소됨',
'Incoming Shipment updated': 'shipment 갱신된 수신',
'Incoming': '수신',
'Incomplete': '완료되지 않음',
'Individuals': '개인',
'Industrial Crime': '산업 범죄',
'Industrial': '산업',
'Industry Fire': '산업 화재',
'Infectious Disease (Hazardous Material)': 'infectious 질병 (위험)',
'Infectious Disease': 'infectious 질병',
'Infectious Diseases': 'infectious 질병에',
'Informal Leader': '비공식 리더',
'Informal camp': '비정규 캠프',
'Information gaps': '갭 정보',
'Infusion catheters available': 'infusion catheters 사용',
'Infusion catheters need per 24h': 'infusion catheters 24h 합니다',
'Infusion catheters needed per 24h': 'infusion catheters 24h 당 필요한',
'Infusions available': 'infusions 사용',
'Infusions needed per 24h': 'infusions 24h 당 필요한',
'Inspected': '검사됨',
'Inspection Date': '검사 날짜',
'Inspection date and time': '검사 날짜 및 시간',
'Inspection time': '검사 시간',
'Inspector ID': '검사원 ID',
'Instant Porridge': '인스턴트 porridge',
'Institution': '기관',
'Insufficient vars: Need module, resource, jresource, instance': '충분하지 않은 vars: 모듈, 자원, jresource, 인스턴스 합니다',
'Insufficient': '충분하지 않음',
'Intake Items': '흡입구 항목',
'Intergovernmental Organization': 'intergovernmental 조직',
'Interior walls, partitions': '내부 앞문에, 파티션',
'Internal State': '내부 상태',
'International NGO': '국제 ngo',
'International Organization': '국제',
'Interview taking place at': '인터뷰 시 일어나는',
'Invalid Query': '유효하지 않은 쿼리',
'Invalid request!': '올바르지 않은 요청!',
'Invalid ticket': '유효하지 않은 티켓',
'Invalid': '올바르지 않음',
'Inventories': '비축량이',
'Inventory Item Details': '재고 품목 세부사항',
'Inventory Item updated': '자원 명세 품목 갱신',
'Inventory Item': '재고 항목',
'Inventory Items include both consumable supplies & those which will get turned into Assets at their destination.': '재고 품목을 해당 대상의 자산 전환됩니다 모두 표시됩니다 소비 공급품 & 포함됩니다.',
'Inventory Items': '재고 항목',
'Inventory Management': '인벤토리 관리',
'Inventory of Effects': '자원 명세 효과',
'Is editing level L%d locations allowed?': '허용 레벨 l% d 위치 편집?',
'Is it safe to collect water?': '이를 안전한 물 수집할 수 있습니까?',
'Is this a strict hierarchy?': '이 엄격한 계층?',
'Issuing Authority': '발행 권한자',
'It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': '이는 단지 둘째, 활성 캡처하지만 또한 각 영역에서 제공하는 프로젝트 범위에 대한 정보를 캡처합니다.',
'Item Added to Shipment': '운송 항목에 추가됩니다',
'Item Catalog Details': '카탈로그 항목 세부사항',
'Item Categories': '항목 카테고리',
'Item Category Details': '항목 카테고리 세부사항',
'Item Category added': '항목에 카테고리 추가',
'Item Category deleted': '삭제된 항목 카테고리',
'Item Category updated': '갱신된 항목 카테고리',
'Item Category': '항목 대 카테고리',
'Item Details': '품목 세부사항',
'Item Pack Details': '항목 팩 세부사항',
'Item Pack added': '항목 팩 추가',
'Item Pack deleted': '항목 팩 삭제',
'Item Pack updated': '항목 팩이 갱신되었습니다',
'Item Packs': '항목 팩들',
'Item added to Inventory': '항목 명세에 추가됩니다',
'Item added to shipment': '운송 항목에 추가됩니다',
'Item added': '항목 추가됨',
'Item already in Bundle!': '항목이 이미 번들에!',
'Item already in Kit!': '항목이 이미 킷에!',
'Item already in budget!': '항목이 이미 예산!',
'Item deleted': '항목 삭제됨',
'Item removed from Inventory': '항목 명세로부터 제거됩니다',
'Item updated': '항목 갱신',
'Items in Category can be Assets': '카테고리 항목 자산 수',
'Items': '항목',
'Japanese': '일본어',
'Jerry can': 'jerry 수',
'Job Role Catalog': '작업 역할 카탈로그',
'Job Role Details': '작업 역할 세부사항',
'Job Role added': '작업 역할 추가',
'Job Role deleted': '작업 역할 삭제',
'Job Role updated': '작업 역할 갱신',
'Job Role': '업무 역할',
'Job Roles': '직무',
'Job Title': '직위',
'Jobs': '작업',
'Journal Entry Details': '저널 항목 세부사항',
'Journal entry added': '저널 항목 추가',
'Journal entry deleted': '저널 항목 삭제',
'Journal entry updated': '저널 항목이 갱신됩니다',
'Journal': '문서철',
'Key Details': '키 세부사항',
'Key added': '키 추가',
'Key deleted': '키 삭제',
'Key updated': '키 갱신',
'Key': '키',
'Keys': '키',
'Kit Contents': '키트 내용',
'Kit Details': '킷 세부사항',
'Kit Updated': '갱신된 상품',
'Kit added': '상품 추가',
'Kit deleted': '킷 삭제',
'Kit updated': '갱신된 상품',
'Kit': '킷',
'Kits': '킷',
'Known Identities': '알려진 id',
'Known incidents of violence against women/girls': '인시던트 알려진 여성/girls 에 violence 의',
'Known incidents of violence since disaster': '인시던트 알려진 violence 재해 이후',
'Korean': '한국어',
'LICENSE': '라이센스',
'Lack of material': '자재 부족',
'Lack of school uniform': '학교 부족으로 uniform 중',
'Lack of supplies at school': '공급품 부족 시 학교',
'Lack of transport to school': '전송으로 인해 학교 중',
'Lactating women': 'lactating 여성',
'Language': '언어',
'Last Name': '성',
'Last known location': '마지막 알려진 위치에서',
'Last synchronization time': '마지막 동기화 시간',
'Last updated by': '마지막 갱신자',
'Last updated on': '마지막 업데이트 날짜',
'Last updated': '최종 업데이트 날짜',
'Latitude & Longitude': '위도 및 경도',
'Latitude is North-South (Up-Down).': '위도의 경우 북쪽-남쪽 (위로-아래로).',
'Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': '적도는 위도 on 이면 양수, 남부 서반구를 있는 북부 서반구를 및 음수.',
'Latitude of Map Center': '위도 센터의 맵',
'Latitude of far northern end of the region of interest.': '관심 있는 리젼의 지금까지 북부 종료 위도입니다.',
'Latitude of far southern end of the region of interest.': '관심 있는 리젼의 지금까지 남부 종료 위도입니다.',
'Latitude should be between': '위도 사이여야 합니다',
'Latitude': '위도',
'Law enforcement, military, homeland and local/private security': '법률 시행, 군사, 국토 및 로컬/개인용 보안',
'Layer Details': 'ssl 세부사항',
'Layer added': '계층 추가',
'Layer deleted': '레이어 삭제',
'Layer updated': '갱신된 ssl',
'Layer': '계층',
'Layers updated': '갱신된 레이어를',
'Layers': '계층',
'Leader': '리더',
'Legend Format': '범례 형식',
'Length (m)': '길이 (m)',
'Level 1 Assessment Details': '레벨 1 평가 세부사항',
'Level 1 Assessment added': '레벨 1 평가 추가',
'Level 1 Assessment deleted': '레벨 1 평가 삭제',
'Level 1 Assessment updated': '레벨 1 평가 갱신',
'Level 1 Assessments': '평가 레벨 1',
'Level 1': '레벨 1',
'Level 2 Assessment Details': '레벨 2 평가 세부사항',
'Level 2 Assessment added': '레벨 2 평가 추가',
'Level 2 Assessment deleted': '레벨 2 평가 삭제',
'Level 2 Assessment updated': '레벨 2 평가 갱신',
'Level 2 Assessments': '평가 레벨 2',
'Level 2 or detailed engineering evaluation recommended': '레벨 2 상세 엔지니어링 평가 권장',
'Level 2': '레벨 2',
'Level': '레벨',
'Library support not available for OpenID': '라이브러리 지원 사용 openid 대한',
'LineString': '선스트링',
'List / Add Baseline Types': '목록/기준선 추가 유형',
'List / Add Impact Types': '목록/추가 영향 유형',
'List / Add Services': '목록/서비스',
'List / Add Types': '목록/추가 유형',
'List Activities': '활동 나열',
'List All Assets': '모든 자산',
'List All Catalog Items': '모든 카탈로그 항목',
'List All Commitments': '모든 commitments',
'List All Entries': '목록에서 모든 항목',
'List All Item Categories': '목록에서 모든 항목 카테고리',
'List All Memberships': '모든 멤버쉽',
'List All Received Shipments': '수신된 모든 운송물을',
'List All Records': '모든 레코드',
'List All Requested Items': '요청된 모든 목록 항목',
'List All Requests': '모든 요청',
'List All Sent Shipments': '목록에서 모든 보낸 운송물을',
'List All': '모두 나열',
'List Alternative Items': '목록에서 대체 항목',
'List Assessment Summaries': '목록에서 평가 요약',
'List Assessments': '목록에서 평가',
'List Assets': '자산 나열',
'List Availability': '가용성 목록',
'List Baseline Types': '목록에서 기준선 유형',
'List Baselines': '목록에서 기준선을',
'List Brands': '목록에서 브랜드',
'List Budgets': '예산 목록',
'List Bundles': 'Bundle 나열',
'List Camp Services': '목록에서 자녀를 서비스',
'List Camp Types': '목록 유형 캠프',
'List Camps': '목록 camps',
'List Catalog Items': '카탈로그 항목 목록',
'List Catalogs': '카탈로그 목록',
'List Certificates': '인증서 목록',
'List Certifications': '인증 목록',
'List Checklists': '점검 목록',
'List Cluster Subsectors': '클러스터 subsectors',
'List Clusters': '클러스터 목록',
'List Commitment Items': '목록 항목 확약',
'List Commitments': '목록에서 commitments',
'List Competencies': '목록에서 능력',
'List Competency Ratings': '목록에서 능력 등급',
'List Conflicts': '목록 충돌',
'List Contact Information': '연락처 목록 정보',
'List Contacts': '담당자 나열',
'List Course Certificates': '과정 목록 certicates',
'List Courses': '과정 목록',
'List Credentials': '권한 목록',
'List Current': '현재 목록',
'List Documents': '문서 목록',
'List Donors': '목록 donors',
'List Events': '이벤트 목록',
'List Facilities': '기능 목록',
'List Feature Layers': '계층 목록 기능',
'List Flood Reports': '목록에서 플러드 보고서',
'List Groups': '그룹을 나열합니다.',
'List Groups/View Members': '그룹/보기 멤버',
'List Hospitals': '목록에서 병원',
'List Human Resources': '인적 자원 목록',
'List Identities': '리스트 id',
'List Images': '이미지 목록',
'List Impact Assessments': '목록에서 영향 평가',
'List Impact Types': '목록에서 영향 유형',
'List Impacts': '목록에서 영향',
'List Incident Reports': '인시던트 목록 보고서',
'List Item Categories': '목록 항목 카테고리',
'List Item Packs': '리스트 항목이 팩',
'List Items in Inventory': '목록에서 항목을 명세에',
'List Items': '항목 나열',
'List Job Roles': '작업 역할 목록',
'List Keys': '키 나열',
'List Kits': '킷 목록',
'List Layers': '레이어 목록',
'List Level 1 Assessments': '목록에서 레벨 1 평가',
'List Level 1 assessments': '목록에서 레벨 1 평가',
'List Level 2 Assessments': '목록에서 레벨 2 평가',
'List Level 2 assessments': '목록에서 레벨 2 평가',
'List Locations': '위치 목록',
'List Log Entries': '로그 항목 목록',
'List Map Configurations': '목록에서 맵 구성',
'List Markers': '목록에서 표시문자',
'List Members': '구성원 목록',
'List Memberships': '멤버쉽 리스트',
'List Messages': '메시지 목록',
'List Missing Persons': '목록에서 누락된 사람이',
'List Missions': '목록에서 임무',
'List Need Types': '목록에서 필요한 유형을',
'List Needs': '목록에서 합니다',
'List Offices': '목록 offices',
'List Organizations': '조직 목록',
'List Peers': '피어 목록',
'List Personal Effects': '목록에서 개인 효과',
'List Persons': '사용자 목록',
'List Photos': '목록에서 사진',
'List Population Statistics': '목록에서 인구 통계',
'List Positions': '위치 목록',
'List Problems': '목록 문제점',
'List Projections': '프로젝션 목록',
'List Projects': '프로젝트 목록',
'List Rapid Assessments': '목록에서 신속한 평가',
'List Received Items': '목록에서 받은 항목',
'List Received Shipments': '목록에서 받은 운송물을',
'List Records': '레코드 목록',
'List Registrations': '등록 목록',
'List Reports': '목록 보고서',
'List Request Items': '요청 목록 항목',
'List Requests': '요청 목록',
'List Resources': '자원 표시',
'List Rivers': '목록에서 강',
'List Roles': '역할 리스트',
'List Rooms': '회의실 목록',
'List Scenarios': '시나리오 목록',
'List Sections': '목록 섹션',
'List Sectors': '목록에서 sectors',
'List Sent Items': '전송된 항목 목록',
'List Sent Shipments': '목록에서 보낸 운송물을',
'List Service Profiles': '목록에서 서비스 프로파일',
'List Settings': '목록 설정',
'List Shelter Services': 'shelter 서비스 목록',
'List Shelter Types': 'shelter 유형 목록',
'List Shelters': '목록 shelters',
'List Skill Equivalences': '목록에서 기술 equivalences',
'List Skill Provisions': '목록에서 기술 조항',
'List Skill Types': '목록 항목 유형',
'List Skills': '기술 목록',
'List Solutions': '솔루션 목록',
'List Staff Types': '목록에서 직원 유형',
'List Status': '목록 상태',
'List Subscriptions': '등록 목록',
'List Subsectors': '목록 subsectors',
'List Support Requests': '목록 지원 요청',
'List Survey Answers': '목록에서 서베이 응답을',
'List Survey Questions': '목록에서 서베이 질문',
'List Survey Series': '목록에서 서베이 시리즈',
'List Survey Templates': '목록에서 서베이 템플리트',
'List Tasks': '태스크 나열',
'List Teams': '팀 목록',
'List Themes': '목록 테마',
'List Tickets': '티켓 목록',
'List Tracks': '목록 트랙',
'List Trainings': '목록 trainings',
'List Units': '장치 목록',
'List Users': '사용자 나열',
'List Warehouses': '목록 웨어하우스를',
'List all': '모두 나열',
'List available Scenarios': '사용 가능한 시나리오',
'List of Items': '항목 목록',
'List of Missing Persons': '목록에서 누락된 개인',
'List of Peers': '피어 목록',
'List of Reports': '보고서 목록',
'List of Requests': '요청의 목록',
'List of Spreadsheets uploaded': '목록에서 스프레드시트를 업로드할',
'List of Spreadsheets': '목록 스프레드시트를',
'List of Volunteers for this skill set': '이 기술 volunteers 목록 설정',
'List of Volunteers': '목록 volunteers 의',
'List of addresses': '주소 목록',
'List unidentified': '목록에서 비식별',
'List/Add': '목록/추가',
'Lists "who is doing what & where". Allows relief agencies to coordinate their activities': '누가 어떤 & where\\ " 수행 중인 나열합니다 \\ ". 릴리프 기관 자신의 활동을 조정하고',
'Live Help': '실시간 도움말',
'Load Cleaned Data into Database': '로드 정리된 데이터로 데이터베이스',
'Load Raw File into Grid': '로드 격자로 원시 파일',
'Loading': '로드 중',
'Local Name': '로컬 이름',
'Local Names': '로컬명',
'Location 1': '위치 1',
'Location 2': '위치 2',
'Location Details': '위치 세부사항',
'Location Hierarchy Level 0 Name': '위치 계층 구조 레벨 0 이름',
'Location Hierarchy Level 1 Name': '위치 계층 구조 레벨 1 이름',
'Location Hierarchy Level 2 Name': '위치 계층 구조 레벨 2 이름',
'Location Hierarchy Level 3 Name': '위치 계층 구조 레벨 3 이름',
'Location Hierarchy Level 4 Name': '위치 계층 구조 레벨 4 이름',
'Location Hierarchy Level 5 Name': '위치 계층 구조 레벨 5 이름',
'Location added': '위치 추가',
'Location deleted': '삭제된 위치',
'Location group cannot be a parent.': '위치 그룹 상위 수 없습니다.',
'Location group cannot have a parent.': '위치 그룹 상위 가질 수 없습니다.',
'Location groups can be used in the Regions menu.': '위치 그룹 regions 메뉴에서 사용할 수 있습니다.',
'Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group.': '위치 그룹을 맵에 표시됩니다 필터링하는 데 사용할 수 있으며 검색 결과에 그룹 위치를 다루는 엔티티만.',
'Location updated': '위치 갱신',
'Location': '위치',
'Location:': '위치:',
'Locations of this level need to have a parent of level': '이 레벨의 위치 상위 레벨의 있어야 합니다',
'Locations': '위치',
'Lockdown': '잠금',
'Log Entry Details': '로그 항목 세부사항',
'Log entry added': '로그 항목 추가',
'Log entry deleted': '로그 항목이 삭제됩니다',
'Log entry updated': '로그 항목이 갱신됩니다',
'Log': '로그',
'Login': '로그인',
'Logistics Management System': '물류 관리 시스템',
'Logistics': '물류',
'Logo file %s missing!': '로고 파일% 가 누락되었습니다!',
'Logo': '로고',
'Logout': '로그아웃',
'Long Text': '긴 텍스트',
'Longitude is West - East (sideways).': '경선 서쪽-동쪽 (옆으로).',
'Longitude is West-East (sideways).': '경선 서쪽-동쪽 (옆으로).',
'Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': '경도는 본초 자오선 은 (greenwich mean time) 인 양수, 중동, 유럽 및 아시아. 경도를 음수 서쪽에, 대서양 및 미국.',
'Longitude is zero on the prime meridian (through Greenwich, United Kingdom) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': '경도는 본초 자오선 은 (greenwich, 영국) 를 통해 인 양수, 중동, 유럽 및 아시아. 경도를 음수 서쪽에, 대서양 및 미국.',
'Longitude of Map Center': '경도 센터의 맵',
'Longitude of far eastern end of the region of interest.': '지금까지 원하는 영역의 끝 동부 경도입니다.',
'Longitude of far western end of the region of interest.': '원하는 영역의 맨 끝에 서부 경도입니다.',
'Longitude should be between': '경도 사이여야 합니다',
'Longitude': '경도',
'Lost Password': '잊어버린 암호',
'Lost': '분실됨',
'Low': '낮음',
'Magnetic Storm': '자기 먹회색',
'Major Damage': '주요 손상',
'Major expenses': '주요 비용',
'Major outward damage': '주요 밑면을 손상',
'Make Commitment': '확약 확인하십시오',
'Make New Commitment': '새 확약 확인하십시오',
'Make Request': '요청',
'Make preparations per the <instruction>': '노드당 준비를 하십시오<instruction>',
'Manage Relief Item Catalogue': '관리 릴리프 항목 카탈로그',
'Manage Users & Roles': '사용자 및 역할',
'Manage Warehouses/Sites': '관리 웨어하우스를/사이트',
'Manage Your Facilities': '관리 기능',
'Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.': '공급 관리 요청, 자산, 직원 또는 기타 자원. 여기서 제공하는 요청한 인벤토리의 대해 일치합니다.',
'Manage requests of hospitals for assistance.': '병원 지원을 관리합니다.',
'Manage volunteers by capturing their skills, availability and allocation': '관리 volunteers 기술, 가용성 및 캡처하여 할당',
'Managing Office': '사무실 관리',
'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': '필수. geoserver, 계층 이름. wfs getcapabilities 내에서 이 콜론 (:) featuretype 이름 파트.',
'Mandatory. The URL to access the service.': '필수. 서비스 url 에 액세스하십시오.',
'Manual Synchronization': '수동 동기화',
'Manual': '수동',
'Many': '다수',
'Map Center Latitude': '맵 center 위도',
'Map Center Longitude': '지도 중심 경도',
'Map Configuration Details': '지도 구성 세부사항',
'Map Configuration added': '지도 구성이 추가되었습니다',
'Map Configuration deleted': '지도 구성이 삭제되었습니다',
'Map Configuration removed': '지도 구성이 삭제되었습니다',
'Map Configuration updated': '맵 구성 갱신',
'Map Configuration': '지도 구성',
'Map Configurations': '지도 구성',
'Map Height': '맵 높이',
'Map Service Catalog': '맵 서비스 카탈로그',
'Map Settings': '맵 설정',
'Map Viewing Client': '맵핑 보기 클라이언트',
'Map Width': '맵 너비',
'Map Zoom': '맵 확대',
'Map of Hospitals': '맵 병원 중',
'Map': 'map',
'Marine Security': 'marine 보안',
'Marital Status': '결혼 여부',
'Marker Details': '마커 세부사항',
'Marker added': '마커 추가',
'Marker deleted': '마커 삭제',
'Marker updated': '마커 갱신',
'Marker': '마커',
'Markers': '마커',
'Master Message Log to process incoming reports & requests': '마스터 메시지 로그 수신 보고서를 & 요청을 처리하기 위해',
'Master Message Log': '마스터 메시지 로그',
'Match Percentage': '일치 비율',
'Match Requests': '일치하는 요청',
'Match percentage indicates the % match between these two records': '일치하는 백분율 이 두 레코드 사이의% 일치 표시합니다',
'Match?': '일치 여부',
'Matching Catalog Items': '일치하는 카탈로그 항목',
'Matching Items': '일치하는 항목',
'Matching Records': '대응 레코드를',
'Maximum Location Latitude': '최대 위치는 위도',
'Maximum Location Longitude': '최대 위치 경도',
'Medical and public health': '의료 및 공중 보건',
'Medium': '중간',
'Megabytes per Month': 'mb 당 월',
'Member removed from Group': '멤버쉽 삭제',
'Members': '구성원',
'Membership Details': '멤버쉽 세부사항',
'Membership updated': '멤버쉽 갱신',
'Membership': '멤버쉽',
'Memberships': '멤버십',
'Message Details': '메시지 세부사항',
'Message Variable': '메시지 변수',
'Message added': '메시지 추가',
'Message deleted': '메시지를 삭제했습니다.',
'Message updated': '메시지 갱신됨',
'Message variable': '메시지 변수',
'Messaging settings updated': '메시징 설정 갱신',
'Messaging': '메시지',
'Meteorological (inc. flood)': 'meteorological (inc. 홍수)',
'Method used': '사용된 방법',
'Middle Name': '중간 이름',
'Migrants or ethnic minorities': 'migrants 인종 또는 minorities',
'Military': '군대',
'Minimum Location Latitude': '최소 위치는 위도',
'Minimum Location Longitude': '최소 위치의 경도',
'Minimum shift time is 6 hours': '최소 shift 6 시간 시간',
'Minor Damage': '부 손상',
'Minor/None': '부/없음',
'Minorities participating in coping activities': 'minorities 활동에 참여하는 복사',
'Minutes must be a number between 0 and 60': '0 분 및 60 사이의 숫자여야 합니다',
'Minutes per Month': '분 당 월',
'Minutes should be a number greater than 0 and less than 60': '0 분 및 0 보다 숫자여야 합니다',
'Miscellaneous': '기타',
'Missing Person Details': '누락된 사용자 세부사항',
'Missing Person Registry': '누락된 사용자 레지스트리',
'Missing Person': '누락된 사용자',
'Missing Persons Registry': '사용자 레지스트리 누락',
'Missing Persons Report': '개인 보고서 누락',
'Missing Persons': '누락된 사람이',
'Missing Report': '누락된 보고서',
'Missing Senior Citizen': 'senior 시민 누락',
'Missing Vulnerable Person': '취약한 개인 누락',
'Missing': '누락',
'Mission Details': '세부사항 임무',
'Mission Record': '레코드 임무',
'Mission added': '추가된 임무',
'Mission deleted': '삭제된 임무',
'Mission updated': '갱신된 mission',
'Missions': '임무',
'Mobile Basic Assessment': '모바일 기본 평가',
'Mobile Phone': '휴대폰',
'Mobile': '모바일',
'Mode': '모드',
'Model/Type': '유형/모델',
'Modem Settings': '모뎀 설정(M)',
'Modem settings updated': '모뎀 설정 갱신',
'Modem': '모뎀',
'Moderate': '중재',
'Moderator': '미팅장',
'Modify Information on groups and individuals': '그룹 및 개인 정보를 수정',
'Modifying data in spreadsheet before importing it to the database': '데이터 스프레드시트에, 데이터베이스에 반입하기 전에 수정',
'Module provides access to information on current Flood Levels.': '모듈 현재 홍수 정보에 대한 액세스를 제공합니다.',
'Module': '모듈',
'Monday': '월요일',
'Monthly Cost': '매월 비용',
'Monthly Salary': '월별 급여',
'Months': '월',
'Morgue Status': 'morgue 상태',
'Morgue Units Available': 'morgue 장치 사용',
'Motorcycle': '오토바이',
'Multiple Matches': '다중 일치사항',
'Must a location have a parent location?': '하는 위치에 상위 위치?',
'My Current function': '내 현재 함수',
'My Tasks': '나의 작업',
'NZSEE Level 1': 'nzsee 레벨 1',
'NZSEE Level 2': 'nzsee 레벨 2',
'Name and/or ID': '이름 및/또는 ID',
'Name of the file (& optional sub-path) located in static which should be used for the background of the header.': '헤더의 배경에 사용되며 정적 공간에 저장된 파일(및 선택적 종속 경로)명',
'Name of the file (& optional sub-path) located in static which should be used for the top-left image.': '파일 이름 (& 의 선택적 부속 경로) 위-왼쪽 이미지 사용해야 하는 정적 위치합니다.',
'Name of the file (& optional sub-path) located in views which should be used for footer.': '파일 이름 (& 의 선택적 부속 경로) 가 바닥글 사용할 수 있는 보기.',
'Name of the person in local language and script (optional).': '로컬 언어 및 스크립트 (선택적 개인).',
'Name, Org and/or ID': '이름, 조직 및/또는 id',
'Names can be added in multiple languages': '이름이 여러 언어로 추가할 수 있습니다',
'National ID Card': '국가 id 카드',
'National NGO': '자국어 ngo',
'National': '국가',
'Nationality of the person.': '개인의 nationality.',
'Nationality': '국적',
'Nautical Accident': 'nautical 사고',
'Nautical Hijacking': 'nautical 하이잭이라고',
'Need Type Details': '유형 세부사항 합니다',
'Need Type added': '유형을 추가할 필요가',
'Need Type deleted': '유형 삭제 합니다',
'Need Type updated': '유형을 갱신할 필요가',
'Need Type': '유형 합니다',
'Need Types': '유형 합니다',
'Need added': '추가해야 합니다',
'Need deleted': '삭제된 합니다',
'Need to be logged-in to be able to submit assessments': '로그온한 사용자는 에서 평가를 제출할 수 있도록 합니다',
'Need to configure Twitter Authentication': 'twitter 인증을 구성해야 합니다',
'Need to specify a Budget!': '예산 지정해야 합니다!',
'Need to specify a Kit!': '킷을 지정해야 합니다!',
'Need to specify a Resource!': '자원을 지정해야 합니다!',
'Need to specify a bundle!': '번들을 지정해야 합니다!',
'Need to specify a group!': '그룹을 지정해야 합니다!',
'Need to specify a location to search for.': '위치를 지정하도록 합니다.',
'Need to specify a role!': '역할을 지정해야 합니다!',
'Need to specify a table!': '테이블을 지정해야 합니다!',
'Need to specify a user!': '사용자가 지정해야 합니다!',
'Need updated': '갱신된 합니다',
'Needs Details': '필요한 세부사항',
'Needs Maintenance': '필요한 유지보수',
'Needs to reduce vulnerability to violence': '취약성 요구에 대한 violence 줄이기 위해',
'Needs': '요구사항',
'Negative Flow Isolation': '음수 플로우 분리',
'Neighborhood': '인접',
'Neighbouring building hazard': '에지와 빌드 위험',
'Network': '네트워크',
'New Assessment reported from': '새 평가 에서 보고됩니다',
'New Certificate': '새 인증서',
'New Checklist': '새 체크리스트',
'New Entry': '새 항목',
'New Event': '새 이벤트',
'New Item Category': '새 항목 카테고리',
'New Job Role': '새 작업 역할',
'New Location Group': '새 위치 그룹',
'New Location': '새 위치',
'New Peer': '새 피어',
'New Record': '새 레코드',
'New Request': '새 요청',
'New Scenario': '새 시나리오',
'New Skill': '새 기술',
'New Solution Choice': '새 솔루션 선택',
'New Staff Member': '새 스태프 구성원',
'New Support Request': '새 지원 요청',
'New Synchronization Peer': '새 동기화 피어',
'New Team': '신규 팀',
'New Training Course': '새 교육 과정',
'New Volunteer': '새 지원자',
'New cases in the past 24h': '새 유스 지난 24h',
'New': '신규',
'News': '뉴스',
'No Activities Found': '활동이 없음',
'No Alternative Items currently registered': '대체 현재 등록된',
'No Assessment Summaries currently registered': 'no 현재 등록된 평가 요약',
'No Assessments currently registered': '현재 등록된 평가',
'No Assets currently registered in this event': 'no 자산을 현재 이 이벤트에 등록되어',
'No Assets currently registered in this scenario': 'no 자산을 현재 이 시나리오에서 등록된',
'No Assets currently registered': 'no 현재 등록된 자산',
'No Baseline Types currently registered': '기준선 없음 현재 등록된 유형',
'No Baselines currently registered': '현재 등록된 기준선',
'No Brands currently registered': 'no 현재 등록된 브랜드',
'No Budgets currently registered': '현재 등록된 예산',
'No Bundles currently registered': 'no 현재 등록된 번들',
'No Camp Services currently registered': 'no 자녀를 현재 등록된 서비스',
'No Camp Types currently registered': 'no 자녀를 현재 등록된 유형',
'No Camps currently registered': 'camps 현재 등록된 없음',
'No Catalog Items currently registered': '카탈로그 항목 현재 등록된',
'No Catalogs currently registered': '현재 등록된 catalogs',
'No Checklist available': 'no 점검 사용',
'No Cluster Subsectors currently registered': 'subsectors 현재 등록된 클러스터',
'No Clusters currently registered': 'no 클러스터에 현재 등록되어',
'No Commitment Items currently registered': '확약 현재 등록된',
'No Credentials currently set': '현재 권한 세트',
'No Details currently registered': '현재 등록된 세부사항이 없습니다',
'No Documents found': '문서를 찾을 수 없습니다',
'No Donors currently registered': '현재 등록된 기증자가 없습니다',
'No Events currently registered': '현재 등록된 이벤트를',
'No Facilities currently registered in this event': '어떤 기능을 현재 이 이벤트에 등록되어',
'No Facilities currently registered in this scenario': '이 시나리오에 현재 등록된 시설물들이 없습니다',
'No Feature Layers currently defined': '기능 계층 현재 정의된',
'No Flood Reports currently registered': '홍수 없음 현재 등록된 보고서',
'No Groups currently defined': '현재 정의된 그룹이 없음',
'No Groups currently registered': '현재 등록된 그룹이 없음',
'No Hospitals currently registered': 'no 현재 등록된 병원',
'No Human Resources currently registered in this event': '이 이벤트에 현재 등록된 인적 자원이 없습니다',
'No Human Resources currently registered in this scenario': '이 시나리오에 현재 등록된 인적자원이 없습니다',
'No Identification Report Available': 'id 보고서 사용',
'No Identities currently registered': 'no 를 현재 등록된',
'No Image': '이미지 없음',
'No Images currently registered': '이미지가 현재 등록된',
'No Impact Types currently registered': '영향을 현재 등록된 유형',
'No Impacts currently registered': 'no 현재 등록된 영향주기',
'No Incident Reports currently registered': 'no 인시던트 현재 등록된 보고서',
'No Incoming Shipments': '들어오는 운송물을',
'No Item Categories currently registered': '항목이 현재 등록된 범주',
'No Item Packs currently registered': '항목 팩 현재 등록된',
'No Items currently registered in this Inventory': '현재 목록에 등록된 물품이 없습니다.',
'No Items currently registered': '현재 등록된 항목이 없음',
'No Keys currently defined': '키가 현재 정의된',
'No Kits currently registered': '현재 등록된 상품',
'No Level 1 Assessments currently registered': '레벨 1 현재 등록된 평가',
'No Level 2 Assessments currently registered': '레벨 2 현재 등록된 평가',
'No Locations currently available': '현재 위치가 사용',
'No Locations currently registered': '현재 위치가 등록되어',
'No Map Configurations currently defined': '현재 계획 구성이 정의되지 않았습니다',
'No Map Configurations currently registered in this event': '현재 이 이벤트에 등록된 계획 구성이 없습니다',
'No Map Configurations currently registered in this scenario': '현재 이 시나리오에 등록된 계획 구성이 없습니다',
'No Markers currently available': '표시문자 없음 현재 사용',
'No Match': '일치 없음',
'No Matching Catalog Items': '일치하는 카탈로그 항목',
'No Matching Items': '일치하는 항목 없음',
'No Matching Records': '일치하는 레코드가 없습니다.',
'No Members currently registered': '멤버가 현재 등록된',
'No Memberships currently defined': 'no 멤버쉽을 현재 정의된',
'No Messages currently in Outbox': '메시지가 현재 편지함에',
'No Need Types currently registered': '어떤 유형의 현재 등록된 합니다',
'No Needs currently registered': '현재 등록되어야 합니다',
'No Offices currently registered': 'no offices 현재 등록된',
'No Offices found!': 'no offices 찾을 수 없습니다!',
'No Organizations currently registered': '현재 등록된 조직이 없습니다',
'No People currently registered in this camp': '현재 등록된 사용자 이 캠프 에',
'No People currently registered in this shelter': '현재 등록된 사용자 shelter 이 있는',
'No Persons currently registered': '어떤 사용자가 현재 등록되어',
'No Persons currently reported missing': '어떤 사람이 현재 누락 보고된',
'No Persons found': '어떤 사람이 없음',
'No Photos found': '사진 없음 없음',
'No Picture': '그림이 없습니다.',
'No Population Statistics currently registered': 'no 인구 통계 현재 등록된',
'No Presence Log Entries currently registered': 'no presence 로그 항목이 현재 등록된',
'No Problems currently defined': '현재 정의된 문제가',
'No Projections currently defined': 'no projections 현재 정의된',
'No Projects currently registered': '프로젝트가 현재 등록된',
'No Rapid Assessments currently registered': 'no rapid 현재 등록된 평가',
'No Received Items currently registered': 'no 받은 현재 등록된',
'No Received Shipments': 'no 받은 운송물을',
'No Records currently available': '레코드가 현재 사용',
'No Request Items currently registered': '요청이 현재 등록된',
'No Requests': '요청 없음',
'No Rivers currently registered': 'no 현재 등록된 강',
'No Roles currently defined': '현재 정의된 역할 없음',
'No Rooms currently registered': 'no 미팅룸의 현재 등록된',
'No Scenarios currently registered': '어떤 시나리오의 현재 등록된',
'No Sections currently registered': '현재 등록된 섹션이 없습니다',
'No Sectors currently registered': 'no 섹터를 현재 등록된',
'No Sent Items currently registered': '전송된 항목 없음 현재 등록된',
'No Sent Shipments': 'no 보낸 운송물을',
'No Settings currently defined': '현재 정의된 설정이',
'No Shelter Services currently registered': 'no shelter 현재 등록된 서비스',
'No Shelter Types currently registered': 'no shelter 현재 등록된 유형',
'No Shelters currently registered': '현재 등록된 shelters',
'No Solutions currently defined': 'no 현재 정의된 솔루션',
'No Staff Types currently registered': 'no staff 현재 등록된 유형',
'No Subscription available': '사용가능한 받아보기가 없습니다',
'No Subsectors currently registered': 'subsectors 현재 등록된 없음',
'No Support Requests currently registered': '현재 등록된 지원 요청이 없습니다',
'No Survey Answers currently entered.': '현재 입력된 조사 응답합니다.',
'No Survey Questions currently registered': '현재 등록된 설문용 질문이 없습니다',
'No Survey Series currently registered': '현재 등록된 설문 시리즈가 없습니다',
'No Survey Template currently registered': '조사 템플리트 현재 등록된',
'No Tasks with Location Data': '태스크가 데이터 위치',
'No Teams currently registered': '현재 등록된 팀이',
'No Themes currently defined': 'no 현재 정의된 주제',
'No Tickets currently registered': '현재 등록된 티켓',
'No Tracks currently available': 'no 추적합니다 현재 사용',
'No Users currently registered': '사용자가 현재 등록된',
'No Volunteers currently registered': 'volunteers 현재 등록된 없음',
'No Warehouses currently registered': 'no 웨어하우스를 현재 등록된',
'No access at all': '모든 액세스',
'No access to this record!': '이 액세스 레코드에!',
'No action recommended': '권장 조치',
'No conflicts logged': '충돌이 logged',
'No contact information available': '문의처 정보 사용',
'No contacts currently registered': 'no 현재 등록된 연락처',
'No data in this table - cannot create PDF!': '이 테이블-데이터가 pdf 작성할 수 없습니다!',
'No databases in this application': '데이터베이스가 이 응용프로그램에',
'No dead body reports available': '사망자 보고서 사용불가',
'No entries found': '항목을 찾을 수 없습니다',
'No entries matching the query': '항목이 일치하는 조회',
'No entry available': '사용 가능한 항목 없음',
'No location known for this person': '위치가 알려진 이 사람은',
'No locations found for members of this team': '위치가 이 팀 구성원의 수',
'No log entries matching the query': '로그 항목이 일치하는 조회',
'No messages in the system': '메시지가 시스템에서',
'No peers currently registered': 'no 피어의 현재 등록된',
'No pending registrations found': '지연 등록을 찾을 수 없음',
'No pending registrations matching the query': '지연 등록을 조회와 일치하는',
'No person record found for current user.': '어떤 사용자가 현재 사용자에 대한 레코드를 찾을 수 없습니다.',
'No problem group defined yet': '문제점 그룹 아직 정의되지',
'No records matching the query': '조회와 일치하는 레코드가',
'No reports available.': '사용 가능한 보고서가 없습니다.',
'No reports currently available': '현재 사용 가능한 보고서',
'No requests found': '찾은 요청이 없습니다.',
'No resources currently reported': '자원이 현재 보고됩니다',
'No service profile available': '서비스 프로파일 사용',
'No skills currently set': 'no 현재 기술 세트',
'No staff or volunteers currently registered': '직원 또는 no volunteers 현재 등록된',
'No status information available': '상태 정보 사용',
'No synchronization': '동기화 안함',
'No tasks currently registered': '현재 등록된 타스크 없음',
'No template found!': '템플리트!',
'No units currently registered': '현재 등록된 장치가 없음',
'No volunteer availability registered': 'no 자발적으로 가용성 등록한',
'Non-structural Hazards': '비구조적 위험',
'None (no such record)': '없음 (예: 레코드)',
'None': '없음',
'Normal': '정상',
'Not Applicable': '적용할 수 없음',
'Not Authorised!': '권한이 없습니다!',
'Not Possible': '가능하지 않음',
'Not Set': '설정 안 함',
'Not Authorized': '권한이 없습니다',
'Not installed or incorrectly configured.': '설치되지 않았거나 잘못 구성되었습니다.',
'Not yet a Member of any Group': 'no 멤버쉽을 현재 등록된',
'Note that this list only shows active volunteers. To see all people registered in the system, search from this screen instead': '이 목록에는 활성 volunteers 표시합니다. 시스템에 등록된 모든 사용자가 보고, 이 화면에서 대신 검색',
'Notice to Airmen': '통지 airmen',
'Number of Columns': '열 수',
'Number of Patients': '환자 번호 중',
'Number of Rows': '행 수',
'Number of additional beds of that type expected to become available in this unit within the next 24 hours.': '이 단위 24 시간 내에서 될 것으로 예상되는 유형의 추가 의료용 수입니다.',
'Number of alternative places for studying': '번호 대체 자릿수를 연구하여 대한',
'Number of available/vacant beds of that type in this unit at the time of reporting.': '이 장치는 해당 유형의 사용/빈 의료용 보고 시.',
'Number of deaths during the past 24 hours.': 'deaths 지난 24 시간 동안 수입니다.',
'Number of discharged patients during the past 24 hours.': '방전된 환자 지난 24 시간 동안 수입니다.',
'Number of doctors': '번호 의사)',
'Number of in-patients at the time of reporting.': 'in-환자를 보고 시.',
'Number of newly admitted patients during the past 24 hours.': '새로 admitted 환자의 지난 24 시간 동안 수입니다.',
'Number of non-medical staff': '수가 의료 인력을',
'Number of nurses': 'number 간호사가 의',
'Number of private schools': '개인용 학교 중',
'Number of public schools': '공용 학교 중',
'Number of religious schools': 'number 종교 학교 중',
'Number of residential units not habitable': '번호 habitable 아닌 residential 장치',
'Number of residential units': 'number residential 장치',
'Number of vacant/available beds in this hospital. Automatically updated from daily reports.': '빈/사용 의료용 이 병원 에 수입니다. 자동으로 매일 보고서 에서 갱신되었습니다.',
'Number of vacant/available units to which victims can be transported immediately.': '빈/사용 하는 장치에 희생 즉시 전송할 수 있습니다.',
'Number or Label on the identification tag this person is wearing (if any).': '번호 또는 레이블 id 태그에 있는 경우 wearing 이 (가).',
'Number or code used to mark the place of find, e.g. flag code, grid coordinates, site reference number or similar (if available)': '번호 또는 코드 위치를 찾으려면 (예: 플래그 코드, 좌표 격자, 사이트 참조 번호 또는 유사한 (사용 가능한 경우) 을 표시하는 데 사용됩니다.',
'Number': '번호',
'Number/Percentage of affected population that is Female & Aged 0-5': '번호/백분율 여성 & 오래되었습니다 0-5 영향을 모집단의',
'Number/Percentage of affected population that is Female & Aged 13-17': '번호/백분율 여성 & 오래되었습니다 13-17 영향을 모집단의',
'Number/Percentage of affected population that is Female & Aged 18-25': '번호/백분율 여성 & 오래되었습니다 18-25 영향을 모집단의',
'Number/Percentage of affected population that is Female & Aged 26-60': '번호/백분율 여성 & 오래되었습니다 26-60 영향을 모집단의',
'Number/Percentage of affected population that is Female & Aged 6-12': '번호/백분율 여성 & 오래되었습니다 6-12 영향을 모집단의',
'Number/Percentage of affected population that is Female & Aged 61+': '번호/백분율 여성 & 유효 61+ 영향을 모집단의',
'Number/Percentage of affected population that is Male & Aged 0-5': '번호/백분율 남성 & 오래되었습니다 0-5 영향을 모집단의',
'Number/Percentage of affected population that is Male & Aged 13-17': '번호/백분율 남성 & 오래되었습니다 13-17 영향을 모집단의',
'Number/Percentage of affected population that is Male & Aged 18-25': '번호/백분율 남성 & 오래되었습니다 18-25 영향을 모집단의',
'Number/Percentage of affected population that is Male & Aged 26-60': '번호/백분율 남성 & 오래되었습니다 26-60 영향을 모집단의',
'Number/Percentage of affected population that is Male & Aged 6-12': '번호/백분율 남성 & 오래되었습니다 6-12 영향을 모집단의',
'Number/Percentage of affected population that is Male & Aged 61+': '번호/백분율 남성 및 유효 61+ 영향을 모집단의',
'Nursery Beds': 'nursery 의료용',
'Nutrition problems': 'nutrition 문제점',
'OK': '확인',
'OR Reason': '또는 이유',
'OR Status Reason': '상태 이유',
'OR Status': '또는 상태',
'Observer': '관찰자',
'Obsolete': '사용되지 않음',
'Office Address': '사무실 주소',
'Office Details': 'office 세부사항',
'Office Phone': '사무실 전화번호',
'Office added': 'office 추가',
'Office deleted': 'office 삭제됨',
'Office updated': '사무실 갱신된',
'Office': '사무실',
'Offices & Warehouses': '사무실 및 창고',
'Offline Sync (from USB/File Backup)': '오프라인 동기화 (usb/백업에서)',
'Offline Sync': '오프라인 동기화',
'Older people as primary caregivers of children': '이전 사용자 하위의 caregivers 차)',
'Older people in care homes': '이전 사용자 관리 홈)',
'Older people participating in coping activities': '이전 사용자 활동을 복사하는 참여',
'Older person (>60 yrs)': '이전 (>60 세의)',
'On by default? (only applicable to Overlays)': '기본적으로? (적용) 오버레이',
'On by default?': '기본적으로?',
'One Time Cost': '일회 비용',
'One time cost': '일회 비용',
'One-time costs': '일회 비용',
'One-time': '하나-시간',
'Oops! something went wrong on our side.': ':NONE. 다른 우리 측의 잘못되었으며.',
'Opacity (1 for opaque, 0 for fully-transparent)': '불투명도 (opaque 의 1, 완전히 투명합니다 0)',
'Open area': '열린 영역',
'Open recent': '최신 문서 열기',
'Open': '열기',
'Operating Rooms': '미팅룸 운영',
'Optional link to an Incident which this Assessment was triggered by.': '선택적 링크 이 평가에 의해 트리거된 인시던트에.',
'Optional': '선택적',
'Optional. If you wish to style the features based on values of an attribute, select the attribute to use here.': '선택사항입니다. 사용자가 양식 속성의 값을 기반으로 하는 기능, 여기에 속성을 선택하십시오.',
'Optional. In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': '선택사항입니다. geoserver, 이 작업공간에 네임스페이스 uri 입니다 (!). wfs getcapabilities 내에서 이 콜론 (:) featuretype 이름 부분.',
'Optional. The name of an element whose contents should be a URL of an Image file put into Popups.': '선택사항입니다. 컨텐츠가 이미지 파일의 url 팝업을 넣어야 하는 요소의 이름입니다.',
'Optional. The name of an element whose contents should be put into Popups.': '선택사항입니다. 컨텐츠가 팝업을 넣어야 하는 요소의 이름입니다.',
'Optional. The name of the schema. In Geoserver this has the form http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.': '선택사항입니다. 스키마의 이름입니다. geoserver 이 양식을 http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.',
'Options': '옵션',
'Organization Details': '조직 세부사항',
'Organization Registry': '조직 레지스트리',
'Organization added': '조직 추가',
'Organization deleted': '조직 삭제',
'Organization updated': '조직 갱신',
'Organization': '조직',
'Organizations': '조직',
'Origin of the separated children': '원점은 분리된 하위',
'Other (describe)': '기타(설명)',
'Other (specify)': '기타 (자세히 기술)',
'Other Evidence': '다른 증거를',
'Other Faucet/Piped Water': '다른 faucet/물 파이프',
'Other Isolation': '다른 분리',
'Other Name': '기타 이름',
'Other activities of boys 13-17yrs before disaster': '기타 활동 boys 13-17yrs 전에 피해',
'Other activities of boys 13-17yrs': '기타 활동 boys 13-17yrs',
'Other activities of boys <12yrs before disaster': '기타 활동 boys <12yrs 재해 전에',
'Other activities of boys <12yrs': '기타 활동 boys <12yrs',
'Other activities of girls 13-17yrs before disaster': '기타 활동 girls 13-17yrs 재해 전에',
'Other activities of girls 13-17yrs': '기타 활동 girls 13-17yrs',
'Other activities of girls<12yrs before disaster': '기타 활동 girls<12yrs 전에 피해',
'Other activities of girls<12yrs': '기타 활동 girls<12yrs',
'Other alternative infant nutrition in use': '다른 대체 infant nutrition 사용',
'Other alternative places for study': '다른 대체 위치에 대한 연구',
'Other assistance needed': '기타 지원이 필요',
'Other assistance, Rank': '기타 지원, rank',
'Other current health problems, adults': '기타 문제점, 현재 상태 adults',
'Other current health problems, children': '다른 현재 health 문제점 하위',
'Other events': '기타 이벤트',
'Other factors affecting school attendance': '다른 학교 현황을 영향을',
'Other major expenses': '다른 주요 비용',
'Other non-food items': '기타 식품 항목',
'Other recommendations': '기타 권장사항',
'Other residential': '다른 지역에서',
'Other school assistance received': '다른 학교 지원을 받은',
'Other school assistance, details': '다른 학교, 세부사항',
'Other school assistance, source': '다른 학교 지원 소스',
'Other settings can only be set by editing a file on the server': '기타 설정은 서버에 있는 파일을 편집하여 설정할 수 있습니다',
'Other side dishes in stock': '다른 dishes 재고에',
'Other types of water storage containers': '다른 유형의 워터마크 저장영역 컨테이너',
'Other ways to obtain food': '다른 방법으로 확보하기 위해 식품',
'Other': '기타',
'Outbound Mail settings are configured in models/000_config.py.': '아웃바운드 메일 설정을 models/000_config. py 구성됩니다.',
'Outbox': '보낼 편지함',
'Outgoing SMS Handler': 'sms 전송 핸들러',
'Outgoing SMS handler': 'sms 전송 핸들러',
'Overall Hazards': '전체 위험',
'Overhead falling hazard': '오버헤드 폴백하기 위험',
'Overland Flow Flood': 'overland 플로우 플러드',
'Owned Resources': '소유한 자원',
'PIN number': 'PIN 번호',
'PIN': '핀',
'PL Women': 'pl 여성',
'Pack': '팩',
'Packs': '팩',
'Parameters': '매개변수',
'Parent Office': '상위 사무실',
'Parent needs to be of the correct level': '올바른 상위 레벨의 합니다',
'Parent needs to be set for locations of level': '상위 레벨의 위치를 설정해야 합니다',
'Parent needs to be set': '상위 설정해야 합니다',
'Parent': '상위',
'Parents/Caregivers missing children': '상위/caregivers 하위 누락',
'Partial': '일부',
'Participant': '참가자',
'Pashto': '파슈토어',
'Pass': '패스',
'Passport': '패스포트',
'Path': '경로',
'Patients': '환자',
'Peer Details': '피어 세부사항',
'Peer Registration Details': '피어 등록 세부사항',
'Peer Registration Request': '피어 등록 요청',
'Peer Registration': '피어 등록',
'Peer Type': '피어 유형',
'Peer UID': '피어 uid',
'Peer added': '피어 추가',
'Peer deleted': '피어 삭제',
'Peer not allowed to push': '피어 않을 수 누름',
'Peer registration request added': '피어 등록 요청 추가',
'Peer registration request deleted': '피어 등록 요청 삭제',
'Peer registration request updated': '피어 등록 요청이 갱신될',
'Peer updated': '피어 갱신된',
'Peer': '피어',
'Peers': '피어와',
'Pending Requests': '보류 중인 요청',
'Pending': '보류 중',
'People Needing Food': '사용자 하는 식품',
'People Needing Shelter': '사용자 shelter 필요',
'People Needing Water': '사용자 물이 필요',
'People Trapped': '사용자 트랩된',
'People': '사용자',
'Performance Rating': '성능 평가',
'Person 1': '사용자 1',
'Person 1, Person 2 are the potentially duplicate records': '사용자 1, 개인 2 잠재적으로 중복 레코드가 있습니다',
'Person 2': '사용자 2',
'Person De-duplicator': 'de 개인-duplicator',
'Person Details': '개인 세부사항',
'Person Registry': '사용자 레지스트리',
'Person added to Group': '그룹 구성원 추가',
'Person added to Team': '그룹 구성원 추가',
'Person added': '개인 추가됨',
'Person deleted': '개인 삭제',
'Person details updated': '사용자 세부사항 갱신',
'Person interviewed': '개인 interviewed',
'Person who has actually seen the person/group.': '실제로 사용자/그룹에 표시되는 있습니다.',
'Person': '사용자',
'Person/Group': '사용자/그룹',
'Personal Data': '개인 데이터',
'Personal Effects Details': '개인 효과 세부사항',
'Personal Effects': '개인 효과',
'Personal Map': '개인 맵',
'Personal Profile': '개인 프로파일',
'Personal impact of disaster': '개인 impact 피해',
'Persons in institutions': '사용자 단체)',
'Persons with disability (mental)': '개인 disability (정신적)',
'Persons with disability (physical)': '개인 disability (실제)',
'Persons': '개인',
'Phone 1': '전화 1',
'Phone 2': '전화 2',
'Phone': '전화',
'Phone/Business': '전화/비즈니스',
'Phone/Emergency': '전화/비상',
'Phone/Exchange (Switchboard)': '전화/exchange (교환원)',
'Photo Details': '사진 세부사항',
'Photo Taken?': '사진 가져옵니다?',
'Photo added': '사진 추가',
'Photo deleted': '사진 삭제',
'Photo updated': '사진 갱신',
'Photo': '사진',
'Photograph': '사진',
'Photos': '사진',
'Physical Description': '물리적 설명',
'Physical Safety': '물리적 안전',
'Picture upload and finger print upload facility': 'picture 업로드하고 finger print 업로드하는 기능',
'Picture': '그림',
'Place of Recovery': '대신 복구',
'Place': '장소',
'Places for defecation': '작업공간 defecation 에',
'Places the children have been sent to': '하위에도 전송되지 않은 작업공간',
'Playing': '재생',
'Please correct all errors.': '모든 오류를 정정하십시오.',
'Please enter a first name': '이름을 입력하십시오.',
'Please enter a site OR a location': '먼저 사이트를 입력하십시오 또는 위치',
'Please enter the first few letters of the Person/Group for the autocomplete.': '아래의 개인/그룹은 autocomplete 의 처음 몇 글자를 입력하십시오.',
'Please enter the recipient': '아래 수신인.',
'Please fill this!': '이 채우십시오!',
'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened.': '참고하고자 하는 URL 페이지, 발생할 것으로 예측되는 것과 실제로 발생한 것에 대한 설명을 제공해 주시기 바랍니다.',
'Please report here where you are:': '현재 위치를 여기에 알려주세요:',
'Please select another level': '다른 레벨을 선택해 주세요',
'Please select': '선택해주세요',
'Please sign-up with your Cell Phone as this allows us to send you Text messages. Please include full Area code.': '사인-귀하의 휴대폰에 이 같이 us 텍스트 메시지를 보낼 수 있습니다. 전체 지역 코드를 포함하십시오.',
'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved.': '자세한 질병 적절한 처리 및 문제가 되는 자세히 (숫자, 해당) 를 지정하십시오. 또한 상황은 개선될 수 제안을 추가할 수 있습니다.',
'Please use this field to record any additional information, including a history of the record if it is updated.': '이를 갱신되는 경우 이 필드에 추가 정보를, 레코드 히스토리를 비롯한 레코드 수.',
'Please use this field to record any additional information, including any Special Needs.': '이 필드에 추가 정보를 비롯하여 특수한 기록하십시오.',
'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.': '이 필드에 임의의 추가 정보 (예: ushahidi 인스턴스 id 를 기록하십시오. 이 경우 갱신된 레코드 히스토리를 포함합니다.',
'Pledge Support': 'pledge 지원',
'Point': '지점',
'Police': '경찰',
'Pollution and other environmental': '및 기타 환경 오염',
'Polygon reference of the rating unit': '다각형 참조는 등급 장치',
'Polygon': '다각형',
'Poor': '나쁨',
'Population Statistic Details': '인구 통계 세부사항',
'Population Statistic added': '인구 통계 추가',
'Population Statistic deleted': '인구 통계 삭제',
'Population Statistic updated': '인구 통계 갱신',
'Population Statistics': '인구 통계',
'Population and number of households': '채우기 및 households 의 번호',
'Population': '인구',
'Popup Fields': '팝업 필드',
'Popup Label': '팝업 레이블',
'Port Closure': '포트 처리완료',
'Position Catalog': '위치 카탈로그',
'Position Details': '위치 세부사항',
'Position added': '위치 추가',
'Position deleted': '위치 삭제',
'Position updated': '위치 갱신',
'Position': '위치',
'Positions': '위치',
'Postcode': '우편 번호',
'Poultry restocking, Rank': 'restocking 가금류, 랭크',
'Poultry': '가금류',
'Pounds': '파운드',
'Power Failure': '전원 장애',
'Pre-cast connections': '사전 cast 연결',
'Preferred Name': '선호하는 이름',
'Pregnant women': 'pregnant 여성',
'Preliminary': '예비',
'Presence Condition': '존재 조건',
'Presence Log': '인식 로그',
'Presence': '인식',
'Primary Occupancy': '기본 occupancy',
'Priority from 1 to 9. 1 is most preferred.': '1 에서 9 로 우선순위입니다. 1 가장 좋습니다.',
'Private': '개인용',
'Problem Administration': '문제점 관리',
'Problem Details': '출고 세부사항',
'Problem Group': '문제점 그룹',
'Problem Title': '문제점 제목',
'Problem added': '추가 문제점',
'Problem connecting to twitter.com - please refresh': '문제점 twitter.com-please refresh 연결',
'Problem deleted': '문제점 삭제됨',
'Problem updated': '문제점 갱신됨',
'Problem': '문제점',
'Problems': '문제',
'Procedure': '절차',
'Process Received Shipment': '프로세스 받은 운송',
'Process Shipment to Send': '프로세스 shipment 보내기',
'Profile': '프로파일',
'Project Details': '프로젝트 세부사항',
'Project Status': '프로젝트 상태',
'Project added': '프로젝트 추가',
'Project deleted': '프로젝트 삭제',
'Project has no Lat/Lon': '프로젝트 lat/₩.',
'Project updated': '프로젝트 갱신',
'Project': '프로젝트',
'Projection Details': '프로젝션 세부사항',
'Projection added': '프로젝션을 추가됩니다',
'Projection deleted': '프로젝션 삭제',
'Projection updated': '갱신된 투영',
'Projection': '프로젝션',
'Projections': '프로젝션',
'Projects': '프로젝트',
'Property reference in the council system': '등록 정보 참조, council 시스템에서',
'Protected resource': '보호 자원',
'Protection': '보호',
'Provide Metadata for your media files': '매체 파일에 대한 메타데이터 제공',
'Provide an optional sketch of the entire building or damage points. Indicate damage points.': '전체 빌드 또는 손상을 지점의 선택적 스케치가 제공합니다. 손상이 있음을 가리킵니다.',
'Proxy-server': '프록시 서버',
'Psychiatrics/Adult': 'psychiatrics/성인',
'Psychiatrics/Pediatric': 'psychiatrics/pediatric',
'Public Event': '공용 이벤트',
'Public and private transportation': '공용 및 개인용 교통',
'Public assembly': 'public 어셈블리',
'Public': 'public',
'Pull tickets from external feed': '외부 피드에서 티켓 pull',
'Punjabi': '펀잡어',
'Purchase Date': '구매일',
'Push tickets to external system': '티켓을 외부 시스템에 밀어넣습니다',
'Pyroclastic Flow': '화산암 유동',
'Pyroclastic Surge': 'pyroclastic surge',
'Python Serial module not available within the running Python - this needs installing to activate the Modem': 'python 직렬 모듈 사용 중인 python-이 내에 모뎀을 활성화하는 데 필요한 설치',
'Python needs the ReportLab module installed for PDF export': 'reportlab 모듈 사용 중인 python-이 내에서 pdf 출력 installing 합니다!',
'Quantity Committed': '커미트된 수량',
'Quantity Fulfilled': '이행되었을 수량',
'Quantity in Transit': '운송 중인 수량',
'Quantity': '수량',
'Quarantine': '검역소로 격리',
'Queries': '쿼리',
'Query': '쿼리',
'Queryable?': '조회?',
'RC frame with masonry infill': 'rc masonry infill 함께 프레임',
'RECORD A': '레코드 A',
'RECORD B': '레코드 B',
'Race': '레이스',
'Radio Callsign': '선택 callsign',
'Radiological Hazard': 'radiological 위험',
'Railway Accident': '철도 사고',
'Railway Hijacking': '철도 하이잭',
'Rain Fall': '비가 복귀',
'Rapid Assessment Details': '긴급 평가 세부사항',
'Rapid Assessment added': '긴급 평가 추가',
'Rapid Assessment deleted': '긴급 평가 삭제',
'Rapid Assessment updated': '긴급 평가 갱신',
'Rapid Assessment': '빠른 평가',
'Rapid Assessments & Flexible Impact Assessments': '긴급 평가 및 유연한 영향 평가',
'Rapid Assessments': '긴급 평가',
'Rapid Close Lead': 'rapid 닫으십시오 lead',
'Rapid Data Entry': '신속한 데이터 항목',
'Raw Database access': '원시 데이터베이스 액세스',
'Receive New Shipment': '수신 새 운송',
'Receive Shipment': '납품 받기',
'Receive this shipment?': '이 shipment?',
'Receive': 'receive',
'Received By Person': '에서 받은 사람',
'Received By': '입고자',
'Received Item Details': '수신된 항목 세부사항',
'Received Item deleted': '수신된 항목 삭제',
'Received Item updated': '수신된 항목 갱신',
'Received Shipment Details': '수신된 발송물 세부사항',
'Received Shipment canceled and items removed from Inventory': '수신된 shipment 취소되고 인벤토리에서 항목이 제거됩니다',
'Received Shipment canceled': '수신된 shipment 취소됨',
'Received Shipment updated': '수신된 shipment 갱신된',
'Received Shipments': '수신된 운송물을',
'Received': '받은 날짜',
'Receiving and Sending Items': '수신 및 송신 항목',
'Recipient': '받는 사람',
'Recipients': '수신인',
'Recommendations for Repair and Reconstruction or Demolition': '권장사항 수리 및 복원 또는 demolition 대한',
'Record Details': '레코드 세부사항',
'Record Saved': '레코드가 저장되었습니다',
'Record added': '레코드가 추가됨',
'Record any restriction on use or entry': 'use 또는 entry 에서 어떤 레코드 제한',
'Record deleted': '레코드 삭제됨',
'Record last updated': '마지막으로 갱신되 레코드',
'Record not found!': '레코드를 찾을 수 없습니다!',
'Record not found': '레코드를 찾지 못함',
'Record updated': '레코드 갱신됨',
'Record': '레코드',
'Recording and Assigning Assets': '자산 기록 및 할당',
'Records': '레코드(기록)',
'Recovery Request added': '복구 요청 추가',
'Recovery Request deleted': '복구 삭제 요청',
'Recovery Request updated': '복구 갱신된 요청',
'Recovery Request': '복구 요청',
'Recovery Requests': '복구 요청',
'Recovery': '복구',
'Recurring Cost': '반복 비용',
'Recurring cost': '반복 비용',
'Recurring costs': '반복 비용',
'Recurring': '반복',
'Red Cross / Red Crescent': '빨간색/red crescent',
'Red': '적색',
'Reference Document': '참조 문서',
'Refresh Rate (seconds)': '새로 고치기 비율(초)',
'Region Location': '지역 위치',
'Regional': '지역',
'Regions': '지역/지구',
'Register Person into this Camp': '이 등록 캠프 에',
'Register Person into this Shelter': '이 등록 shelter 로',
'Register Person': '사용자 등록',
'Register them as a volunteer': '레지스터 이를 자발적으로)',
'Register': '등록',
'Registered People': '등록된 사용자',
'Registered users can': '등록된 사용자 수',
'Registration Details': '등록 세부사항',
'Registration added': '등록 추가',
'Registration entry deleted': '등록 항목 삭제',
'Registration is still pending approval from Approver (%s) - please wait until confirmation received.': '등록 여전히 승인자 (%s) - 승인 보류 중입니다. 받은 확인 때까지 기다리십시오.',
'Registration updated': '등록 갱신',
'Registration': '등록',
'Rehabilitation/Long Term Care': 'rehabilitation/장기 지원',
'Rejected': '거부된 날짜',
'Relief Team': '팀 릴리프',
'Relief': '릴리프',
'Religious Leader': '종교 리더',
'Religious': '종교',
'Relocate as instructed in the <instruction>': '에 지시된 대로 재배치<instruction>',
'Remove Asset from this event': '이 이벤트 자산 제거',
'Remove Asset from this scenario': '이 시나리오는 자산 제거',
'Remove Facility from this event': '이 이벤트에서 기능 제거',
'Remove Facility from this scenario': '이 시나리오는 에서 설비 제거',
'Remove Human Resource from this event': '이 이벤트에서 인적 자원 제거',
'Remove Human Resource from this scenario': '이 시나리오에서 인적 자원 제거',
'Remove Item from Inventory': '재고 항목 제거',
'Remove Map Configuration from this event': '이 이벤트는 맵 구성 제거',
'Remove Map Configuration from this scenario': '이 시나리오에서는 맵 구성 제거',
'Remove Person from Group': '멤버쉽 삭제',
'Remove Person from Team': '멤버쉽 삭제',
'Remove this asset from this event': '이 이벤트에서 이 자산 제거',
'Remove this asset from this scenario': '이 시나리오에서 이 자산 제거',
'Remove': '제거',
'Removed from Group': '멤버쉽 삭제',
'Removed from Team': '멤버쉽 삭제',
'Repair': '수리',
'Repaired': '수리',
'Repeat your password': '암호 반복',
'Replace if Master': '대체 마스터 경우',
'Replace if Newer': '최신인 경우 바꾸기',
'Replace': '대체',
'Report Another Assessment...': '보고서는 다른 평가...',
'Report Details': '보고서 세부사항',
'Report Resource': '자원 보고서',
'Report Types Include': '보고서 유형',
'Report added': '보고서 추가',
'Report deleted': '보고서가 삭제됨',
'Report my location': '보고서 내 위치',
'Report the contributing factors for the current EMS status.': '현재 ems 상태에 대한 기여 요인.',
'Report the contributing factors for the current OR status.': '현재 또는 상태에 대한 기여 요인.',
'Report them as found': '이를 찾을 보고서',
'Report them missing': '보고서는 누락',
'Report updated': '보고서를 갱신했습니다.',
'Report': '보고',
'Reporter Name': '보고자 이름',
'Reporter': '보고자',
'Reporting on the projects in the region': '프로젝트 영역에 대한 보고',
'Reports': '보고서',
'Request Added': '요청 추가',
'Request Canceled': '요청이 취소되었습니다.',
'Request Details': '요청 세부사항',
'Request From': '요청 전송처',
'Request Item Details': '항목 세부사항 요청',
'Request Item added': '요청 항목 추가됨',
'Request Item deleted': '요청 항목 삭제',
'Request Item from Available Inventory': '요청 항목 사용 명세',
'Request Item updated': '요청 항목 갱신',
'Request Item': '요청 항목',
'Request Items': '항목을 요청',
'Request Status': '요청 상태',
'Request Type': '요청 유형',
'Request Updated': '업데이트 항목 요청',
'Request added': '요청 추가',
'Request deleted': '삭제된 요청',
'Request for Role Upgrade': '요청 역할 업그레이드',
'Request updated': '업데이트 항목 요청',
'Request': 'request',
'Request, Response & Session': '요청, 응답 및 세션',
'Requested By Facility': '요청된 설비',
'Requested By': '요청자',
'Requested From': '요청된',
'Requested Items': '요청된 항목',
'Requested by': '요청자',
'Requested on': '요구됩니다.',
'Requested': '요청됨',
'Requester': '요청자',
'Requests Management': '요청 관리',
'Requests': '요청',
'Requires Login!': '로그인이 필요합니다!',
'Reset Password': '비밀번호 재설정',
'Reset': '재설정',
'Resolve Conflict': '충돌 해결',
'Resolve link brings up a new screen which helps to resolve these duplicate records and update the database.': '분석할 링크 이 중복 레코드를 해결한 후 데이터베이스를 갱신하는 데 도움이 되는 새 화면 불러옵니다.',
'Resolve': '해결',
'Resource Details': '자원 세부사항',
'Resource added': '자원 추가',
'Resource deleted': '자원 삭제됨',
'Resource updated': '자원이 갱신됨',
'Resource': '자원',
'Resources': '자원',
'Respiratory Infections': '호흡기 infections',
'Response': '응답',
'Restricted Access': '제한된 액세스',
'Restricted Use': '사용 제한',
'Results': '결과',
'Retail Crime': '소매 범죄',
'Retrieve Password': '암호 검색',
'Return to Request': '요청 리턴',
'Return': '수익',
'Returned From': '리턴자',
'Returned': '반품됨',
'Review Incoming Shipment to Receive': '검토 수신 선적 수신',
'Rice': '라이스',
'Riot': 'riot',
'River Details': '세부사항 강',
'River added': '추가된 강',
'River deleted': '삭제된 강',
'River updated': '갱신된 강',
'River': '강',
'Rivers': '강',
'Road Accident': '도로 사고',
'Road Closed': '닫혔습니다.',
'Road Conditions': '조건 로드',
'Road Delay': '도로 지연',
'Road Hijacking': '도로 하이잭',
'Road Usage Condition': '도로 사용 조건',
'Role Details': '역할 세부사항',
'Role Required': '필요한 역할',
'Role Updated': '역할 갱신',
'Role added': '역할 추가됨',
'Role deleted': '역할이 삭제됨',
'Role updated': '역할 갱신',
'Role': '역할',
'Role-based': '역할 기반',
'Roles Permitted': '허용되는 역할',
'Roles': '역할',
'Roof tile': 'roof 바둑판식',
'Roofs, floors (vertical load)': 'roofs, 바닥 (수직 load)',
'Room Details': '강의실 세부사항',
'Room added': '미팅룸 추가됩니다',
'Room deleted': '삭제된 강의실',
'Room updated': '미팅룸 갱신된',
'Room': '룸',
'Rooms': '회의실',
'Rows in table': '테이블의 행',
'Rows selected': '평균 행',
'Run Interval': '실행 간격',
'Running Cost': '운전 비용',
'Safe environment for vulnerable groups': '안전한 환경에서 취약한 그룹',
'Safety Assessment Form': '안전 평가 양식',
'Safety of children and women affected by disaster?': '여성과 어린이들의 안전이 재해로 인해 영향을 받습니까?',
'Sahana Administrator': 'sahana 관리자',
'Sahana Blue': 'sahana 파란색',
'Sahana Community Chat': 'sahana 커뮤니티 대화',
'Sahana Eden <=> Other': 'sahana eden <=> 기타',
'Sahana Eden Humanitarian Management Platform': 'sahana eden humanitarian 관리 플랫폼',
'Sahana Eden Website': 'sahana eden 웹 사이트',
'Sahana Green': 'sahana 초록색',
'Sahana Steel': 'sahana steel',
'Sahana access granted': 'sahana 액세스 권한',
'Salted Fish': 'salted fish',
'Sanitation problems': 'sanitation 문제점',
'Saturday': '토요일',
'Save': '저장',
'Saved.': '저장했습니다.',
'Saving...': '저장 중...',
'Scale of Results': '결과의 스케일',
'Scenario Details': '시나리오 세부사항',
'Scenario added': '시나리오가 추가되었습니다',
'Scenario deleted': '시나리가 삭제되었습니다',
'Scenario updated': '시나리오 갱신된',
'Scenario': '시나리오',
'Scenarios': '시나리오',
'Schedule': '스케줄',
'Schema': '스키마',
'School Closure': '학교 처리완료',
'School Lockdown': '잠금 학교',
'School Teacher': '고등학교 교사',
'School activities': '학교 활동',
'School assistance': '학교 지원',
'School attendance': '학교 현황',
'School destroyed': '학교 destroyed',
'School heavily damaged': '학교에 많이 손상됨.',
'School tents received': '학교에서 tents 받은',
'School tents, source': 'tents 학교, 소스',
'School used for other purpose': '기타 다른 용도로 학교',
'School': '학교',
'School/studying': '학교/연구하여',
'Schools': '학교',
'Search Activities': '활동 검색',
'Search Activity Report': '활동 보고서 검색',
'Search Addresses': '주소 검색',
'Search Alternative Items': '대체 항목 검색',
'Search Assessment Summaries': '평가 요약 검색',
'Search Assessments': '검색 평가',
'Search Asset Log': '자산 로그 검색',
'Search Assets': '자산 검색',
'Search Baseline Type': '검색 기준 유형',
'Search Baselines': '기준선 검색',
'Search Brands': '검색 브랜드',
'Search Budgets': '검색 예산',
'Search Bundles': '번들 검색',
'Search Camp Services': '자녀를 서비스 검색',
'Search Camp Types': '자녀를 유형 검색',
'Search Camps': '검색 camps',
'Search Catalog Items': '카탈로그 항목 검색',
'Search Catalogs': '카탈로그 검색',
'Search Certificates': '인증서 검색',
'Search Certifications': '인증 검색',
'Search Checklists': '검색 목록',
'Search Cluster Subsectors': '클러스터의 검색 subsectors',
'Search Clusters': '클러스터에서 검색',
'Search Commitment Items': '검색 항목 확약',
'Search Commitments': '검색 commitments',
'Search Competencies': '검색 능력',
'Search Competency Ratings': '능력 등급 검색',
'Search Contact Information': '연락처 검색',
'Search Contacts': '연락처 검색',
'Search Course Certificates': 'certicates 검색 과정',
'Search Courses': '과정 검색',
'Search Credentials': '신임 검색',
'Search Documents': '문서 검색',
'Search Donors': '검색 donors',
'Search Entries': '항목 검색',
'Search Events': '이벤트 검색',
'Search Facilities': '검색 기능',
'Search Feature Layers': '검색 기능은 계층',
'Search Flood Reports': '검색 홍수 보고서',
'Search Groups': '그룹 검색',
'Search Human Resources': '인적 자원 검색',
'Search Identity': 'id 검색',
'Search Images': '이미지 검색',
'Search Impact Type': '검색 유형 영향',
'Search Impacts': '영향 검색',
'Search Incident Reports': '검색 인시던트 보고서',
'Search Inventory Items': '재고 항목 검색',
'Search Inventory items': '재고 항목 검색',
'Search Item Categories': '항목 카테고리 검색',
'Search Item Packs': '항목 을 검색',
'Search Items': '아이템 검색',
'Search Job Roles': '작업 역할 검색',
'Search Keys': '검색 키',
'Search Kits': '상품 검색',
'Search Layers': '계층 검색',
'Search Level 1 Assessments': '레벨 1 평가 검색',
'Search Level 2 Assessments': '레벨 2 평가 검색',
'Search Locations': '검색 위치',
'Search Log Entry': '로그 항목 검색',
'Search Map Configurations': '맵에서 구성 검색',
'Search Markers': '검색 마커',
'Search Members': '구성원 검색',
'Search Membership': '구성원 검색',
'Search Memberships': '구성원 검색',
'Search Missions': '검색 임무',
'Search Need Type': '필요한 검색 유형',
'Search Needs': '필요한 검색',
'Search Offices': 'offices 검색',
'Search Organizations': '조직 검색',
'Search Peer': '피어 검색',
'Search Personal Effects': '개인 검색 효과',
'Search Persons': '개인 검색',
'Search Photos': '사진 검색',
'Search Population Statistics': '인구 통계 검색',
'Search Positions': '검색 위치',
'Search Problems': '검색 문제점',
'Search Projections': '검색 투영',
'Search Projects': '검색 프로젝트',
'Search Rapid Assessments': '빠른 검색 수행',
'Search Received Items': '수신된 항목 검색',
'Search Received Shipments': '수신된 운송물을 검색',
'Search Records': '검색 레코드',
'Search Registations': 'registations 검색',
'Search Registration Request': '등록 요청 검색',
'Search Report': '보고서 검색',
'Search Request Items': '요청 항목 검색',
'Search Request': '검색 요청',
'Search Requested Items': '요청된 항목 검색',
'Search Requests': '요청 검색',
'Search Resources': '자원 검색',
'Search Rivers': '검색 강',
'Search Roles': '역할 검색',
'Search Rooms': '회의실 검색',
'Search Scenarios': '검색 시나리오',
'Search Sections': '검색 섹션',
'Search Sectors': '섹터를 검색',
'Search Sent Items': '전송된 항목 검색',
'Search Sent Shipments': '송신된 운송물을 검색',
'Search Service Profiles': '검색 서비스 프로파일',
'Search Settings': '검색 설정',
'Search Shelter Services': '검색 shelter 서비스',
'Search Shelter Types': '검색 shelter 유형',
'Search Shelters': '검색 shelters',
'Search Skill Equivalences': 'equivalences 스킬 검색',
'Search Skill Provisions': '검색 기술 조항',
'Search Skill Types': '기술 유형 검색',
'Search Skills': '기술 검색',
'Search Solutions': '해결방안 검색',
'Search Staff Types': '직원 유형 검색',
'Search Staff or Volunteer': '스태프 또는 지원자 검색',
'Search Status': '검색 상태',
'Search Subscriptions': '자동 통지 등록 검색',
'Search Subsectors': '검색 subsectors',
'Search Support Requests': '지원 요청 검색',
'Search Tasks': '작업 검색',
'Search Teams': '팀 검색',
'Search Themes': '주제 검색',
'Search Tickets': '티켓 검색',
'Search Tracks': '트랙 검색',
'Search Trainings': '검색 trainings',
'Search Twitter Tags': '트위터 태그 검색',
'Search Units': '장치 검색',
'Search Users': '사용자 검색',
'Search Volunteer Availability': '가용성 지원자 검색',
'Search Volunteers': '검색 volunteers',
'Search Warehouses': '웨어하우스에서 검색',
'Search and Edit Group': '검색 및 그룹 편집',
'Search and Edit Individual': '검색 및 개별 편집',
'Search for Staff or Volunteers': '검색 직원 또는 volunteers 대한',
'Search for a Location by name, including local names.': '위치 이름, 로컬 이름 검색.',
'Search for a Person': '사용자 검색',
'Search for a Project': '프로젝트 검색',
'Search for a shipment by looking for text in any field.': '모든 필드에 텍스트를 찾아 운송 검색하십시오.',
'Search for a shipment received between these dates': '이 날짜 사이에 수신된 발송물에 대한 검색',
'Search for an Organization by name or acronym': '이름 또는 약어 로 조직에 대한 검색',
'Search for an Organization by name or acronym.': '이름 또는 acronym 의해 조직을 검색하려면.',
'Search for an asset by text.': '텍스트 자산을 검색하십시오.',
'Search for an item by category.': '카테고리 항목을 검색하십시오.',
'Search for an item by text.': '텍스트 항목을 검색하십시오.',
'Search for asset by country.': '국가 의해 자산을 검색하십시오.',
'Search for office by country.': '국가에 따라 사무실 검색하십시오.',
'Search for office by organization.': '조직에서 사무실 검색하십시오.',
'Search for office by text.': '텍스트 검색 사무실.',
'Search for warehouse by country.': '국가 웨어하우스 검색하십시오.',
'Search for warehouse by organization.': '조직 웨어하우스 검색하십시오.',
'Search for warehouse by text.': '텍스트 웨어하우스 검색하십시오.',
'Search here for a person record in order to:': '여기에 검색 개인 레코드를 in order to:',
'Search messages': '메시지 검색',
'Search': '검색',
'Searching for different groups and individuals': '다른 그룹 및 개인에 대한 검색',
'Secondary Server (Optional)': '차 서버 (선택적)',
'Seconds must be a number between 0 and 60': '초는 0과 60 사이의 숫자이어야 합니다.',
'Section Details': '섹션 세부사항',
'Section deleted': '섹션 삭제',
'Section updated': '갱신된 절',
'Sections': '섹션',
'Sector Details': '섹터 세부사항',
'Sector added': '추가된 섹터',
'Sector deleted': '삭제된 섹터',
'Sector updated': '갱신된 부문',
'Sector': '섹터',
'Sector(s)': '섹터 (s)',
'Sectors': '섹터',
'Security Status': '보안 상태',
'Security problems': '보안 문제점',
'See All Entries': '모든 항목을 참조하십시오',
'See all': '모두 보기',
'See unassigned recovery requests': '지정되지 않은 복구 요청을 참조하십시오.',
'Select Items from the Request': '요청 항목을 선택하십시오.',
'Select Items from this Inventory': '이 재고 품목 선택',
'Select a location': '위치 선택',
'Select a question from the list': '이 목록에서 질문을 선택하십시오',
'Select a range for the number of total beds': '의료용 총 수에 대한 범위를 선택하십시오.',
'Select all that apply': '적용되는 모든 항목 선택',
'Select an Organization to see a list of offices': '조직 사무실 목록 보려면,이 옵션을 선택하십시오.',
'Select the overlays for Assessments and Activities relating to each Need to identify the gap.': '각 관련된 평가 및 활동에 대한 오버레이하는 차이를 식별해야 합니다.',
'Select the person assigned to this role for this project.': '이 프로젝트에 대해 이 역할에 지정된 사용자를 선택하십시오.',
'Select to show this configuration in the Regions menu.': 'region 이 메뉴에서 구성을 선택하십시오.',
'Selects whether to use a Modem, Tropo or other Gateway for sending out SMS': 'sms 전송에 여부에 대한 모뎀, tropo 또는 기타 게이트웨이 선택합니다',
'Send Alerts using Email &/or SMS': '경보 전자 우편 및/또는 sms 를 사용하여 송신',
'Send Commitment as Shipment': '확약 (shipment 송신',
'Send New Shipment': '새 shipment 송신',
'Send Notification': '통지 전송',
'Send Shipment': '배송은 송신',
'Send a message to this person': '이 사용자에게 송신',
'Send a message to this team': '이 팀에 보내기',
'Send from %s': '% 에 보내기',
'Send message': '메시지 보내기',
'Send new message': '새 메시지 송신',
'Send': '보내기',
'Sends & Receives Alerts via Email & SMS': '송신합니다 & 수신하고 경보를 전자 우편 및 sms 통해',
'Senior (50+)': 'senior (50+)',
'Sent By Person': '에서 보낸 사람',
'Sent By': '보낸 사람',
'Sent Item Details': '보낸 항목 세부사항',
'Sent Item deleted': '보낸 항목 삭제',
'Sent Item updated': '보낸 항목 갱신',
'Sent Shipment Details': '송신된 발송물 세부사항',
'Sent Shipment canceled and items returned to Inventory': '송신된 shipment 취소되고 항목을 명세로 리턴됩니다',
'Sent Shipment canceled': '송신된 shipment 취소됨',
'Sent Shipment updated': '송신된 shipment 갱신된',
'Sent Shipments': '송신된 운송물을',
'Sent': '보낸 문서',
'Separated children, caregiving arrangements': '분리된 하위, caregiving 협정',
'Serial Number': '일련 번호',
'Series': '시리즈',
'Server': '서버',
'Service Catalog': '서비스 카탈로그',
'Service or Facility': '서비스 또는 기능',
'Service profile added': '서비스 프로파일 추가',
'Service profile deleted': '서비스 프로파일 삭제',
'Service profile updated': '서비스 프로파일 갱신',
'Service': '서비스',
'Services Available': '서비스 사용 가능',
'Services': '서비스',
'Set Base Site': '기본 사이트 설정',
'Set By': '다음 기준으로 설정',
'Set True to allow editing this level of the location hierarchy by users who are not MapAdmins.': 'true 누가 mapadmins 아닌 사용자가 위치 계층 구조 레벨을 편집할 수 있도록 설정하십시오.',
'Setting Details': '세부사항 설정',
'Setting added': '설정 추가',
'Setting deleted': '삭제된 설정',
'Setting updated': '갱신 설정',
'Settings updated': '설정 갱신',
'Settings were reset because authenticating with Twitter failed': '인증 설정을 twitter 로 재설정된 때문에 실패했습니다.',
'Settings which can be configured through the web interface are available here.': '웹 인터페이스를 통해 구성할 수 있는 설정은 다음과 같습니다.',
'Settings': '설정',
'Severe': 'severe',
'Severity': '심각도',
'Share a common Marker (unless over-ridden at the Feature level)': '공통 표시문자 (겹쳐썼기 기능 레벨이 아니면 )를 공유하시오.',
'Shelter & Essential NFIs': '피난처 & 기본적인 NFI들',
'Shelter Details': '피난처 세부사항',
'Shelter Name': '피난처이름',
'Shelter Registry': 'shelter 레지스트리',
'Shelter Service Details': 'shelter 서비스 세부사항',
'Shelter Service added': 'shelter 서비스 추가',
'Shelter Service deleted': 'shelter 서비스가 삭제되었습니다',
'Shelter Service updated': 'shelter 서비스 갱신',
'Shelter Service': 'shelter 서비스',
'Shelter Services': 'shelter 서비스',
'Shelter Type Details': 'shelter 유형 세부사항',
'Shelter Type added': 'shelter 유형 추가',
'Shelter Type deleted': 'shelter 유형 삭제',
'Shelter Type updated': 'shelter 유형 갱신',
'Shelter Type': 'shelter 유형',
'Shelter Types and Services': 'shelter 유형 및 서비스',
'Shelter Types': 'shelter 유형',
'Shelter added': '은신처가 더해졌다',
'Shelter deleted': '은신처가 지워젔다',
'Shelter updated': 'shelter 갱신',
'Shelter': '피난처',
'Shelter/NFI Assistance': 'shelter/nfi 지원',
'Shipment Created': '작성된 운송물',
'Shipment Items received by Inventory': '선적 항목 받은 재고',
'Shipment Items sent from Inventory': '선적 항목 보낸 명세',
'Shipment Items': '선적 항목',
'Shipment to Send': 'shipment 보내십시오',
'Shipments To': '운송물을 수',
'Shipments': '납품',
'Shooting': '해결',
'Short Assessment': '짧은 평가',
'Short Description': '간단한 설명',
'Show Checklist': '체크 표시',
'Show Details': '세부사항 표시',
'Show Map': '맵 표시',
'Show Region in Menu?': '메뉴에서 region?',
'Show on Map': '맵에 표시',
'Show on map': '맵에 표시',
'Sign-up as a volunteer': '사인 까지 자발적으로)',
'Sign-up for Account': '계정에 대한 사인업',
'Sign-up succesful - you should hear from us soon!': '사인업 성공-다음에 즉시 에서 들을 합니다!',
'Sindhi': '신디어',
'Site Administration': '사이트 관리',
'Site': '사이트',
'Situation Awareness & Geospatial Analysis': '상황 인식 및 geospatial 분석',
'Situation': '상황',
'Sketch': '스케치',
'Skill Catalog': '기술 카탈로그',
'Skill Details': '기술 항목 정보',
'Skill Equivalence Details': '기술 동급에 대한 세부사항',
'Skill Equivalence added': '기술 동급 내용이 추가되었습니다.',
'Skill Equivalence deleted': '기술 동급 내용이 삭제되었습니다',
'Skill Equivalence updated': '기술 동급내용이 업데이트되었습니다',
'Skill Equivalence': '기술 equivalence',
'Skill Equivalences': '기술 동급들',
'Skill Provision Catalog': '기술 조항 카탈로그',
'Skill Provision Details': '기술 조항 세부사항',
'Skill Provision added': '기술 제공이 추가되었습니다.',
'Skill Provision deleted': '기술 조항이 삭제되었습니다',
'Skill Provision updated': '기술 조항이 업데이트되었습니다',
'Skill Provision': '기술 제공',
'Skill Provisions': '기술 조항들',
'Skill Status': '항목 상태',
'Skill TYpe': '기술 유형',
'Skill Type Catalog': '기술 유형 카탈로그',
'Skill Type Details': '기술 유형 세부사항',
'Skill Type added': '기술 유형 추가',
'Skill Type deleted': '기술 유형 삭제',
'Skill Type updated': '기술 유형 갱신',
'Skill Types': '스킬 유형',
'Skill added': '기술 추가',
'Skill deleted': '스킬 삭제',
'Skill updated': '기술 갱신',
'Skill': '기술',
'Skills Catalog': '기술 카탈로그',
'Skills Management': '기술 항목 관리',
'Skills': '기술',
'Skype ID': 'skype id',
'Slope failure, debris': '기울기가 실패, 이물질을',
'Small Trade': '소규모 거래',
'Smoke': '연기',
'Snapshot Report': '스냅샷 보고서',
'Snapshot': '스냅샷',
'Snow Fall': '눈 복귀',
'Snow Squall': '눈 squall',
'Soil bulging, liquefaction': '토양 bulging, liquefaction',
'Solid waste': '솔리드 폐기물',
'Solution Details': '솔루션 세부사항',
'Solution Item': '솔루션 항목',
'Solution added': '솔루션 추가',
'Solution deleted': '솔루션 삭제',
'Solution updated': '솔루션 갱신',
'Solution': 'SOLUTION',
'Solutions': '솔루션',
'Some': 'SOME',
'Sorry that location appears to be outside the area of the Parent.': '죄송합니다. 해당 위치에 상위 영역 외부에 나타납니다.',
'Sorry that location appears to be outside the area supported by this deployment.': '죄송합니다. 해당 위치에 이 deployment 의해 지원되는 영역 외부에 나타납니다.',
'Sorry, I could not understand your request': '죄송합니다, 지금 사용자의 요청을 이해할 수 없습니다.',
'Sorry, only users with the MapAdmin role are allowed to create location groups.': '죄송합니다, mapadmin 역할을 가진 사용자만 위치 그룹을 작성할 수 있습니다.',
'Sorry, only users with the MapAdmin role are allowed to edit these locations': '죄송합니다, mapadmin 역할을 가진 사용자만 이 위치를 편집할 수 있습니다',
'Sorry, something went wrong.': '죄송합니다.',
'Sorry, that page is forbidden for some reason.': '죄송합니다, 페이지 어떤 이유로 금지됩니다.',
'Sorry, that service is temporary unavailable.': '죄송합니다, 서비스 임시 사용 불가능합니다.',
'Sorry, there are no addresses to display': '죄송합니다, 주소를 표시할 수 있습니다',
'Source ID': '소스 ID',
'Source Time': '자원 시간',
'Source': 'SOURCE',
'Sources of income': '수입원',
'Space Debris': '우주 잔해',
'Spanish': '스페인어',
'Special Ice': '특수 얼음',
'Special Marine': '특수 marine',
'Specialized Hospital': '특수화된 병원',
'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.': '특정 영역 (예: 빌드/미팅룸) 위치 내에서 이 사람/그룹이 표시됩니다.',
'Specific locations need to have a parent of level': '특정 위치에 상위 레벨의 있어야 합니다',
'Specify a descriptive title for the image.': '이미지에 대한 설명적 제목을 지정하십시오.',
'Specify the bed type of this unit.': '이 장치의 bed 유형을 지정하십시오.',
'Specify the number of available sets': '사용 가능한 세트 수를 지정하십시오.',
'Specify the number of available units (adult doses)': '사용 가능한 장치 (성인 doses) 의 번호를 지정하십시오.',
'Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions': 'ren-lactate 또는 동급 솔루션 중 사용 가능한 장치 (litres) 의 번호를 지정하십시오.',
'Specify the number of sets needed per 24h': '24h 당 필요한 세트 수를 지정하십시오.',
'Specify the number of units (adult doses) needed per 24h': '24h 당 필요한 장치 (성인 doses) 의 번호를 지정하십시오.',
'Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h': 'ren-lactate 또는 24h 당 필요한 동등한 솔루션의 유닛 (litres) 의 번호를 지정하십시오.',
'Spherical Mercator?': '구면 mercator?',
'Spreadsheet Importer': '스프레드시트 임포터',
'Spreadsheet uploaded': '스프레드시트 업로드',
'Spring': '봄',
'Staff & Volunteers': '직원 및 volunteers',
'Staff ID': '직원 id',
'Staff Member Details': '스태프 구성원 세부사항',
'Staff Members': '스태프 구성원',
'Staff Record': '직원 레코드',
'Staff Type Details': '직원 유형 세부사항',
'Staff Type added': '직원 유형 추가',
'Staff Type deleted': '직원 유형 삭제',
'Staff Type updated': '직원 유형 갱신',
'Staff Types': '직원 유형',
'Staff and Volunteers': '직원 및 volunteers',
'Staff member added': '스태프 구성원 추가',
'Staff present and caring for residents': '스태프 및 해결방법 거주자에게 대한',
'Staff': '스태프',
'Staffing': '스탭핑',
'Stairs': '계단',
'Start Date': '시작 날짜',
'Start date': '시작 날짜',
'Start of Period': '기간 시작',
'State': '주(US 전용)',
'Stationery': '개인양식',
'Status Report': '상태 보고서',
'Status Updated': '상태 갱신',
'Status added': '상태 추가',
'Status deleted': '상태 삭제',
'Status of clinical operation of the facility.': '기능의 임상 조작 상태.',
'Status of general operation of the facility.': 'facility 일반 조작 상태.',
'Status of morgue capacity.': 'morgue 용량 상태.',
'Status of operations of the emergency department of this hospital.': '이 병원 의 긴급 부서의 조작 상태.',
'Status of security procedures/access restrictions in the hospital.': '병원 보안 절차를/액세스 제한 상태.',
'Status of the operating rooms of this hospital.': '이 병원 운영 체제입니다 공간의 상태.',
'Status updated': '상태 갱신',
'Status': 'STATUS',
'Steel frame': '강철 프레임',
'Stolen': '분실',
'Store spreadsheets in the Eden database': '상점 스프레드시트는 eden 데이터베이스의',
'Storeys at and above ground level': 'storeys 및 위 접지선 레벨',
'Storm Force Wind': 'force 심한 바람',
'Storm Surge': '스톰 surge',
'Street Address': '주소',
'Strong Wind': '강한 바람',
'Structural Hazards': '구조적 위험',
'Structural': '구조적',
'Style Field': '양식 필드',
'Style Values': '스타일 값',
'Sub-type': '하위 유형',
'Submission successful - please wait': '제출 성공-기다리십시오',
'Submission successful - please wait...': '제출 성공-기다리십시오...',
'Submit New (full form)': '새 제출 (전체 양식)',
'Submit New (triage)': '새 제출 (triage)',
'Submit New': '새로 제출',
'Submit a request for recovery': '복구 요청 제출',
'Submit new Level 1 assessment (full form)': '제출 새 레벨 1 평가 (전체 양식)',
'Submit new Level 1 assessment (triage)': '제출 새 레벨 1 평가 (triage)',
'Submit new Level 2 assessment': '제출 새 레벨 2 평가',
'Subscription Details': '등록 세부사항',
'Subscription added': '등록이 추가됨',
'Subscription deleted': 'subscription 삭제',
'Subscription updated': '서브스크립션 갱신',
'Subscriptions': '등록',
'Subsector Details': 'subsector 세부사항',
'Subsector added': 'subsector 추가',
'Subsector deleted': 'subsector 삭제됨',
'Subsector updated': 'subsector 갱신',
'Subsistence Cost': 'subsistence 비용',
'Suggest not changing this field unless you know what you are doing.': '제안합니다. 사용자가 아니면 이 필드를 변경합니다.',
'Summary by Administration Level': '요약 레벨 관리',
'Summary': '요약',
'Sunday': '일요일',
'Supply Chain Management': '공급망 관리(SCM)',
'Support Request': '지원요청',
'Support Requests': '지원요청들',
'Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.': '지위계급의 목록작성을 도와줌으로서 위기 관리자들 그룹의 의사결정을 지원한다',
'Surgery': '수술',
'Survey Answer Details': '서베이 응답 세부사항',
'Survey Answer added': '서베이 응답 추가',
'Survey Answer deleted': '서베이 응답 삭제',
'Survey Answer updated': '서베이 응답 갱신',
'Survey Answer': '조사 응답',
'Survey Module': '모듈 조사',
'Survey Name': '설문 조사 이름',
'Survey Question Details': '서베이 질문 세부사항',
'Survey Question Display Name': '서베이 질문 표시 이름',
'Survey Question added': '서베이 질문 추가',
'Survey Question deleted': '서베이 질문 삭제',
'Survey Question updated': '서베이 질문 갱신된',
'Survey Question': '서베이 질문',
'Survey Series Details': '서베이 시리즈 세부사항',
'Survey Series Name': '서베이 시리즈 이름',
'Survey Series added': '서베이 시리즈 추가',
'Survey Series deleted': '서베이 시리즈 삭제',
'Survey Series updated': '서베이 일련의 갱신',
'Survey Series': '서베이 시리즈',
'Survey Template Details': '서베이 템플리트 세부사항',
'Survey Template added': '서베이 템플리트 추가',
'Survey Template deleted': '서베이 템플리트 삭제',
'Survey Template updated': '서베이 템플리트 갱신',
'Survey Template': '설문 조사 템플리트',
'Survey Templates': '서베이 템플리트',
'Symbology': '바코드',
'Sync Conflicts': '동기화 충돌',
'Sync History': '동기화 히스토리',
'Sync Now': '지금 동기화',
'Sync Partners are instances or peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) that you want to sync information with. Click on the link on the right to go the page where you can add sync partners, search for sync partners and modify them.': '동기화 파트너 인스턴스 또는 피어에서 (sahanaeden, sahanaagasti, ushahidi 등. ) 는 sync 함께 할 수 있습니다. 여기서 오른쪽으로 동기화 참여자 추가, sync 비즈니스파트너를 위한 검색 및 수정할 수 있는 페이지로 이동할 수 있는 링크를 누르십시오.',
'Sync Partners': '동기화 파트너',
'Sync Pools': '풀 동기',
'Sync Schedule': '동기화 스케줄',
'Sync Settings': '동기화 설정',
'Sync process already started on': '동기화 프로세스가 이미 시작된 에서',
'Synchronisation': '동기화',
'Synchronization Conflicts': '동기화 충돌',
'Synchronization Details': '동기화 세부사항',
'Synchronization History': '동기화 히스토리',
'Synchronization Peers': '동기화 피어와',
'Synchronization Settings': '동기화 설정',
'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the synchronization features of Sahana Eden': '동기화 사용자가 다른 사용자와 있고 다른 피어에서 최신 데이터 자신의 데이터베이스 갱신 데이터를 공유할 수 있습니다. 이 페이지를 sahana eden 의 동기화 기능을 사용하는 방법에 대한 정보를 제공합니다.',
'Synchronization not configured.': '동기화가 구성되지 않았습니다.',
'Synchronization settings updated': '동기화 설정 갱신',
'Synchronization': '동기화',
'Syncronisation History': 'syncronisation 히스토리',
'Tags': '태그',
'Take shelter in place or per <instruction>': '대신 shelter 또는 당<instruction>',
'Task Details': '태스크 세부사항',
'Task List': '태스크 목록',
'Task Status': '태스크 상태',
'Task added': '태스크 추가',
'Task deleted': '태스크가 삭제됨',
'Task updated': '태스크 갱신됨',
'Tasks': '태스크',
'Team Description': '팀 설명',
'Team Details': '팀 세부사항',
'Team ID': '팀 ID',
'Team Id': '팀 ID',
'Team Leader': '팀 리더',
'Team Member added': '팀 구성원 추가',
'Team Members': '팀 구성원',
'Team Name': '팀 이름',
'Team Type': '팀 유형',
'Team added': '팀 추가',
'Team deleted': '팀 삭제',
'Team updated': '팀 갱신된',
'Team': '팀',
'Teams': '팀',
'Technical testing only, all recipients disregard': '기술 테스트, 모든 사람 무시하십시오',
'Telecommunications': '통신',
'Telephone': '전화번호',
'Telephony': '전화 통신',
'Temp folder %s not writable - unable to apply theme!': 'temp 폴더가% unable 테마를 적용할 가능-s 않습니다!',
'Template file %s not readable - unable to apply theme!': '템플리트 파일 (s not installed — unable 테마를 적용할 수-!',
'Templates': '템플리트',
'Term for the fifth-level within-country administrative division (e.g. a voting or postcode subdivision). This level is not often used.': '다섯 번째 레벨-국가 관리 부서 (예: 결정 또는 우편 디비전) 의 용어. 이 레벨은 종종 사용됩니다.',
'Term for the fourth-level within-country administrative division (e.g. Village, Neighborhood or Precinct).': '네 번째 레벨 국가 관리 부서 (예: village, neighborhood 또는 precinct) 내의 용어입니다.',
'Term for the primary within-country administrative division (e.g. State or Province).': '기본 대한-국가 관리 부서 (예: 도) 내에 용어.',
'Term for the secondary within-country administrative division (e.g. District or County).': '보조 대한-국가 관리 부서 (예: 지역 또는 국가) 내에 용어.',
'Term for the third-level within-country administrative division (e.g. City or Town).': '세 번째 레벨-국가 관리 부서 (예: city 또는 town) 의 용어.',
'Term for the top-level administrative division (i.e. Country).': '용어 최상위 레벨 관리 디비전에 (국가:).',
'Territorial Authority': 'territorial 권한',
'Tertiary Server (Optional)': '차 서버 (선택적)',
'Text Color for Text blocks': '텍스트 color 텍스트 블록',
'Text': '텍스트',
'Thank you for validating your email. Your user account is still pending for approval by the system administator (%s).You will get a notification by email when your account is activated.': '이메일 유효성 감사합니다. 사용자 계정 활성화될 때 사용자 계정은 시스템 관리자 (% s ). 전자 우편으로 알림을 수신합니다 의해 승인 보류 중입니다.',
'Thanks for your assistance': '사용자의 thanks 대한 지원',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.': '\\ " query\\ " 조건 \\ " db.table1.field1==\'value\'\\ " 입니다. \\ 같이 " db.table1.field1 == db.table2.field2\\ " sql 조인의 결과.',
'The Area which this Site is located within.': '이 사이트 영역 내에 위치합니다.',
'The Assessments module allows field workers to send in assessments.': '평가를 모듈 필드에 작업자 평가를 에 보낼 수 있습니다.',
'The Author of this Document (optional)': '작성자가 문서 (선택적)',
'The Building Asssesments module allows building safety to be assessed, e.g. after an Earthquake.': '건물 asssesments 모듈 빌드 안전 평가할 수 있도록, 지진 후.',
'The Camp this Request is from': '(캠프 이 요청에서.',
'The Camp this person is checking into.': '(캠프 이 사용자 에 점검 중입니다.',
'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': '사용자/그룹, 일반 (보고) 또는 precise (맵에서) 의 현재 위치. 몇 자 가능한 위치에서 검색 을 입력하십시오.',
'The Email Address to which approval requests are sent (normally this would be a Group mail rather than an individual). If the field is blank then requests are approved automatically if the domain matches.': '전자 우편 주소 승인 요청 (일반적으로 이 그룹을 메일을 아닌 개별 수 있습니다). 이 필드가 공백인 경우 요청은 자동으로 도메인 일치하는 승인됩니다.',
'The Incident Reporting System allows the General Public to Report Incidents & have these Tracked.': '인시던트 보고 시스템 일반 보고서 인시던트 및 이 추적하도록 허용합니다.',
'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': '일반 (보고) 또는 precise (맵에서), 위치, 개인,. 몇 자 가능한 위치에서 검색 을 입력하십시오.',
'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': '일반 (보고) 또는 precise (맵에서), 위치, 개인,. 몇 자 가능한 위치에서 검색 을 입력하십시오.',
'The Media Library provides a catalog of digital media.': '매체 라이브러리 디지털 매체 카탈로그를 제공합니다.',
'The Messaging Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.': '메시징 모듈은 sahana 시스템의 기본 통신 허브입니다. sms & 전자 다양한 그룹 및 개인에게 전에 경고 및/또는 메시지, 도중 및 후에 피해 전송하는 데 사용됩니다.',
'The Organization Registry keeps track of all the relief organizations working in the area.': '조직 레지스트리에서 영역에 대한 모든 릴리프 조직을 추적합니다.',
'The Project Tracking module allows the creation of Activities to meet Gaps in Needs Assessments.': '프로젝트 추적 모듈 활동 작성 요구를 평가 간격이 충족시킬 수 있습니다.',
'The Role this person plays within this hospital.': '이 사람 이 병원 내 재생합니다.',
'The Shelter Registry tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': '이 shelter 레지스트리는 모든 shelters 및 저장합니다 관한 기본 세부사항을 추적합니다. 다른 모듈과 shelter, 서비스 사용 등 연관된 사용자 추적하기 위해 협업하는',
'The Shelter this Request is from': '이 shelter 이 요청에서.',
'The Shelter this person is checking into.': 'shelter 는 이 사용자 에 점검 중입니다.',
'The URL for the GetCapabilities page of a Web Map Service (WMS) whose layers you want available via the Browser panel on the Map.': 'url 그의 계층을 맵 브라우저를 패널을 통해 사용 가능한 웹 맵핑할 서비스 (wms) 의 getcapabilities 페이지.',
'The URL of your web gateway without the post parameters': 'url 을 사용하여 web gateway 의 post 매개변수 없이',
'The URL to access the service.': '서비스 url 에 액세스하십시오.',
'The Unique Identifier (UUID) as assigned to this facility by the government.': '고유 id (uuid) 이 정부 지정된 기능.',
'The asset must be assigned to a site OR location.': '사이트 자산 또는 위치를 지정해야 합니다.',
'The attribute which is used for the title of popups.': '이 팝업을 제목은 사용되는 속성.',
'The attribute within the KML which is used for the title of popups.': '이 속성은 kml 내에서 팝업을 제목으로 사용됩니다.',
'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)': '속성 (s) kml 내에 팝업을 본문에 사용됩니다. (속성 사이에 공백을 사용하십시오.)',
'The body height (crown to heel) in cm.': '본문 (힐 수 crown) cm.',
'The country the person usually lives in.': '국가, 개인의 삶을 보통.',
'The default Organization for whom this person is acting.': '이 사용자의 기본 역할을 조직.',
'The default Organization for whom you are acting.': '에 대한 구입처에 역할을 하는 기본 조직.',
'The duplicate record will be deleted': '중복 레코드는 삭제됩니다',
'The first or only name of the person (mandatory).': '대상자의 이름 (필수)',
'The form of the URL is http://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service stands for the URL path to the WMS.': 'url 의 양식, wms url 경로에 대한/웹/맵/서비스 는 http://your/web/map/service?service=WMS&request=GetCapabilities 입니다.',
'The language you wish the site to be displayed in.': '에 표시되는 사이트의 언어입니다.',
'The list of Brands are maintained by the Administrators.': '브랜드 목록에서 administrators 의해 유지보수됩니다.',
'The list of Catalogs are maintained by the Administrators.': '카탈로그 목록에서, 관리자에 의해 유지보수됩니다.',
'The map will be displayed initially with this latitude at the center.': '맵이 초기에 center 에서 이 latitude 함께 표시됩니다.',
'The map will be displayed initially with this longitude at the center.': '맵이 초기에 center 에서 이 경도를 함께 표시됩니다.',
'The minimum number of features to form a cluster.': '최소 기능 클러스터를 형성합니다.',
'The name to be used when calling for or directly addressing the person (optional).': '이름 또는 사용자 (선택사항) 주소 직접 호출할 때 사용할 수 있습니다.',
'The next screen will allow you to detail the number of people here & their needs.': '다음 화면에서는 세부사항으로 사람들이 여기에 & 고객의 요구에 수가 있습니다.',
'The number of Units of Measure of the Alternative Items which is equal to One Unit of Measure of the Item': '이 수가 작거나 항목의 한 측정 단위에 있는 다른 항목의 측정 단위',
'The number of pixels apart that features need to be before they are clustered.': '픽셀 수를 별도로 자신이 클러스터되는 전에 기능을 합니다.',
'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': '표시 맵 주위로 다운로드할 수 바둑판식. 영, 1 페이지를 빠르게 로드하고, 숫자가 커질수록 후속 초점이동을 빠릅니다.',
'The person at the location who is reporting this incident (optional)': '개인이 위치에 있는 이 사건 (선택사항) 보고',
'The post variable containing the phone number': 'post 변수 전화번호를 포함하는',
'The post variable on the URL used for sending messages': 'post 변수 메시지를 보내는 데 사용되는 url',
'The post variables other than the ones containing the message and the phone number': 'post 변수를 다른 메시지와 전화 번호가 포함된 것 이외의',
'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows': '모뎀이 연결된-/dev/ttyUSB0 직렬 포트, linux 등 com2, com2 windows 에서 등에 대한',
'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.': '서버가 브라우저에서 요청을 채우기 위해 액세스하는 다른 서버로부터 시기적절한 응답을 수신하지 못했습니다.',
'The server received an incorrect response from another server that it was accessing to fill the request by the browser.': '서버가 브라우저에서 요청을 채우기 위해 액세스하는 다른 서버에서 올바르지 않은 응답을 수신했습니다.',
'The site where this position is based.': '이 사이트 위치를 기반으로 합니다.',
'The staff responsibile for Facilities can make Requests for assistance. Commitments can be made against these Requests however the requests remain open until the requestor confirms that the request is complete.': '기능을 스태프 responsibile 도움을 요청할 수 있습니다. 요청자의 요청을 완료할 때까지 확인합니다 commitments 이 요청에 대해 그러나 요청을 열린 상태로 만들 수 있습니다.',
'The subject event no longer poses a threat or concern and any follow on action is described in <instruction>': '주제 이벤트 더 이상 위협 또는 관련된 모든 조치를 수행하십시오 되지 설명되어 있습니다<instruction>',
'The time at which the Event started.': '이벤트가 시작된 시간입니다.',
'The token associated with this application on': '토큰 이 연관된 응용프로그램',
'The unique identifier which identifies this instance to other instances.': '고유 id 이 인스턴스가 다른 인스턴스를 식별합니다.',
'The way in which an item is normally distributed': '항목이 정상적으로 분산되는 방법을',
'The weight in kg.': 'kg 의 가중치입니다.',
'Theme Details': '테마 세부사항',
'Theme added': '테마 추가',
'Theme deleted': '테마 삭제',
'Theme updated': '갱신된 테마',
'Theme': '테마',
'Themes': '주제',
'There are errors': '오류가 있는',
'There are insufficient items in the Inventory to send this shipment': '충분하지 않은 항목 인벤토리에 있는 이 shipment 전송할 수 있습니다',
'There is no address for this person yet. Add new address.': '이 사용자에 대한 주소가 아직 없음. 새 주소를 추가하십시오.',
'These are settings for Inbound Mail.': '이들 인바운드 메일에 대한 설정이 있습니다.',
'These are the Incident Categories visible to normal End-Users': '이 표시 정상 일반 사용자에게 인시던트의 범주입니다',
'These need to be added in Decimal Degrees.': '이러한 decimal 도 추가해야 합니다.',
'They': '다른 사용자',
'This Group has no Members yet': '멤버가 현재 등록된',
'This Team has no Members yet': '멤버가 현재 등록된',
'This appears to be a duplicate of': '이 중복으로 나타납니다',
'This file already exists on the server as': '이 파일은 이미 존재합니다.',
'This is appropriate if this level is under construction. To prevent accidental modification after this level is complete, this can be set to False.': '이 레벨 생성 경우 이 적합합니다. 로 인한 수정 후 이 레벨을 완료되지 않도록 이 false 로 설정할 수 있습니다.',
'This is the way to transfer data between machines as it maintains referential integrity.': '이는 참조 무결성을 유지보수하는 시스템 간에 데이터를 전송할 수 있는 방법입니다.',
'This is the way to transfer data between machines as it maintains referential integrity...duplicate data should be removed manually 1st!': '이는 참조 무결성을 유지보수하는 시스템 간에 데이터를 전송할 수 있는 방법입니다. .. 중복 데이터 1 수동으로 제거해야 합니다!',
'This level is not open for editing.': '이 레벨은 열고 편집할 수 없습니다.',
'This might be due to a temporary overloading or maintenance of the server.': '이 때문에 임시 과부하 또는 유지보수로 될 수 있습니다.',
'This module allows Inventory Items to be Requested & Shipped between the Inventories of Facilities.': '이 모듈은 인벤토리 항목 및 요청된 설비 의 자원 사이의 운송되도록 있습니다.',
'This module allows you to plan scenarios for both Exercises & Events. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.': '이 모듈의 연습을 다 & 이벤트 시나리오 계획할 수 있습니다. 이러한 쉽게 mobilized 수 있는 적절한 자원 (인력, 자산 및 설비) 을 할당할 수 있습니다.',
'This page shows you logs of past syncs. Click on the link below to go to this page.': '이 페이지에서는 지난 syncs 의 로그를 표시합니다. 이 페이지로 가려면 아래 링크를 누르십시오.',
'This screen allows you to upload a collection of photos to the server.': '이 화면에서는 사진 콜렉션을 서버로 업로드할 수 있습니다.',
'This setting can only be controlled by the Administrator.': '이 설정은 관리자가 제어할 수 있습니다.',
'This shipment has already been received.': '이 shipment 이미 받았습니다.',
'This shipment has already been sent.': '이 shipment 이미 전송되었습니다.',
'This shipment has not been received - it has NOT been canceled because can still be edited.': '이 shipment 때문에 이를 계속 편집할 수 취소되었음-수신되지 않았습니다.',
'This shipment has not been sent - it has NOT been canceled because can still be edited.': '이 shipment 때문에 이를 계속 편집할 수 취소되었음-전송되지 않았습니다.',
'This shipment will be confirmed as received.': '이 shipment 받은 확인할 수 있습니다.',
'Thursday': '목요일',
'Ticket Details': '티켓 세부사항',
'Ticket ID': '티켓 ID',
'Ticket added': '티켓 추가',
'Ticket deleted': '티켓 삭제됨',
'Ticket updated': '티켓 갱신',
'Ticket': '티켓',
'Ticketing Module': 'ticketing 모듈',
'Tickets': '티켓',
'Tilt-up concrete': '틸트-up)',
'Timber frame': 'timber 프레임',
'Timeline Report': '타임라인 보고서',
'Timeline': '타임라인',
'Title to show for the Web Map Service panel in the Tools panel.': '제목 도구 패널에서 웹 맵 서비스 패널을 표시합니다.',
'To Location': '대상 위치',
'To Person': '개인 수신',
'To begin the sync process, click the button on the right =>': '대한 sync 프로세스를 시작하려면, 마우스 => 단추를 누르십시오',
'To begin the sync process, click this button =>': '에 대한 동기화 프로세스를 시작하려면 이 단추를 누르십시오 =>',
'To create a personal map configuration, click': '개인용 맵 구성을 작성하려면 클릭하십시오',
'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in models/000_config.py': '데 openstreetmap 편집하려면 models/000_config. py 의 openstreetmap 설정을 편집해야 합니다',
'To search by job title, enter any portion of the title. You may use % as wildcard.': '에 의해 작업 제목, 제목 일부를 입력하십시오. % 와일드 카드로 사용할 수 있습니다.',
'To variable': '변수',
'To': '종료',
'Tools': '공구',
'Total # of Target Beneficiaries': '총 대상 받아야 중',
'Total # of households of site visited': '사이트 households 의 총 방문',
'Total Beds': '총 의료용',
'Total Beneficiaries': '총 받아야',
'Total Cost per Megabyte': '총 mb 수',
'Total Cost per Minute': '분당 총 비용',
'Total Monthly Cost': '월별 총 비용',
'Total Monthly Cost:': '월별 총 비용:',
'Total Monthly': '월별 총계',
'Total One-time Costs': '총 일회 비용',
'Total Persons': '총 사용자',
'Total Recurring Costs': '반복 총 비용',
'Total Unit Cost': '총 비용',
'Total Unit Cost:': '총 비용:',
'Total Units': '총 장치',
'Total gross floor area (square meters)': '총 매출총이익 바닥 영역 (제곱 미터로)',
'Total number of beds in this hospital. Automatically updated from daily reports.': '의료용 이 병원 총 수입니다. 자동으로 매일 보고서 에서 갱신되었습니다.',
'Total number of houses in the area': '총 영역에서 장착합니다.',
'Total number of schools in affected area': '총 받는 영역에 학교 중',
'Total population of site visited': '사이트의 총 방문',
'Total': '전체',
'Totals for Budget:': '총계를 예산:',
'Totals for Bundle:': '번들에 대한 총계:',
'Totals for Kit:': '상품 총계:',
'Tourist Group': '여행자 그룹',
'Town': '읍',
'Traces internally displaced people (IDPs) and their needs': '내부 추적 위치가 사람 (idps) 및 해당 하는',
'Tracing': '추적',
'Track Details': '세부사항 추적',
'Track deleted': '추적 삭제',
'Track updated': '갱신된 트랙',
'Track uploaded': '추적합니다 업로드된',
'Track with this Person?': '이 개인과 추적합니다?',
'Track': '트랙',
'Tracking of Projects, Activities and Tasks': '프로젝트 추적, 활동 및 태스크',
'Tracking of basic information on the location, facilities and size of the Shelters': '기본 추적 정보의 위치에 따라, 설비 및 shelters 의 크기',
'Tracks the location, distibution, capacity and breakdown of victims in Shelters': 'shelters 의 위치, 희생 distibution, 용량 및 작업분류 트랙',
'Tracks': '트랙',
'Traffic Report': '트래픽 보고서',
'Training Course Catalog': '교육 과정 카탈로그',
'Training Details': '교육 세부사항',
'Training added': '교육 추가',
'Training deleted': '연계 삭제',
'Training updated': '갱신된 교육',
'Training': '교육',
'Transit Status': '전송 상태',
'Transit': '운송',
'Transition Effect': '전환 효과',
'Transparent?': '투명합니다?',
'Transportation assistance, Rank': '교통, 랭크',
'Trauma Center': 'trauma center',
'Travel Cost': '여행 비용',
'Tropical Storm': 'tropical 먹회색',
'Tropo Messaging Token': 'tropo 토큰 전달',
'Tropo Settings': 'tropo 설정',
'Tropo Voice Token': 'tropo 음성 토큰',
'Tropo settings updated': 'tropo 설정 갱신',
'Truck': '트럭',
'Try checking the URL for errors, maybe it was mistyped.': '오류에 대해 url 검사, maybe it 입력했습니다.',
'Try hitting refresh/reload button or trying the URL from the address bar again.': '화면 갱신/다시 로드 단추를 누르면 주소 표시줄에서 url 을 다시 시도하십시오.',
'Try refreshing the page or hitting the back button on your browser.': '페이지 새로 고침 또는 브라우저의 뒤로 단추를 누르면 시도하십시오.',
'Tuesday': '화요일',
'Twitter ID or #hashtag': 'twitter id 또는 #hashtag',
'Twitter Settings': 'twitter 설정',
'Twitter': '트위터',
'Type of Construction': '구현 유형',
'Type of water source before the disaster': '유형 (소스 피해 전',
'UN': '유엔',
'Un-Repairable': '복구 불가한',
'Unable to parse CSV file!': 'unable csv 파일을 구문 분석할 수 없습니다!',
'Unidentified': '미확인',
'Unit Cost': '단가',
'Unit added': '장치 추가',
'Unit deleted': '장치 삭제',
'Unit of Measure': '측정 단위',
'Unit updated': '갱신된 장치',
'Units': '단위',
'Unknown Peer': '알 수 없는 피어',
'Unknown type of facility': '알 수 없는 유형 기능',
'Unknown': '알 수 없음',
'Unresolved Conflicts': '분석되지 않은 충돌',
'Unsafe': '나타남',
'Unselect to disable the modem': '모뎀 불가능으로 선택',
'Unsent': '보내지 않음',
'Unsupported data format!': '지원되지 않는 데이터 형식!',
'Unsupported method!': '지원되지 않는 메소드!',
'Update Activity Report': '활동 보고서 갱신',
'Update Cholera Treatment Capability Information': 'cholera treatment 기능 정보 갱신',
'Update Request': '업데이트 요청',
'Update Service Profile': '서비스 프로파일 갱신',
'Update Status': '업데이트 상태',
'Update Task Status': '태스크 상태 갱신',
'Update Unit': '장치 갱신',
'Update if Master': '마스터 경우 갱신',
'Update if Newer': '최신이면 갱신',
'Update your current ordered list': '현재 정렬된 목록 갱신',
'Update': '업데이트',
'Updated By': '에 의해 업데이트 됨',
'Upload Photos': '사진 업로드',
'Upload Spreadsheet': '스프레드시트 업로드',
'Upload Track': '업로드 트랙',
'Upload a Spreadsheet': '스프레드시트 업로드',
'Upload an image file (bmp, gif, jpeg or png), max. 300x300 pixels!': '이미지 파일 (bmp, gif, jpeg 또는 png), max. 업로드 300x300 픽셀!',
'Upload an image file here.': '이미지 파일을 업로드하십시오.',
'Upload an image, such as a photo': '이미지 (예: 사진 업로드',
'Urban Fire': 'fire 도시',
'Urban area': '영역 도시',
'Urdu': '우르두어',
'Urgent': '긴급',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': '및, 또는 (...)|(...), 및 의 더 복잡한 조회를 빌드할 수 ~(...) 대한 (...)&(...).',
'Use Geocoder for address lookups?': '주소 찾아보기에 지오코더?',
'Use default': '기본값 사용',
'Use these links to download data that is currently in the database.': '이 링크는 현재 데이터베이스에 있는 데이터를 다운로드할 수 있습니다.',
'Used by IRS & Assess': 'IRS & Assess가 사용하는',
'Used in onHover Tooltip & Cluster Popups to differentiate between types.': '사용할 onhover 도구 및 클러스터 팝업을 유형을 구분할 수 있습니다.',
'Used to build onHover Tooltip & 1st field also used in Cluster Popups to differentiate between records.': '사용할 onhover 도구 및 1 필드에 클러스터 팝업을 레코드를 구분하는 데 사용됩니다.',
'Used to check that latitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': '입력한 해당 위치의 위도 확인하는 데 사용됩니다. 해당 위치에 있는 자원 필터 목록 데 사용될 수 있습니다.',
'Used to check that longitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': '입력한 해당 위치의 경도 확인하는 데 사용됩니다. 해당 위치에 있는 자원 필터 목록 데 사용될 수 있습니다.',
'Used to import data from spreadsheets into the database': 'spreadsheets 데이터베이스로 데이터를 가져오는 데 사용됩니다',
'Used within Inventory Management, Request Management and Asset Management': '재고 관리, 관리 및 자산 관리 내에서 사용되는',
'User Account has been Disabled': '사용자 계정을 사용할 수 없습니다',
'User Details': '사용자 세부사항',
'User Management': '사용자 관리',
'User Profile': '사용자 프로파일',
'User Requests': '사용자 요청',
'User Updated': '사용자가 갱신되었습니다',
'User added': '사용자 추가됨',
'User already has this role': '이 역할은 사용자가 이미 있습니다',
'User deleted': '사용자 삭제됨',
'User updated': '사용자가 갱신되었습니다',
'Users removed': '사용자가 제거되었습니다.',
'Users': '사용자',
'Uses the REST Query Format defined in': '나머지 조회에 정의된 형식을 사용합니다',
'Utilities': '유틸리티',
'Utility, telecommunication, other non-transport infrastructure': '유틸리티, 통신, 기타 전송 인프라',
'Various Reporting functionalities': '다양한 보고 기능',
'Vehicle Crime': '차량 범죄',
'Vehicle Types': '자동차 유형',
'Vehicle': '차량',
'Verification Status': '검증 상태',
'Verified?': '확인되었습니다?',
'Verify password': '비밀번호 검증',
'Very Good': '매우 양호',
'Very High': '매우 높음',
'View Alerts received using either Email or SMS': '경보 보기 중 전자 우편 또는 sms 를 받은',
'View All': '모두 보기',
'View Error Tickets': '오류 티켓 보기',
'View Fullscreen Map': '보기 fullscreen 맵',
'View Image': '이미지 보기',
'View Items': '항목 보기',
'View On Map': '맵 보기',
'View Outbox': '편지함 보기',
'View Picture': '그림 열람',
'View Settings': '보기 설정',
'View Tickets': '티켓 보기',
'View and/or update their details': '보기 및/또는 해당 세부사항 갱신',
'View or update the status of a hospital.': '보기 또는 갱신 병원 의 상태.',
'View pending requests and pledge support.': '보류 중인 요청 및 pledge 지원 보기.',
'View the hospitals on a map.': '맵에서 이 병원 보십시오.',
'View/Edit the Database directly': '보기/데이터베이스 직접 편집',
'Village Leader': 'village 리더',
'Visible?': '표시하시겠습니까?',
'Visual Recognition': 'visual 인식',
'Volcanic Ash Cloud': 'volcanic ash 클라우드에',
'Volcanic Event': 'volcanic 이벤트',
'Volume (m3)': '볼륨 (m3)',
'Volunteer Availability': '가용성 지원자',
'Volunteer Details': '지원자 세부사항',
'Volunteer Information': '지원자 정보',
'Volunteer Management': '지원자 관리',
'Volunteer Project': '지원자 프로젝트',
'Volunteer Record': '지원자 레코드',
'Volunteer Request': '지원자 요청',
'Volunteer added': '지원자 추가됩니다',
'Volunteer availability added': '지원자 가용성 추가',
'Volunteer availability deleted': '자발적으로 삭제된 가용성',
'Volunteer availability updated': '지원자 가용성 갱신',
'Volunteer deleted': '자발적으로 삭제된',
'Volunteer details updated': '지원자 세부사항 갱신',
'Volunteers were notified!': 'volunteers 통지됩니다.',
'Volunteers': 'volunteers',
'Vote': '투표',
'Votes': '투표 수',
'WASH': '씻으십시오',
'Walking Only': '전용 워킹',
'Wall or other structural damage': '벽 또는 다른 구조적 손상',
'Warehouse Details': 'warehouse 세부사항',
'Warehouse added': '웨어하우스 추가됩니다',
'Warehouse deleted': '웨어하우스 삭제됨',
'Warehouse updated': '웨어하우스 갱신된',
'Warehouses': '웨어하우스',
'Water Sanitation Hygiene': '물 sanitation 예방',
'Water collection': '콜렉션 사용',
'Water gallon': '물 갤런',
'Water storage containers in households': '워터마크 스토리지 컨테이너를 households)',
'Water supply': '물 공급',
'Web Map Service Browser Name': '맵 서비스에서 웹 브라우저 이름',
'Web Map Service Browser URL': '맵 서비스에서 웹 브라우저 url',
'Website': '웹 사이트',
'Wednesday': '수요일',
'Weight (kg)': '무게 (kg)',
'Weight': '무게',
'Welcome to the Sahana Portal at': '이 sahana portal 에 오신 것을 환영합니다',
'Wheat': '밀색',
'When reports were entered': '입력된 보고서',
'Who is doing what and where': '누가 어떤 수행하고,',
'Who usually collects water for the family?': '일반적으로 사람을 수집하는 제품군용 워터마크?',
'Width (m)': '너비 (m)',
'Wild Fire': '와일드 fire',
'Wind Chill': '바람 chill',
'Window frame': '창 프레임',
'Winter Storm': '겨울 폭풍',
'Women of Child Bearing Age': '출산 가능한 시기의 여성들',
'Women participating in coping activities': '대처 활동에 참가하는 여성들',
'Women who are Pregnant or in Labour': '임신 중이거나 일을 하는 여성들',
'Womens Focus Groups': '여성 포커스 그룹',
'Wooden plank': '나무 plank',
'Wooden poles': '나무 폴',
'Working hours end': '작업 시간 종료',
'Working hours start': '작업 시간 시작',
'Working or other to provide money/food': '작업 또는 다른 돈을/식품 제공하기 위해',
'YES': '예',
'Year built': '건축 연도',
'Year of Manufacture': '제조 연도',
'Yellow': '노란색',
'Yes': '예',
'You are a recovery team?': '복구 팀?',
'You are attempting to delete your own account - are you sure you want to proceed?': '자신의 계정-삭제하려고 시도합니다 확실합니까 계속하시겠습니까?',
'You are currently reported missing!': '현재 보고됩니다 누락되었습니다!',
'You can change the configuration of synchronization module in the Settings section. This configuration includes your UUID (unique identification number), sync schedules, beacon service and so on. Click the following link to go to the Sync Settings page.': '설정값 섹션에서 동기화 모듈의 구성을 변경할 수 있습니다. 이 구성, uuid (unique identification number), 동기화 스케줄, beacon 서비스 등이 포함됩니다. 다음 링크를 눌러 동기화 설정 페이지로 이동하십시오.',
'You can click on the map below to select the Lat/Lon fields': '맵핑 아래 lat/₩ 필드를 선택할 수 있습니다',
'You can select the Draw tool': '사용자가 그리기 도구를 선택할 수 있습니다',
'You can set the modem settings for SMS here.': '모뎀 설정을 sms 여기에 설정할 수 있습니다.',
'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.': '변환 도구를 gps 변환하기 위해 사용할 수 있는 좌표 또는 도/분/초.',
'You do not have permission for any facility to make a commitment.': '모든 기능에대한 약속을 결정할 권한이 없습니다',
'You do not have permission for any facility to make a request.': '모든 기능에 대한 요청을 할 권한이 없습니다',
'You do not have permission for any site to add an inventory item.': '모든 사이트에 대해 자원 명세 항목을 추가할 권한이 없습니다.',
'You do not have permission for any site to receive a shipment.': '권한이 있는 모든 사이트의 납품 받을 필요가 없습니다.',
'You do not have permission for any site to send a shipment.': '권한이 있는 모든 사이트의 납품 보낼 필요가 없습니다.',
'You do not have permission to cancel this received shipment.': '이 받은 shipment 취소할 수 없습니다.',
'You do not have permission to cancel this sent shipment.': '이 shipment 보낸 취소할 수 없습니다.',
'You do not have permission to make this commitment.': '이 확약 변경할 필요가 없습니다.',
'You do not have permission to receive this shipment.': '이 shipment 수신할 수 없습니다.',
'You do not have permission to send a shipment from this site.': '이 사이트에서 shipment 보낼 필요가 없습니다.',
'You do not have permission to send this shipment.': '이 shipment 전송할 수 있는 권한이 없습니다.',
'You have a personal map configuration. To change your personal configuration, click': '사용자가 개인용 맵 구성. 개인용 구성 변경, 누르십시오',
'You have found a dead body?': '사용자는 데드 본문을 찾을 수 있습니까?',
'You must be logged in to register volunteers.': 'volunteers 를 등록할 수 로그인해야 합니다.',
'You must be logged in to report persons missing or found.': '누락된 또는 찾을 사람이 보고서 에 로그인해야 합니다.',
'You must provide a series id to proceed.': '일련의 id 진행하려면 제공해야 합니다.',
'You should edit Twitter settings in models/000_config.py': '모델/000_config. py 에서 twitter 설정을 편집해야 합니다',
'Your current ordered list of solution items is shown below. You can change it by voting again.': '현재 솔루션 항목의 순서화된 목록 아래에 표시됩니다. 다시 voting 이를 변경할 수 있습니다.',
'Your post was added successfully.': 'post 가 성공적으로 추가되었습니다.',
'Your system has been assigned a unique identification (UUID), which other computers around you can use to identify you. To view your UUID, you may go to Synchronization -> Sync Settings. You can also see other settings on that page.': '시스템 고유 id (uuid), 다른 컴퓨터를 중심으로 사용자가 식별할 수 있습니다. 사용자의 uuid 보기 동기화 -> sync 설정으로 이동하십시오. 또한 페이지의 다른 설정을 볼 수 있습니다.',
'Zero Hour': '이 시간',
'Zinc roof': 'roof 아연',
'Zoom Levels': '확대/축소 레벨',
'Zoom': '확대/축소',
'active': '활성화',
'added': '추가됨',
'all records': '모든 레코드',
'allows a budget to be developed based on staff & equipment costs, including any admin overheads.': '예산 기반으로 인력 및 장비 비용, 모든 관리 오버헤드를 포함하여 개발할 수 있습니다.',
'allows for creation and management of surveys to assess the damage following a natural disaster.': '조사 자연 재해 다음 손상을 평가하고 작성 및 관리할 수 있습니다.',
'an individual/team to do in 1-2 days': '개인/팀은 1-2 일 수',
'assigned': '지정됨',
'average': '평균',
'black': '검은색',
'blue': '파란색',
'brown': '갈색',
'by': '게시자',
'c/o Name': 'c/o 이름',
'can be used to extract data from spreadsheets and put them into database tables.': '스프레드시트 데이터를 추출하는 데 사용할 수 있고 데이터베이스 테이블로 이를 넣으십시오.',
'cancelled': '취소',
'check all': '모두 선택',
'click for more details': '자세한 내용은 누르십시오',
'completed': '완료됨',
'consider': '고려',
'curly': '중괄호',
'currently registered': '현재 등록된',
'dark': '어둡게',
'data uploaded': '데이터 업로드',
'database %s select': '데이터베이스% 를 선택하십시오.',
'database': '데이터베이스',
'deceased': '사망함',
'delete all checked': '모두 삭제 checked',
'deleted': '삭제',
'design': '설계',
'displaced': '프로덕트를',
'divorced': '이혼',
'done!': '완료',
'duplicate': '중복',
'edit': '편집',
'eg. gas, electricity, water': '예: 가스, 전기, 물',
'enclosed area': '영역 안에',
'export as csv file': 'csv 파일로 내보내기',
'feedback': '피드백',
'female': 'Female',
'flush latrine with septic tank': 'flush latrine septic 와 탱크',
'found': '발견됨',
'from Twitter': 'twitter 에서',
'green': '초록색',
'grey': '회색',
'here': '다음은',
'high': '높음',
'hourly': '시간별',
'households': '가구수',
'identified': '식별됨',
'ignore': '무시',
'in Deg Min Sec format': '정도 분 초 형식으로',
'in GPS format': 'gps 형식으로',
'inactive': '비활성화',
'insert new %s': '새로운 %s를(을) 삽입합니다',
'insert new': '새로 삽입',
'invalid request': '올바르지 않은 요청',
'invalid': '올바르지 않음',
'is a central online repository where information on all the disaster victims and families, especially identified casualties, evacuees and displaced people can be stored. Information like name, age, contact number, identity card number, displaced location, and other details are captured. Picture and finger print details of the people can be uploaded to the system. People can also be captured by group for efficiency and convenience.': '모든 피해 희생 및 제품군에서 특히 정보를 casualties, evacuees 및 프로덕트를 사용자 식별된 저장할 수 있는 중앙 온라인 저장소입니다. 정보: 이름, 나이, 연락처, 등록정보창, 해당 위치 및 기타 세부사항을 캡처합니다. 사람 그림 및 finger 세부사항 인쇄 시스템 업로드할 수 있습니다. 또한 사용자 편의를 위해 효율성 및 group by 캡처될 수 있습니다.',
'is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities': '함께 제공하는 조직에서 완화 및 프로젝트의 항목 관리에 대한 복잡한 기능을 작업을 여러 하위 모듈로 구성됩니다 위해 계획된 것입니다. 이 흡입구 시스템, 웨어하우스 관리 시스템, 상품 추적, 공급망 관리, 차량 관리, 조달, 재무 추적 및 기타 자산 및 자원 관리 기능이 있습니다',
'keeps track of all incoming tickets allowing them to be categorised & routed to the appropriate place for actioning.': '이를 categorised 및 actioning 대한 적절한 라우트할 수 있도록 모든 수신 티켓을 추적합니다.',
'leave empty to detach account': '비어 있는 상태로 분리하려면 계정',
'legend URL': 'url 범례',
'light': '얇게',
'login': '로그인',
'low': '낮음',
'manual': '수동',
'medium': '중간',
'meters': '미터',
'missing': '누락',
'module allows the site administrator to configure various options.': '모듈 사이트 운영자 다양한 옵션을 구성할 수 있습니다.',
'module helps monitoring the status of hospitals.': '모듈 병원 의 상태 모니터링 도움이 됩니다.',
'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS).': '모듈 메커니즘입니다 공동으로 개발 피해 개요, 온라인 맵핑 (gis) 를 제공합니다.',
'more': '자세히',
'new record inserted': '새 레코드 삽입',
'new': '신규',
'next 100 rows': '다음 100 행',
'none': '없음',
'normal': '정상',
'not accessible - no cached version available!': '액세스할 수 없음-사용할 수 있는 캐시된 버전!',
'not accessible - using cached version from': '액세스할-캐시된 버전을 사용하여',
'not specified': '지정되지 않음',
'obsolete': '사용되지 않음',
'open defecation': '열기 defecation',
'optional': '선택적',
'or import from csv file': 'csv 파일에서 가져오기',
'other': '기타',
'over one hour': '한 시간 동안',
'people': '사용자',
'piece': '조각',
'postponed': '연기',
'preliminary template or draft, not actionable in its current form': '예비 템플리트 또는 드래프트, 실천 현재 양식으로',
'previous 100 rows': '이전 100 행',
'record does not exist': '레코드가 없음',
'record id': '레코드 ID',
'red': '적색',
'reports successfully imported.': '보고서 임포트했습니다.',
'representation of the Polygon/Line.': '다각형/행 표시.',
'retired': '퇴직함',
'river': '강',
'see comment': '주석 참조',
'selected': '선택',
'separated from family': '제품군에서 구분됩니다',
'separated': '분리',
'shaved': '면도 된',
'sides': '측면',
'sign-up now': '이제 사인업',
'specify': '지정',
'staff members': '스태프 구성원',
'staff': '스태프',
'state location': '위치 상태',
'state': '상태(State)',
'straight': '직선',
'suffered financial losses': '재정 손실을 발생함',
'table': '테이블',
'this': '그러면',
'to access the system': '시스템 액세스',
'total': '전체',
'tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!': 'tweepy 모듈 사용 중인 python-이 내에서 비 tropo twitter 지원 설치 합니다!',
'unable to parse csv file': 'unable csv 파일을 구문 분석할 수 없습니다.',
'uncheck all': '모두 선택 취소',
'unidentified': '미확인',
'unknown': '알 수 없음',
'unspecified': '지정되지 않음',
'unverified': '확인되지 않음',
'updated': '업데이트 날짜',
'updates only': '갱신사항만',
'verified': '확인',
'volunteer': '지원자',
'wavy': '물결선',
'weekly': '주별',
'white': '흰색',
'wider area, longer term, usually contain multiple Activities': '넓은 영역, 장기, 보통 여러 활동이 포함된',
'widowed': '사별',
'within human habitat': 'habitat 내의 사용자',
'xlwt module not available within the running Python - this needs installing for XLS output!': 'xlwt 모듈 사용 중인 python-이 내에서 xls 출력 installing 합니다!',
'yes': '예',
}
|
devinbalkind/eden
|
languages/ko.py
|
Python
|
mit
| 244,291
|
[
"VisIt"
] |
34d1299e8b201c9ed4126d3d472cbb168f36cc1bfd4ce505d6818fa49331c273
|
#
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------
import os, stat, errno, sys, shutil, zipfile, tarfile, tempfile, re, time, datetime, platform, subprocess, socket
from os.path import join, exists, dirname, basename
from argparse import ArgumentParser, REMAINDER
import xml.dom.minidom
import json, textwrap
import mx
import mx_unittest
from mx_unittest import unittest
from mx_gate import Task
import mx_gate
import mx_jvmci_makefile
# The mx suite this file is the extension script for.
_suite = mx.suite('jvmci')
""" The VMs that can be built and run along with an optional description. Only VMs with a
description are listed in the dialogue for setting the default VM (see get_vm()). """
_vmChoices = {
    'jvmci' : 'VM triggered compilation is performed with a tiered system (C1 + Graal) and Graal is available for hosted compilation.',
    'server' : 'Normal compilation is performed with a tiered system (C1 + C2) and Graal is available for hosted compilation.',
    'client' : None, # VM compilation with client compiler, hosted compilation with Graal
    'server-nojvmci' : None, # all compilation with tiered system (i.e., client + server), JVMCI omitted
    'client-nojvmci' : None, # all compilation with client compiler, JVMCI omitted
    'original' : None, # default VM copied from bootstrap JDK
}
# Aliases for legacy VM names: maps the old "graal" terminology onto the
# current "jvmci" names so existing DEFAULT_VM settings keep working.
_vmAliases = {
    'graal' : 'jvmci',
    'server-nograal' : 'server-nojvmci',
    # Fixed: previously mapped to itself ('client-nograal'), so the legacy
    # client name was never translated to a valid _vmChoices key.
    'client-nograal' : 'client-nojvmci',
}
# Tag naming the JVMCI JDK (referenced elsewhere in this suite; usage not
# visible in this chunk).
_JVMCI_JDK_TAG = 'jvmci'
""" The VM that will be run by the 'vm' command and built by default by the 'build' command.
This can be set via the global '--vm' option or the DEFAULT_VM environment variable.
It can also be temporarily set by using of a VM context manager object in a 'with' statement. """
_vm = None
""" The VM builds that will be run by the 'vm' command - default is first in list """
_vmbuildChoices = ['product', 'fastdebug', 'debug', 'optimized']
""" The VM build that will be run by the 'vm' command.
This can be set via the global '--vmbuild' option.
It can also be temporarily set by using of a VM context manager object in a 'with' statement. """
_vmbuild = _vmbuildChoices[0]
""" The current working directory to switch to before running the VM. """
_vm_cwd = None
""" The base directory in which the JDKs cloned from $JAVA_HOME exist. """
_installed_jdks = None
""" Prefix for running the VM. """
_vm_prefix = None
# NOTE(review): presumably controls generation of Eclipse launch configs when
# building - the consuming code is not visible in this chunk; confirm at use site.
_make_eclipse_launch = False
# Minimum supported Java version (inclusive).
_minVersion = mx.VersionSpec('1.8')
# max version (first _unsupported_ version)
_untilVersion = None
class JDKDeployedDist(object):
    """Base class for an mx distribution that is deployed into a JDK.

    Concrete subclasses override :meth:`deploy` with the logic for copying
    the distribution's artifacts into a given JDK directory.
    """

    def __init__(self, name):
        # Name of the mx distribution; resolved lazily by dist().
        self._name = name

    def dist(self):
        """Resolve and return the mx distribution backing this object."""
        distName = self._name
        return mx.distribution(distName)

    def deploy(self, jdkDir):
        """Deploy this distribution into ``jdkDir`` (abstract)."""
        mx.nyi('deploy', self)
class JarJDKDeployedDist(JDKDeployedDist):
    """A jar distribution deployed by copying its jar into a JDK subdirectory.

    Subclasses state where the jar lands via :meth:`targetDir`.
    """

    def __init__(self, name, partOfHotSpot=False):
        JDKDeployedDist.__init__(self, name)
        # True when this distribution is built as part of HotSpot.
        self.partOfHotSpot = partOfHotSpot

    def targetDir(self):
        """Return the JDK-relative directory that receives the jar (abstract)."""
        mx.nyi('targetDir', self)

    def _copyToJdk(self, jdkDir, target):
        # Copy this distribution's jar into jdkDir/target, logging verbosely.
        destination = join(jdkDir, target)
        d = self.dist()
        mx.logv('Deploying {} to {}'.format(d.name, destination))
        copyToJdk(d.path, destination)

    def deploy(self, jdkDir):
        """Deploy the jar into the directory named by targetDir()."""
        self._copyToJdk(jdkDir, self.targetDir())
class ExtJDKDeployedDist(JarJDKDeployedDist):
    """A jar distribution installed into the JDK extension directory."""

    def __init__(self, name, partOfHotSpot=False):
        JarJDKDeployedDist.__init__(self, name, partOfHotSpot)

    def targetDir(self):
        """Extension jars are placed under jre/lib/ext."""
        return join('jre', 'lib', 'ext')
class LibJDKDeployedDist(JarJDKDeployedDist):
    """A jar distribution installed into the JDK library directory."""

    def __init__(self, name, partOfHotSpot=False):
        JarJDKDeployedDist.__init__(self, name, partOfHotSpot)

    def targetDir(self):
        """Library jars are placed under jre/lib."""
        return join('jre', 'lib')
class JvmciJDKDeployedDist(JarJDKDeployedDist):
    """A jar distribution deployed into the JDK's jre/lib/jvmci directory."""

    def __init__(self, name, partOfHotSpot=False, compilers=False):
        JarJDKDeployedDist.__init__(self, name, partOfHotSpot)
        # Compiler names contributed by this distribution (falsy when none).
        self._compilers = compilers

    def targetDir(self):
        """JVMCI jars are placed under jre/lib/jvmci."""
        return join('jre', 'lib', 'jvmci')

    def deploy(self, jdkDir):
        """Copy the jar, then refresh the JDK's JVMCI support files."""
        JarJDKDeployedDist.deploy(self, jdkDir)
        _updateJVMCIFiles(jdkDir)
        if not self._compilers:
            return
        # Only distributions that contribute compilers touch the properties.
        _updateJVMCIProperties(jdkDir, self._compilers)
def _exe(l):
    """Return ``l`` with the platform's executable suffix appended."""
    return mx.exe_suffix(l)

def _lib(l):
    """Return ``l`` formatted as a platform shared-library file name."""
    withPrefix = mx.add_lib_prefix(l)
    return mx.add_lib_suffix(withPrefix)

def _lib_dbg(l):
    """Return ``l`` formatted as a shared-library debug-info file name."""
    withPrefix = mx.add_lib_prefix(l)
    return mx.add_debug_lib_suffix(withPrefix)
class HotSpotVMJDKDeployedDist(JDKDeployedDist):
    # Deploys a HotSpot VM build, packaged as a tar distribution, into a JDK.
    def dist(self):
        # The distribution name is a template ('JVM_<vmbuild>_<vm>') that must
        # be instantiated with the currently selected VM and build type.
        name = mx.instantiatedDistributionName(self._name, dict(vm=get_vm(), vmbuild=_vmbuild), context=self._name)
        return mx.distribution(name)
    def deploy(self, jdkDir):
        # Only deploy into the JDK whose build type matches the current one.
        vmbuild = _vmbuildFromJdkDir(jdkDir)
        if vmbuild != _vmbuild:
            return
        # Maps a tar member name to its destination directory relative to jdkDir;
        # members not listed here are ignored.
        _hs_deploy_map = {
            'jvmti.h' : 'include',
            'sa-jdi.jar' : 'lib',
            _lib('jvm') : join(relativeVmLibDirInJdk(), get_vm()),
            _lib_dbg('jvm') : join(relativeVmLibDirInJdk(), get_vm()),
            _lib('saproc') : relativeVmLibDirInJdk(),
            _lib_dbg('saproc') : relativeVmLibDirInJdk(),
            _lib('jsig') : relativeVmLibDirInJdk(),
            _lib_dbg('jsig') : relativeVmLibDirInJdk(),
        }
        dist = self.dist()
        with tarfile.open(dist.path, 'r') as tar:
            for m in tar.getmembers():
                if m.name in _hs_deploy_map:
                    targetDir = join(jdkDir, _hs_deploy_map[m.name])
                    mx.logv('Deploying {} from {} to {}'.format(m.name, dist.name, targetDir))
                    tar.extract(m, targetDir)
        # Register the deployed VM in the JDK's jvm.cfg after extraction.
        updateJvmCfg(jdkDir, get_vm())
"""
List of distributions that are deployed into a JDK by mx.
"""
jdkDeployedDists = [
LibJDKDeployedDist('JVMCI_SERVICE', partOfHotSpot=True),
JvmciJDKDeployedDist('JVMCI_API', partOfHotSpot=True),
JvmciJDKDeployedDist('JVMCI_HOTSPOT', partOfHotSpot=True),
JvmciJDKDeployedDist('JVMCI_HOTSPOTVMCONFIG', partOfHotSpot=True),
JvmciJDKDeployedDist('JVMCI_OPTIONS', partOfHotSpot=True),
HotSpotVMJDKDeployedDist('JVM_<vmbuild>_<vm>'),
]
# Unix permission bits applied to entries created in a cloned JDK.
# Written with the 0o prefix (valid in Python 2.6+ and Python 3) instead of
# the Python-2-only bare-leading-zero octal form, which is a syntax error
# under Python 3. Values are unchanged: rwxr-xr-x for dirs/executables,
# rw-r--r-- for plain files.
JDK_UNIX_PERMISSIONS_DIR = 0o755
JDK_UNIX_PERMISSIONS_FILE = 0o644
JDK_UNIX_PERMISSIONS_EXEC = 0o755
def isVMSupported(vm):
    """Return True if the named VM can be used on the current host.

    The only unsupported combination is the client VM on Mac OS X, where the
    java launcher translates '-client' to '-server'.
    """
    onDarwin = len(platform.mac_ver()[0]) != 0
    return not (vm == 'client' and onDarwin)
def get_vm_cwd():
    """
    Get the current working directory to switch to before running the VM.

    Returns None when no explicit working directory has been configured
    (_vm_cwd defaults to None).
    """
    return _vm_cwd
def get_installed_jdks():
    """
    Get the base directory in which the JDKs cloned from $JAVA_HOME exist.

    Returns None when no base directory has been configured
    (_installed_jdks defaults to None).
    """
    return _installed_jdks
def get_vm_prefix(asList=True):
    """
    Get the prefix for running the VM (e.g. "/usr/bin/gdb --args").

    When asList is True, the prefix is returned split into a list of
    arguments (empty when no prefix is configured); otherwise the raw
    string (possibly None) is returned.
    """
    if not asList:
        return _vm_prefix
    if _vm_prefix is None:
        return []
    return _vm_prefix.split()
def get_vm_choices():
    """
    Get the names of available VMs.

    Returns a dict keys view over _vmChoices. Note: viewkeys() is
    Python-2-only syntax (this file targets Python 2).
    """
    return _vmChoices.viewkeys()
def dealiased_vm(vm):
    """
    If 'vm' is a legacy alias, return the name it aliases; otherwise
    return 'vm' unchanged (including None / empty string).
    """
    if not vm:
        return vm
    return _vmAliases.get(vm, vm)
def get_vm():
    """
    Gets the configured VM, presenting a dialogue if there is no currently configured VM.
    """
    global _vm
    # Fast path: a VM has already been selected (or set by a VM context manager).
    if _vm:
        return _vm
    vm = mx.get_env('DEFAULT_VM')
    envPath = join(_suite.mxDir, 'env')
    if vm and vm in _vmAliases:
        # A legacy VM name is persisted: tell the user to migrate the env
        # file, then continue with the de-aliased name.
        if exists(envPath):
            with open(envPath) as fp:
                if 'DEFAULT_VM=' + vm in fp.read():
                    mx.log('Please update the DEFAULT_VM value in ' + envPath + ' to replace "' + vm + '" with "' + _vmAliases[vm] + '"')
        vm = _vmAliases[vm]
    if vm is None:
        if not mx.is_interactive():
            mx.abort('Need to specify VM with --vm option or DEFAULT_VM environment variable')
        # Interactive session: offer only VMs that carry a description and
        # optionally persist the choice as DEFAULT_VM.
        mx.log('Please select the VM to be executed from the following: ')
        items = [k for k in _vmChoices.keys() if _vmChoices[k] is not None]
        descriptions = [_vmChoices[k] for k in _vmChoices.keys() if _vmChoices[k] is not None]
        vm = mx.select_items(items, descriptions, allowMultiple=False)
        mx.ask_persist_env('DEFAULT_VM', vm)
    # Cache the selection for subsequent calls.
    _vm = vm
    return vm
"""
A context manager that can be used with the 'with' statement to set the VM
used by all VM executions within the scope of the 'with' statement. For example:
with VM('server'):
dacapo(['pmd'])
"""
class VM:
def __init__(self, vm=None, build=None):
assert vm is None or vm in _vmChoices.keys()
assert build is None or build in _vmbuildChoices
self.vm = vm if vm else _vm
self.build = build if build else _vmbuild
def __enter__(self):
global _vm, _vmbuild
self.previousVm = _vm
self.previousBuild = _vmbuild
mx.reInstantiateDistribution('JVM_<vmbuild>_<vm>', dict(vm=self.previousVm, vmbuild=self.previousBuild), dict(vm=self.vm, vmbuild=self.build))
_vm = self.vm
_vmbuild = self.build
def __exit__(self, exc_type, exc_value, traceback):
global _vm, _vmbuild
mx.reInstantiateDistribution('JVM_<vmbuild>_<vm>', dict(vm=self.vm, vmbuild=self.build), dict(vm=self.previousVm, vmbuild=self.previousBuild))
_vm = self.previousVm
_vmbuild = self.previousBuild
def chmodRecursive(dirname, chmodFlagsDir):
    """Apply ``chmodFlagsDir`` to ``dirname`` and every directory below it.

    Only directory permissions are changed; files are left untouched, as in
    the original implementation. No-op on Windows, where POSIX permission
    bits do not apply.
    """
    if mx.get_os() == 'windows':
        return
    # Replaces os.path.walk (removed in Python 3) whose callback also ignored
    # its own flags argument in favor of the closed-over chmodFlagsDir.
    # os.walk yields the same set of directories without following symlinks.
    os.chmod(dirname, chmodFlagsDir)
    for root, dirs, _ in os.walk(dirname):
        for d in dirs:
            os.chmod(join(root, d), chmodFlagsDir)
def export(args):
    """create archives of builds split by vmbuild and vm

    Produces .tar.gz archives for: each base JDK (per vmbuild), each VM within
    each base JDK (with debug symbol files split out), and the compiled jvmci
    class files. Every archive also contains a JSON metadata file describing
    the export (revision, platform, architecture, ...)."""
    parser = ArgumentParser(prog='mx export')
    args = parser.parse_args(args)
    # collect data about export
    infos = dict()
    infos['timestamp'] = time.time()
    hgcfg = mx.HgConfig()
    hgcfg.check()
    infos['revision'] = hgcfg.tip('.') + ('+' if hgcfg.isDirty('.') else '')
    # TODO: infos['repository']
    infos['jdkversion'] = str(get_jvmci_bootstrap_jdk().version)
    infos['architecture'] = mx.get_arch()
    infos['platform'] = mx.get_os()
    # NOTE(review): 'mx.get_os' is compared without being called, so this
    # condition is always True; the body is a no-op anyway — confirm intent.
    if mx.get_os != 'windows':
        pass
        # infos['ccompiler']
        # infos['linker']
    infos['hostname'] = socket.gethostname()
    def _writeJson(suffix, properties):
        # Write infos + 'properties' to 'export-<suffix>.json'; keys must not collide.
        d = infos.copy()
        for k, v in properties.iteritems():
            assert not d.has_key(k)
            d[k] = v
        jsonFileName = 'export-' + suffix + '.json'
        with open(jsonFileName, 'w') as f:
            print >> f, json.dumps(d)
        return jsonFileName
    def _genFileName(archivetype, middle):
        idPrefix = infos['revision'] + '_'
        idSuffix = '.tar.gz'
        return join(_suite.dir, "graalvm_" + archivetype + "_" + idPrefix + middle + idSuffix)
    def _genFileArchPlatformName(archivetype, middle):
        return _genFileName(archivetype, infos['platform'] + '_' + infos['architecture'] + '_' + middle)
    # archive different build types of hotspot
    for vmBuild in _vmbuildChoices:
        jdkDir = join(_jdksDir(), vmBuild)
        if not exists(jdkDir):
            mx.logv("skipping " + vmBuild)
            continue
        tarName = _genFileArchPlatformName('basejdk', vmBuild)
        mx.logv("creating basejdk " + tarName)
        vmSet = set()
        with tarfile.open(tarName, 'w:gz') as tar:
            for root, _, files in os.walk(jdkDir):
                # Directories named after a VM are archived separately below.
                if basename(root) in _vmChoices.keys():
                    # TODO: add some assert to check path assumption
                    vmSet.add(root)
                    continue
                for f in files:
                    name = join(root, f)
                    # print name
                    tar.add(name, name)
            n = _writeJson("basejdk-" + vmBuild, {'vmbuild' : vmBuild})
            tar.add(n, n)
        # create a separate archive for each VM
        for vm in vmSet:
            bVm = basename(vm)
            vmTarName = _genFileArchPlatformName('vm', vmBuild + '_' + bVm)
            mx.logv("creating vm " + vmTarName)
            debugFiles = set()
            with tarfile.open(vmTarName, 'w:gz') as tar:
                for root, _, files in os.walk(vm):
                    for f in files:
                        # TODO: mac, windows, solaris?
                        if any(map(f.endswith, [".debuginfo"])):
                            debugFiles.add(f)
                        else:
                            name = join(root, f)
                            # print name
                            tar.add(name, name)
                n = _writeJson("vm-" + vmBuild + "-" + bVm, {'vmbuild' : vmBuild, 'vm' : bVm})
                tar.add(n, n)
            if len(debugFiles) > 0:
                debugTarName = _genFileArchPlatformName('debugfilesvm', vmBuild + '_' + bVm)
                mx.logv("creating debugfilesvm " + debugTarName)
                with tarfile.open(debugTarName, 'w:gz') as tar:
                    for f in debugFiles:
                        # NOTE(review): 'root' here is left over from the walk loop
                        # above, so all debug files are joined against the LAST
                        # visited directory — verify this holds for nested layouts.
                        name = join(root, f)
                        # print name
                        tar.add(name, name)
                    n = _writeJson("debugfilesvm-" + vmBuild + "-" + bVm, {'vmbuild' : vmBuild, 'vm' : bVm})
                    tar.add(n, n)
    # jvmci directory
    jvmciDirTarName = _genFileName('classfiles', 'javac')
    mx.logv("creating jvmci " + jvmciDirTarName)
    with tarfile.open(jvmciDirTarName, 'w:gz') as tar:
        for root, _, files in os.walk("jvmci"):
            for f in [f for f in files if not f.endswith('.java')]:
                name = join(root, f)
                # print name
                tar.add(name, name)
        n = _writeJson("jvmci", {'javacompiler' : 'javac'})
        tar.add(n, n)
def relativeVmLibDirInJdk():
    """Return the JDK-relative path of the directory holding the VM libraries."""
    currentOs = mx.get_os()
    if currentOs == 'darwin':
        return join('jre', 'lib')
    if currentOs in ('windows', 'cygwin'):
        return join('jre', 'bin')
    return join('jre', 'lib', mx.get_arch())
def vmLibDirInJdk(jdkDir):
    """
    Gets the directory within a JDK where the server and client
    sub-directories are located.
    """
    relPath = relativeVmLibDirInJdk()
    return join(jdkDir, relPath)
def getVmJliLibDirs(jdkDir):
    """
    Get the directories within a JDK where the jli library designates to.
    """
    currentOs = mx.get_os()
    if currentOs == 'darwin':
        return [join(jdkDir, 'jre', 'lib', 'jli')]
    if currentOs in ('windows', 'cygwin'):
        return [join(jdkDir, 'jre', 'bin'), join(jdkDir, 'bin')]
    arch = mx.get_arch()
    return [join(jdkDir, 'jre', 'lib', arch, 'jli'), join(jdkDir, 'lib', arch, 'jli')]
def getVmCfgInJdk(jdkDir, jvmCfgFile='jvm.cfg'):
    """
    Get the jvm.cfg file.
    """
    currentOs = mx.get_os()
    if currentOs in ("windows", "cygwin"):
        # On Windows the cfg file is under jre/lib/<arch>, not next to the VM libraries.
        return join(jdkDir, 'jre', 'lib', mx.get_arch(), jvmCfgFile)
    return join(vmLibDirInJdk(jdkDir), jvmCfgFile)
def _jdksDir():
    """Absolute path of the directory containing the per-build JVMCI JDKs."""
    base = _installed_jdks if _installed_jdks else _suite.dir
    return os.path.abspath(join(base, 'jdk' + str(get_jvmci_bootstrap_jdk().version)))
def _handle_missing_VM(bld, vm=None):
    """Report that the ('bld', 'vm') VM is missing; interactively offer to build it, otherwise abort."""
    if not vm:
        vm = get_vm()
    mx.log('The ' + bld + ' ' + vm + ' VM has not been created')
    if mx.is_interactive() and mx.ask_yes_no('Build it now', 'y'):
        with VM(vm, bld):
            build([])
        return
    mx.abort('You need to run "mx --vm ' + vm + ' --vmbuild ' + bld + ' build" to build the selected VM')
def check_VM_exists(vm, jdkDir, build=None):
    """Verify that 'vm' is registered as KNOWN in the jvm.cfg of 'jdkDir'; treat it as missing otherwise."""
    if not build:
        build = _vmbuild
    wanted = '-' + vm + ' KNOWN'
    with open(getVmCfgInJdk(jdkDir)) as f:
        for line in f:
            if line.strip() == wanted:
                return
    _handle_missing_VM(build, vm)
def get_jvmci_jdk_dir(build=None, vmToCheck=None, create=False, deployDists=True):
    """
    Gets the path of the JVMCI JDK corresponding to 'build' (or '_vmbuild'), creating it
    first if it does not exist and 'create' is True. If the JDK was created or
    'deployDists' is True, then the JDK deployable distributions are deployed into
    the JDK.
    """
    if not build:
        build = _vmbuild
    jdkDir = join(_jdksDir(), build)
    if create:
        srcJdk = get_jvmci_bootstrap_jdk().home
        if not exists(jdkDir):
            mx.log('Creating ' + jdkDir + ' from ' + srcJdk)
            shutil.copytree(srcJdk, jdkDir)
            # Make a copy of the default VM so that this JDK can be
            # reliably used as the bootstrap for a HotSpot build.
            jvmCfg = getVmCfgInJdk(jdkDir)
            if not exists(jvmCfg):
                mx.abort(jvmCfg + ' does not exist')
            defaultVM = None
            jvmCfgLines = []
            with open(jvmCfg) as f:
                for line in f:
                    if line.startswith('-') and defaultVM is None:
                        parts = line.split()
                        if len(parts) == 2:
                            assert parts[1] == 'KNOWN', parts[1]
                            defaultVM = parts[0][1:]
                            jvmCfgLines += ['# default VM is a copy of the unmodified ' + defaultVM + ' VM\n']
                            jvmCfgLines += ['-original KNOWN\n']
                        else:
                            # skip lines which we cannot parse (e.g. '-hotspot ALIASED_TO -client')
                            mx.log("WARNING: skipping not parsable line \"" + line + "\"")
                    else:
                        jvmCfgLines += [line]
            assert defaultVM is not None, 'Could not find default VM in ' + jvmCfg
            chmodRecursive(jdkDir, JDK_UNIX_PERMISSIONS_DIR)
            shutil.move(join(vmLibDirInJdk(jdkDir), defaultVM), join(vmLibDirInJdk(jdkDir), 'original'))
            if mx.get_os() != 'windows':
                os.chmod(jvmCfg, JDK_UNIX_PERMISSIONS_FILE)
            with open(jvmCfg, 'w') as fp:
                for line in jvmCfgLines:
                    fp.write(line)
            # patch 'release' file (append jvmci revision)
            releaseFile = join(jdkDir, 'release')
            if exists(releaseFile):
                releaseFileLines = []
                with open(releaseFile) as f:
                    for line in f:
                        releaseFileLines.append(line)
                if mx.get_os() != 'windows':
                    os.chmod(releaseFile, JDK_UNIX_PERMISSIONS_FILE)
                with open(releaseFile, 'w') as fp:
                    for line in releaseFileLines:
                        if line.startswith("SOURCE="):
                            try:
                                sourceLine = line[0:-2]  # strip the trailing quote and newline
                                hgcfg = mx.HgConfig()
                                hgcfg.check()
                                revision = hgcfg.tip('.')[:12]  # take first 12 chars
                                fp.write(sourceLine + ' jvmci:' + revision + '\"\n')
                            except Exception:
                                # Fix: was a bare 'except:' which also swallowed
                                # KeyboardInterrupt/SystemExit. On any hg failure,
                                # fall back to the unmodified line.
                                fp.write(line)
                        else:
                            fp.write(line)
            # Install a copy of the disassembler library
            try:
                hsdis([], copyToDir=vmLibDirInJdk(jdkDir))
            except SystemExit:
                pass
    else:
        if not exists(jdkDir):
            if _installed_jdks:
                mx.log("The selected JDK directory does not (yet) exist: " + jdkDir)
            _handle_missing_VM(build, vmToCheck)
    if deployDists:
        for jdkDist in jdkDeployedDists:
            dist = jdkDist.dist()
            if exists(dist.path):
                _installDistInJdks(jdkDist)
        if vmToCheck is not None:
            jvmCfg = getVmCfgInJdk(jdkDir)
            found = False
            with open(jvmCfg) as f:
                for line in f:
                    if line.strip() == '-' + vmToCheck + ' KNOWN':
                        found = True
                        break
            if not found:
                _handle_missing_VM(build, vmToCheck)
    return jdkDir
def _updateInstalledJVMCIOptionsFile(jdkDir):
    """Sync the suite's jvmci.options into 'jdkDir': copy it if present, else delete a stale installed copy."""
    sourceOptions = join(_suite.dir, 'jvmci.options')
    installedOptions = join(jdkDir, 'jre', 'lib', 'jvmci', 'options')
    if exists(sourceOptions):
        shutil.copy(sourceOptions, installedOptions)
    elif exists(installedOptions):
        os.unlink(installedOptions)
def copyToJdk(src, dst, permissions=JDK_UNIX_PERMISSIONS_FILE):
    """
    Copies (or, if SYMLINK_GRAAL_JAR=true, symlinks) 'src' into directory 'dst',
    creating 'dst' if needed. 'permissions' is applied to the copied file.
    """
    name = os.path.basename(src)
    if not exists(dst):
        os.makedirs(dst)
    dstLib = join(dst, name)
    if mx.get_env('SYMLINK_GRAAL_JAR', None) == 'true':
        # Using symlinks is much faster than copying but may
        # cause issues if the lib is being updated while
        # the VM is running.
        linkIsCurrent = os.path.islink(dstLib) and os.path.realpath(dstLib) == src
        if not linkIsCurrent:
            if exists(dstLib):
                os.remove(dstLib)
            os.symlink(src, dstLib)
        return
    # do a copy and then a move to get atomic updating (on Unix)
    fd, tmp = tempfile.mkstemp(suffix='', prefix=name, dir=dst)
    shutil.copyfile(src, tmp)
    os.close(fd)
    shutil.move(tmp, dstLib)
    os.chmod(dstLib, permissions)
def _extractJVMCIFiles(jdkJars, jvmciJars, servicesDir, obsoleteCheck):
    """Merge the META-INF/jvmci.services entries of 'jvmciJars' into per-service
    files under 'servicesDir'. When 'obsoleteCheck' is True, nothing is written;
    instead, pre-existing service files not re-generated by this run are offered
    for interactive deletion."""
    # If servicesDir is missing, os.makedirs creates it and returns None, so
    # oldServices is None (falsy) — i.e. "no pre-existing files to check".
    oldServices = os.listdir(servicesDir) if exists(servicesDir) else os.makedirs(servicesDir)
    jvmciServices = {}
    for jar in jvmciJars:
        if os.path.isfile(jar):
            with zipfile.ZipFile(jar) as zf:
                for member in zf.namelist():
                    # Skip the directory entry itself; each remaining member is a service file.
                    if member.startswith('META-INF/jvmci.services/') and member != 'META-INF/jvmci.services/':
                        service = basename(member)
                        assert service != "", member
                        with zf.open(member) as serviceFile:
                            providers = jvmciServices.setdefault(service, [])
                            for line in serviceFile.readlines():
                                line = line.strip()
                                # Deduplicate providers across jars, preserving order.
                                if line and line not in providers:
                                    providers.append(line)
    for service, providers in jvmciServices.iteritems():
        if not obsoleteCheck:
            # Write to a temp file then move so the update is atomic.
            fd, tmp = tempfile.mkstemp(prefix=service)
            f = os.fdopen(fd, 'w+')
            for provider in providers:
                f.write(provider + os.linesep)
            target = join(servicesDir, service)
            f.close()
            shutil.move(tmp, target)
            if mx.get_os() != 'windows':
                os.chmod(target, JDK_UNIX_PERMISSIONS_FILE)
        # Anything left in oldServices afterwards was not re-generated.
        if oldServices and service in oldServices:
            oldServices.remove(service)
    if obsoleteCheck and mx.is_interactive() and oldServices:
        if mx.ask_yes_no('These files in ' + servicesDir + ' look obsolete:\n ' + '\n '.join(oldServices) + '\nDelete them', 'n'):
            for f in oldServices:
                path = join(servicesDir, f)
                os.remove(path)
                mx.log('Deleted ' + path)
def _updateJVMCIFiles(jdkDir, obsoleteCheck=False):
    """Refresh the JVMCI service registrations of 'jdkDir' from its deployed JVMCI jars."""
    jreJVMCIDir = join(jdkDir, 'jre', 'lib', 'jvmci')
    jarNames = [e for e in os.listdir(jreJVMCIDir) if e.endswith('.jar')]
    jvmciJars = [join(jreJVMCIDir, name) for name in jarNames]
    servicesDir = join(jreJVMCIDir, 'services')
    _extractJVMCIFiles(_getJdkDeployedJars(jdkDir), jvmciJars, servicesDir, obsoleteCheck)
def _updateJVMCIProperties(jdkDir, compilers):
jvmciProperties = join(jdkDir, 'jre', 'lib', 'jvmci', 'jvmci.properties')
def createFile(lines):
with open(jvmciProperties, 'w') as fp:
header = "# the last definition of a property wins (i.e., it overwrites any earlier definitions)"
if header not in lines:
print >> fp, header
for line in lines:
print >> fp, line
lines = []
if exists(jvmciProperties):
with open(jvmciProperties) as fp:
for line in fp:
if line.startswith('jvmci.compiler='):
compiler = line.strip().split('=')[1]
if compiler not in compilers:
lines.append(line.strip())
else:
lines.append(line.strip())
for compiler in compilers:
lines.append("jvmci.compiler=" + compiler)
createFile(lines)
def _installDistInJdks(deployableDist):
    """
    Installs the jar(s) for a given Distribution into all existing JVMCI JDKs
    """
    jdks = _jdksDir()
    if not exists(jdks):
        return
    for entry in os.listdir(jdks):
        deployableDist.deploy(join(jdks, entry))
def _vmbuildFromJdkDir(jdkDir):
    """
    Determines the VM build corresponding to 'jdkDir'.
    """
    baseDir = _jdksDir()
    assert jdkDir.startswith(baseDir)
    buildType = os.path.relpath(jdkDir, baseDir)
    assert buildType in _vmbuildChoices, 'The vmbuild derived from ' + jdkDir + ' is unknown: ' + buildType
    return buildType
def _check_for_obsolete_jvmci_files():
    """Scan every existing JVMCI JDK for obsolete service files (interactive cleanup)."""
    jdks = _jdksDir()
    if not exists(jdks):
        return
    for entry in os.listdir(jdks):
        _updateJVMCIFiles(join(jdks, entry), obsoleteCheck=True)
def _getJdkDeployedJars(jdkDir):
    """
    Gets jar paths for all deployed distributions in the context of
    a given JDK directory.
    """
    return [join(dist.targetDir(), basename(dist.dist().path))
            for dist in jdkDeployedDists
            if isinstance(dist, JarJDKDeployedDist)]
# run a command in the windows SDK Debug Shell
def _runInDebugShell(cmd, workingDir, logFile=None, findInOutput=None, respondTo=None):
    """Run 'cmd' in 'workingDir' inside the Windows SDK Debug Shell.

    The shell's output is scanned line by line: if 'findInOutput' (a compiled
    regex) matches any line, the run counts as successful; otherwise success is
    the shell's %errorlevel% being 0. 'respondTo' maps compiled regexes to
    strings written to the shell's stdin when a line matches (for interactive
    prompts). If 'logFile' is given, all output is also written there.
    Returns True on success, False otherwise."""
    if respondTo is None:
        respondTo = {}
    newLine = os.linesep
    # Sentinel tokens echoed by the shell to delimit our command's output.
    startToken = 'RUNINDEBUGSHELL_STARTSEQUENCE'
    endToken = 'RUNINDEBUGSHELL_ENDSEQUENCE'
    winSDK = mx.get_env('WIN_SDK', 'C:\\Program Files\\Microsoft SDKs\\Windows\\v7.1\\')
    if not exists(mx._cygpathW2U(winSDK)):
        mx.abort("Could not find Windows SDK : '" + winSDK + "' does not exist")
    winSDKSetEnv = mx._cygpathW2U(join(winSDK, 'Bin', 'SetEnv.cmd'))
    if not exists(winSDKSetEnv):
        mx.abort("Invalid Windows SDK path (" + winSDK + ") : could not find Bin/SetEnv.cmd (you can use the WIN_SDK environment variable to specify an other path)")
    wincmd = 'cmd.exe /E:ON /V:ON /K "' + mx._cygpathU2W(winSDKSetEnv) + '"'
    p = subprocess.Popen(wincmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    stdout = p.stdout
    stdin = p.stdin
    if logFile:
        log = open(logFile, 'w')
    ret = False
    def _writeProcess(s):
        stdin.write(s + newLine)
    _writeProcess("echo " + startToken)
    while True:
        # encoding may be None on windows plattforms
        if sys.stdout.encoding is None:
            encoding = 'utf-8'
        else:
            encoding = sys.stdout.encoding
        line = stdout.readline().decode(encoding)
        if logFile:
            log.write(line.encode('utf-8'))
        line = line.strip()
        mx.log(line)
        if line == startToken:
            # The shell is ready: issue the actual command, followed by the end marker.
            _writeProcess('cd /D ' + workingDir + ' & ' + cmd + ' & echo ' + endToken)
        for regex in respondTo.keys():
            match = regex.search(line)
            if match:
                _writeProcess(respondTo[regex])
        if findInOutput:
            match = findInOutput.search(line)
            if match:
                ret = True
        if line == endToken:
            if not findInOutput:
                # No output pattern requested: query the errorlevel instead.
                _writeProcess('echo ERRXXX%errorlevel%')
            else:
                break
        if line.startswith('ERRXXX'):
            if line == 'ERRXXX0':
                ret = True
            break
    _writeProcess("exit")
    if logFile:
        log.close()
    return ret
def jdkhome(vm=None):
    """return the JDK directory selected for the 'vm' command"""
    # 'vm' is accepted for command-interface compatibility but is not consulted here.
    return get_jvmci_jdk_dir(deployDists=False)
def print_jdkhome(args, vm=None):
    """print the JDK directory selected for the 'vm' command"""
    print(jdkhome(vm))
def buildvars(args):
    """describe the variables that can be set by the -D option to the 'mx build' commmand"""
    # Variable name -> human-readable description, printed sorted by name below.
    buildVars = {
        'ALT_BOOTDIR' : 'The location of the bootstrap JDK installation (default: ' + get_jvmci_bootstrap_jdk().home + ')',
        'ALT_OUTPUTDIR' : 'Build directory',
        'HOTSPOT_BUILD_JOBS' : 'Number of CPUs used by make (default: ' + str(mx.cpu_count()) + ')',
        'INSTALL' : 'Install the built VM into the JDK? (default: y)',
        'ZIP_DEBUGINFO_FILES' : 'Install zipped debug symbols file? (default: 0)',
    }
    mx.log('HotSpot build variables that can be set by the -D option to "mx build":')
    mx.log('')
    for n in sorted(buildVars.iterkeys()):
        mx.log(n)
        # Wrap long descriptions, indenting continuation lines.
        mx.log(textwrap.fill(buildVars[n], initial_indent='    ', subsequent_indent='    ', width=200))
    mx.log('')
    mx.log('Note that these variables can be given persistent values in the file ' + join(_suite.mxDir, 'env') + ' (see \'mx about\').')
def _hotspotReplaceResultsVar(m):
    """Expand a results-path variable for HotSpot projects ('os', 'nojvmci', 'buildname', 'vmbuild'); defer others to mx."""
    var = m.group(1)
    if var == 'os':
        return _hotspotOs(mx.get_os())
    elif var == 'nojvmci':
        return '-nojvmci' if get_vm().endswith('nojvmci') else ''
    elif var == 'buildname':
        return _hotspotGetVariant()
    elif var == 'vmbuild':
        return _vmbuild
    return mx._replaceResultsVar(m)
class HotSpotProject(mx.NativeProject):
    """Native mx project for the HotSpot sources whose output/result paths
    support HotSpot-specific variables (see _hotspotReplaceResultsVar)."""
    def __init__(self, suite, name, deps, workingSets, results, output, **args):
        mx.NativeProject.__init__(self, suite, name, "", [], deps, workingSets, results, output, join(suite.dir, "src")) # TODO...
    def getOutput(self, replaceVar=_hotspotReplaceResultsVar):
        """Resolve the output path with HotSpot variable expansion."""
        return mx.NativeProject.getOutput(self, replaceVar=replaceVar)
    def getResults(self, replaceVar=_hotspotReplaceResultsVar):
        """Resolve the result paths with HotSpot variable expansion."""
        return mx.NativeProject.getResults(self, replaceVar=replaceVar)
    def getBuildTask(self, args):
        """Create the build task for the globally selected VM and build type."""
        return HotSpotBuildTask(self, args, _vmbuild, get_vm())
def _hotspotOs(mx_os):
if mx_os == 'darwin':
return 'bsd'
return mx_os
def _hotspotGetVariant(vm=None):
if not vm:
vm = get_vm()
variant = {'client': 'compiler1', 'server': 'compiler2', 'client-nojvmci': 'compiler1', 'server-nojvmci': 'compiler2'}.get(vm, vm)
return variant
class HotSpotBuildTask(mx.NativeBuildTask):
    """Build task that compiles a HotSpot VM for a given (vm, vmbuild) pair,
    via msbuild in the Windows SDK shell on Windows and via make elsewhere."""
    def __init__(self, project, args, vmbuild, vm):
        mx.NativeBuildTask.__init__(self, args, project)
        self.vm = vm
        self.vmbuild = vmbuild
    def __str__(self):
        return 'Building HotSpot[{}, {}]'.format(self.vmbuild, self.vm)
    def build(self):
        """Run the platform-specific HotSpot build for self.vm/self.vmbuild."""
        isWindows = platform.system() == 'Windows' or "CYGWIN" in platform.system()
        # buildSuffix selects the make target flavor: '' = server, '1' = client, 'jvmci' = jvmci.
        if self.vm.startswith('server'):
            buildSuffix = ''
        elif self.vm.startswith('client'):
            buildSuffix = '1'
        else:
            assert self.vm == 'jvmci', self.vm
            buildSuffix = 'jvmci'
        if isWindows:
            t_compilelogfile = mx._cygpathU2W(os.path.join(_suite.dir, "jvmciCompile.log"))
            mksHome = mx.get_env('MKS_HOME', 'C:\\cygwin\\bin')
            variant = _hotspotGetVariant(self.vm)
            project_config = variant + '_' + self.vmbuild
            jvmciHome = mx._cygpathU2W(_suite.dir)
            # Clean, regenerate the Visual Studio project, then build it.
            _runInDebugShell('msbuild ' + jvmciHome + r'\build\vs-amd64\jvm.vcproj /p:Configuration=' + project_config + ' /target:clean', jvmciHome)
            winCompileCmd = r'set HotSpotMksHome=' + mksHome + r'& set JAVA_HOME=' + mx._cygpathU2W(get_jvmci_bootstrap_jdk().home) + r'& set path=%JAVA_HOME%\bin;%path%;%HotSpotMksHome%& cd /D "' + jvmciHome + r'\make\windows"& call create.bat ' + jvmciHome
            print winCompileCmd
            winCompileSuccess = re.compile(r"^Writing \.vcxproj file:")
            if not _runInDebugShell(winCompileCmd, jvmciHome, t_compilelogfile, winCompileSuccess):
                mx.abort('Error executing create command')
            winBuildCmd = 'msbuild ' + jvmciHome + r'\build\vs-amd64\jvm.vcxproj /p:Configuration=' + project_config + ' /p:Platform=x64'
            if not _runInDebugShell(winBuildCmd, jvmciHome, t_compilelogfile):
                mx.abort('Error building project')
        else:
            def filterXusage(line):
                # Suppress make's noisy Xusage.txt copy messages.
                if not 'Xusage.txt' in line:
                    sys.stderr.write(line + os.linesep)
            cpus = self.parallelism
            makeDir = join(_suite.dir, 'make')
            runCmd = [mx.gmake_cmd(), '-C', makeDir]
            env = os.environ.copy()
            # These must be passed as environment variables
            env.setdefault('LANG', 'C')
            #env['JAVA_HOME'] = jdk
            def setMakeVar(name, default, env=None):
                """Sets a make variable on the command line to the value
                of the variable in 'env' with the same name if defined
                and 'env' is not None otherwise to 'default'
                """
                runCmd.append(name + '=' + (env.get(name, default) if env else default))
            if self.args.D:
                for nv in self.args.D:
                    name, value = nv.split('=', 1)
                    setMakeVar(name.strip(), value)
            setMakeVar('ARCH_DATA_MODEL', '64', env=env)
            setMakeVar('HOTSPOT_BUILD_JOBS', str(cpus), env=env)
            setMakeVar('ALT_BOOTDIR', get_jvmci_bootstrap_jdk().home, env=env)
            # setMakeVar("EXPORT_PATH", jdk)
            setMakeVar('MAKE_VERBOSE', 'y' if mx._opts.verbose else '')
            if self.vm.endswith('nojvmci'):
                setMakeVar('INCLUDE_JVMCI', 'false')
                setMakeVar('ALT_OUTPUTDIR', join(_suite.dir, 'build-nojvmci', _hotspotOs(mx.get_os())), env=env)
            else:
                version = _suite.release_version()
                setMakeVar('USER_RELEASE_SUFFIX', 'jvmci-' + version)
                setMakeVar('INCLUDE_JVMCI', 'true')
            # setMakeVar('INSTALL', 'y', env=env)
            if mx.get_os() == 'darwin' and platform.mac_ver()[0] != '':
                # Force use of clang on MacOS
                setMakeVar('USE_CLANG', 'true')
                setMakeVar('COMPILER_WARNINGS_FATAL', 'false')
            if mx.get_os() == 'solaris':
                # If using sparcWorks, setup flags to avoid make complaining about CC version
                cCompilerVersion = subprocess.Popen('CC -V', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).stderr.readlines()[0]
                if cCompilerVersion.startswith('CC: Sun C++'):
                    compilerRev = cCompilerVersion.split(' ')[3]
                    setMakeVar('ENFORCE_COMPILER_REV', compilerRev, env=env)
                    setMakeVar('ENFORCE_CC_COMPILER_REV', compilerRev, env=env)
                if self.vmbuild == 'jvmg':
                    # We want ALL the symbols when debugging on Solaris
                    setMakeVar('STRIP_POLICY', 'no_strip')
            # This removes the need to unzip the *.diz files before debugging in gdb
            setMakeVar('ZIP_DEBUGINFO_FILES', '0', env=env)
            if buildSuffix == "1":
                setMakeVar("BUILD_CLIENT_ONLY", "true")
            # Clear this variable as having it set can cause very confusing build problems
            env.pop('CLASSPATH', None)
            # Issue an env prefix that can be used to run the make on the command line
            if not mx._opts.verbose:
                mx.log('--------------- make command line ----------------------')
                envPrefix = ' '.join([key + '=' + env[key] for key in env.iterkeys() if not os.environ.has_key(key) or env[key] != os.environ[key]])
                if len(envPrefix):
                    mx.log('env ' + envPrefix + ' \\')
            runCmd.append(self.vmbuild + buildSuffix)
            runCmd.append("docs")
            # runCmd.append("export_" + build)
            if not mx._opts.verbose:
                mx.log(' '.join(runCmd))
                mx.log('--------------------------------------------------------')
            mx.run(runCmd, err=filterXusage, env=env)
        self._newestOutput = None
    def needsBuild(self, newestInput):
        """Return (True, reason) if any HotSpot source is newer than the newest output."""
        newestOutput = self.newestOutput()
        for d in ['src', 'make', join('jvmci', 'jdk.internal.jvmci.hotspot', 'src_gen', 'hotspot')]: # TODO should this be replaced by a dependency to the project?
            for root, dirnames, files in os.walk(join(_suite.dir, d)):
                # ignore src/share/tools
                if root == join(_suite.dir, 'src', 'share'):
                    dirnames.remove('tools')
                for f in (join(root, name) for name in files):
                    ts = mx.TimeStampFile(f)
                    if newestOutput:
                        if not newestOutput.exists():
                            return (True, '{} does not exist'.format(newestOutput))
                        if ts.isNewerThan(newestOutput):
                            return (True, '{} is newer than {}'.format(ts, newestOutput))
        return (False, None)
    def buildForbidden(self):
        """Skip building when disabled generally, for non-product 'original' VMs, or unsupported VMs."""
        if mx.NativeBuildTask.buildForbidden(self):
            return True
        if self.vm == 'original':
            if self.vmbuild != 'product':
                mx.log('only product build of original VM exists')
            return True
        if not isVMSupported(self.vm):
            mx.log('The ' + self.vm + ' VM is not supported on this platform - skipping')
            return True
        return False
    def clean(self, forBuild=False):
        """Remove the HotSpot build output directories and the generated JDKs."""
        if forBuild:  # Let make handle incremental builds
            return
        def handleRemoveReadonly(func, path, exc):
            # On Windows, read-only files make rmtree fail; force-writable and retry.
            excvalue = exc[1]
            if mx.get_os() == 'windows' and func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:
                os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)  # 0777
                func(path)
            else:
                raise
        def rmIfExists(name):
            if os.path.isdir(name):
                shutil.rmtree(name, ignore_errors=False, onerror=handleRemoveReadonly)
            elif os.path.isfile(name):
                os.unlink(name)
        rmIfExists(join(_suite.dir, 'build'))
        rmIfExists(join(_suite.dir, 'build-nojvmci'))
        rmIfExists(_jdksDir())
        self._newestOutput = None
def build(args, vm=None):
    """build the VM binary
    The global '--vm' and '--vmbuild' options select which VM type and build target to build."""
    # Override to fail quickly if extra arguments are given
    # at the end of the command line. This allows for a more
    # helpful error message.
    class AP(ArgumentParser):
        def __init__(self):
            ArgumentParser.__init__(self, prog='mx build')
        def parse_args(self, args):
            result = ArgumentParser.parse_args(self, args)
            if len(result.remainder) != 0:
                firstBuildTarget = result.remainder[0]
                mx.abort('To specify the ' + firstBuildTarget + ' VM build target, you need to use the global "--vmbuild" option. For example:\n' +
                         ' mx --vmbuild ' + firstBuildTarget + ' build')
            return result
    # Call mx.build to compile the Java sources
    parser = AP()
    parser.add_argument('-D', action='append', help='set a HotSpot build variable (run \'mx buildvars\' to list variables)', metavar='name=value')
    # initialize jdk
    get_jvmci_jdk_dir(create=True)
    mx.build(['--source', '1.7'] + args, parser=parser)
def updateJvmCfg(jdkDir, vm):
    """Ensure 'vm' is registered as KNOWN in the jvm.cfg of 'jdkDir', inserting
    it (and any configured aliases) before the first non-comment line if absent."""
    jvmCfg = getVmCfgInJdk(jdkDir)
    if not exists(jvmCfg):
        mx.abort(jvmCfg + ' does not exist')
    prefix = '-' + vm + ' '
    vmKnown = prefix + 'KNOWN\n'
    lines = []
    found = False
    with open(jvmCfg) as f:
        for line in f:
            if line.strip() == vmKnown.strip():
                found = True
            lines.append(line)
    if not found:
        mx.log('Prepending "' + prefix + 'KNOWN" to ' + jvmCfg)
        if mx.get_os() != 'windows':
            os.chmod(jvmCfg, JDK_UNIX_PERMISSIONS_FILE)
        with open(jvmCfg, 'w') as f:
            # 'written' tracks whether the new KNOWN entry has been emitted yet;
            # it goes in right after the leading comment block.
            written = False
            for line in lines:
                if line.startswith('#'):
                    f.write(line)
                    continue
                if not written:
                    f.write(vmKnown)
                    # Also register any aliases that point at this VM.
                    for alias, aliased in _vmAliases.iteritems():
                        if vm == aliased:
                            f.write('-' + alias + ' ALIASED_TO -' + aliased + '\n')
                    written = True
                if line.startswith(prefix):
                    # Drop/replace any stale entry for this VM further down.
                    line = vmKnown
                    if written:
                        continue
                f.write(line)
mx_gate.add_jacoco_includes(['jdk.internal.jvmci.*'])
def run_vm(args, vm=None, nonZeroIsFatal=True, out=None, err=None, cwd=None, timeout=None, vmbuild=None):
    """
    Runs a Java program by executing the java executable in a JVMCI JDK.
    """
    jdkTag = mx.get_jdk_option().tag
    if jdkTag and jdkTag != _JVMCI_JDK_TAG:
        mx.abort('The "--jdk" option must have the tag "' + _JVMCI_JDK_TAG + '" when running a command requiring a JVMCI VM')
    jvmciJdk = get_jvmci_jdk(vmbuild=vmbuild)
    return jvmciJdk.run_java(args, vm=vm, nonZeroIsFatal=nonZeroIsFatal, out=out, err=err, cwd=cwd, timeout=timeout)
def _unittest_config_participant(config):
    """mx_unittest config hook: when running on a JVMCI-enabled VM, strip
    JVMCI-deployed jars from the class path and disable the JVMCI class loader
    so test classes can access JVMCI classes directly."""
    vmArgs, mainClass, mainClassArgs = config
    if isJVMCIEnabled(get_vm()):
        # Remove entries from class path that are in JVMCI loaded jars
        cpIndex, cp = mx.find_classpath_arg(vmArgs)
        if cp:
            excluded = set()
            for jdkDist in jdkDeployedDists:
                dist = jdkDist.dist()
                excluded.update([d.output_dir() for d in dist.archived_deps() if d.isJavaProject()])
            cp = os.pathsep.join([e for e in cp.split(os.pathsep) if e not in excluded])
            vmArgs[cpIndex] = cp
        # Run the VM in a mode where application/test classes can
        # access JVMCI loaded classes.
        vmArgs = ['-XX:-UseJVMCIClassLoader'] + vmArgs
        return (vmArgs, mainClass, mainClassArgs)
    return config
def _unittest_vm_launcher(vmArgs, mainClass, mainClassArgs):
    """mx_unittest VM launcher hook: run the test harness on the JVMCI JDK."""
    run_vm(vmArgs + [mainClass] + mainClassArgs)
mx_unittest.add_config_participant(_unittest_config_participant)
mx_unittest.set_vm_launcher('JVMCI VM launcher', _unittest_vm_launcher)
def shortunittest(args):
    """alias for 'unittest --whitelist test/whitelist_shortunittest.txt'"""
    mx_unittest.unittest(['--whitelist', 'test/whitelist_shortunittest.txt'] + args)
def buildvms(args):
    """build one or more VMs in various configurations

    Fix: the per-build log file handle was previously opened and never closed;
    it is now closed in a finally block once the build subprocess finishes."""
    vmsDefault = ','.join(_vmChoices.keys())
    vmbuildsDefault = ','.join(_vmbuildChoices)
    parser = ArgumentParser(prog='mx buildvms')
    parser.add_argument('--vms', help='a comma separated list of VMs to build (default: ' + vmsDefault + ')', metavar='<args>', default=vmsDefault)
    parser.add_argument('--builds', help='a comma separated list of build types (default: ' + vmbuildsDefault + ')', metavar='<args>', default=vmbuildsDefault)
    parser.add_argument('-n', '--no-check', action='store_true', help='omit running "java -version" after each build')
    parser.add_argument('-c', '--console', action='store_true', help='send build output to console instead of log file')
    args = parser.parse_args(args)
    vms = args.vms.split(',')
    builds = args.builds.split(',')
    allStart = time.time()
    for vm in vms:
        if not isVMSupported(vm):
            mx.log('The ' + vm + ' VM is not supported on this platform - skipping')
            continue
        for vmbuild in builds:
            if vm == 'original' and vmbuild != 'product':
                # Only the product build of the original VM exists.
                continue
            if not args.console:
                logFile = join(vm + '-' + vmbuild + '.log')
                log = open(join(_suite.dir, logFile), 'wb')
                try:
                    start = time.time()
                    mx.log('BEGIN: ' + vm + '-' + vmbuild + '\t(see: ' + logFile + ')')
                    verbose = ['-v'] if mx._opts.verbose else []
                    # Run as subprocess so that output can be directed to a file
                    cmd = [sys.executable, '-u', mx.__file__] + verbose + ['--vm', vm, '--vmbuild', vmbuild, 'build']
                    mx.logv("executing command: " + str(cmd))
                    subprocess.check_call(cmd, cwd=_suite.dir, stdout=log, stderr=subprocess.STDOUT)
                finally:
                    log.close()
                duration = datetime.timedelta(seconds=time.time() - start)
                mx.log('END: ' + vm + '-' + vmbuild + '\t[' + str(duration) + ']')
            else:
                with VM(vm, vmbuild):
                    build([])
            if not args.no_check:
                vmargs = ['-version']
                if vm == 'jvmci':
                    vmargs.insert(0, '-XX:-BootstrapJVMCI')
                run_vm(vmargs, vm=vm, vmbuild=vmbuild)
    allDuration = datetime.timedelta(seconds=time.time() - allStart)
    mx.log('TOTAL TIME: ' + '[' + str(allDuration) + ']')
def _jvmci_gate_runner(args, tasks):
    """Gate tasks for JVMCI: makefile sync check, hosted build + unit tests,
    remaining VM flavor builds, IGV build, and (optionally) non-JVMCI builds."""
    with Task('Check jvmci.make in sync with suite.py', tasks) as t:
        if t:
            jvmciMake = join(_suite.dir, 'make', 'jvmci.make')
            if mx_jvmci_makefile.build_makefile(['-o', jvmciMake]) != 0:
                t.abort('Rerun "mx makefile -o ' + jvmciMake + ' and check-in the modified ' + jvmciMake)
    # Build server-hosted-jvmci now so we can run the unit tests
    with Task('BuildHotSpotJVMCIHosted: product', tasks) as t:
        if t: buildvms(['--vms', 'server', '--builds', 'product'])
    # Run unit tests on server-hosted-jvmci
    with VM('server', 'product'):
        with Task('JVMCI UnitTests: hosted-product', tasks) as t:
            if t: unittest(['--suite', 'jvmci', '--enable-timing', '--verbose', '--fail-fast'])
    # Build the other VM flavors
    with Task('BuildHotSpotJVMCIOthers: fastdebug,product', tasks) as t:
        if t: buildvms(['--vms', 'jvmci,server', '--builds', 'fastdebug,product'])
    with Task('CleanAndBuildIdealGraphVisualizer', tasks, disableJacoco=True) as t:
        if t and platform.processor() != 'sparc':
            buildxml = mx._cygpathU2W(join(_suite.dir, 'src', 'share', 'tools', 'IdealGraphVisualizer', 'build.xml'))
            mx.run(['ant', '-f', buildxml, '-q', 'clean', 'build'], env=_igvBuildEnv())
    # Prevent JVMCI modifications from breaking the standard builds
    if args.buildNonJVMCI:
        with Task('BuildHotSpotVarieties', tasks, disableJacoco=True) as t:
            if t:
                buildvms(['--vms', 'client,server', '--builds', 'fastdebug,product'])
                if mx.get_os() not in ['windows', 'cygwin']:
                    buildvms(['--vms', 'server-nojvmci', '--builds', 'product,optimized'])
mx_gate.add_gate_runner(_suite, _jvmci_gate_runner)
mx_gate.add_gate_argument('-g', '--only-build-jvmci', action='store_false', dest='buildNonJVMCI', help='only build the JVMCI VM')
def deoptalot(args):
    """bootstrap a VM with DeoptimizeALot and VerifyOops on
    If the first argument is a number, the process will be repeated
    this number of times. All other arguments are passed to the VM."""
    count = 1
    if args and args[0].isdigit():
        count = int(args.pop(0))
    for _ in range(count):
        if run_vm(['-XX:-TieredCompilation', '-XX:+DeoptimizeALot', '-XX:+VerifyOops'] + args + ['-version']) != 0:
            mx.abort("Failed")
def longtests(args):
    """run longer-running tests: 15 DeoptimizeALot bootstraps with a 48MB heap"""
    deoptalot(['15', '-Xmx48m'])
def _igvJdk():
    """Locate the home of a JDK suitable for building and running IGV (1.8, excluding 8u20..8u39)."""
    v8 = mx.VersionSpec("1.8")
    v8u20 = mx.VersionSpec("1.8.0_20")
    v8u40 = mx.VersionSpec("1.8.0_40")
    def _versionOk(version):
        return version >= v8 and (version < v8u20 or version >= v8u40)
    return mx.get_jdk(_versionOk, versionDescription='>= 1.8 and < 1.8.0u20 or >= 1.8.0u40', purpose="building & running IGV").home
def _igvBuildEnv():
    """Build the environment for the IGV ant build; translates http_proxy into ant proxy options."""
    # When the http_proxy environment variable is set, convert it to the proxy settings that ant needs
    env = dict(os.environ)
    proxy = os.environ.get('http_proxy')
    if proxy:
        if '://' in proxy:
            # Remove the http:// prefix (or any other protocol prefix)
            proxy = proxy.split('://', 1)[1]
        # Separate proxy server name and port number
        proxyName, proxyPort = proxy.split(':', 1)
        env['ANT_OPTS'] = '-DproxyHost="' + proxyName + '" -DproxyPort=' + proxyPort
    env['JAVA_HOME'] = _igvJdk()
    return env
def igv(args):
    """run the Ideal Graph Visualizer"""
    logFile = '.ideal_graph_visualizer.log'
    with open(join(_suite.dir, logFile), 'w') as fp:
        mx.logv('[Ideal Graph Visualizer log is in ' + fp.name + ']')
        nbplatform = join(_suite.dir, 'src', 'share', 'tools', 'IdealGraphVisualizer', 'nbplatform')
        # Remove NetBeans platform if it is earlier than the current supported version
        if exists(nbplatform):
            updateTrackingFile = join(nbplatform, 'platform', 'update_tracking', 'org-netbeans-core.xml')
            if not exists(updateTrackingFile):
                mx.log('Could not find \'' + updateTrackingFile + '\', removing NetBeans platform')
                shutil.rmtree(nbplatform)
            else:
                # The installed platform version is recorded in the update tracking XML.
                dom = xml.dom.minidom.parse(updateTrackingFile)
                currentVersion = mx.VersionSpec(dom.getElementsByTagName('module_version')[0].getAttribute('specification_version'))
                supportedVersion = mx.VersionSpec('3.43.1')
                if currentVersion < supportedVersion:
                    mx.log('Replacing NetBeans platform version ' + str(currentVersion) + ' with version ' + str(supportedVersion))
                    shutil.rmtree(nbplatform)
                elif supportedVersion < currentVersion:
                    mx.log('Supported NetBeans version in igv command should be updated to ' + str(currentVersion))
        if not exists(nbplatform):
            mx.logv('[This execution may take a while as the NetBeans platform needs to be downloaded]')
        env = _igvBuildEnv()
        # make the jar for Batik 1.7 available.
        env['IGV_BATIK_JAR'] = mx.library('BATIK').get_path(True)
        if mx.run(['ant', '-f', mx._cygpathU2W(join(_suite.dir, 'src', 'share', 'tools', 'IdealGraphVisualizer', 'build.xml')), '-l', mx._cygpathU2W(fp.name), 'run'], env=env, nonZeroIsFatal=False):
            mx.abort("IGV ant build & launch failed. Check '" + logFile + "'. You can also try to delete 'src/share/tools/IdealGraphVisualizer/nbplatform'.")
def c1visualizer(args):
    """run the Cl Compiler Visualizer"""
    libpath = join(_suite.dir, 'lib')
    if mx.get_os() == 'windows':
        executable = join(libpath, 'c1visualizer', 'bin', 'c1visualizer.exe')
    else:
        executable = join(libpath, 'c1visualizer', 'bin', 'c1visualizer')

    # Check whether the current C1Visualizer installation is the up-to-date
    # (an unresolved C1VISUALIZER_DIST library means a newer archive is pending).
    if exists(executable) and not exists(mx.library('C1VISUALIZER_DIST').get_path(resolve=False)):
        mx.log('Updating C1Visualizer')
        shutil.rmtree(join(libpath, 'c1visualizer'))

    # resolve=True downloads the archive if it is not present yet.
    archive = mx.library('C1VISUALIZER_DIST').get_path(resolve=True)

    if not exists(executable):
        zf = zipfile.ZipFile(archive, 'r')
        zf.extractall(libpath)

    if not exists(executable):
        mx.abort('C1Visualizer binary does not exist: ' + executable)

    if mx.get_os() != 'windows':
        # Make sure that execution is allowed. The zip file does not always specfiy that correctly
        os.chmod(executable, 0777)

    mx.run([executable])
def _get_jmh_path():
    """Locate the JMH benchmarks workspace and abort if it cannot be found."""
    # Prefer an explicit environment override, then fall back to a
    # 'java-benchmarks' checkout sitting next to this suite.
    jmh_home = mx.get_env('JMH_BENCHMARKS', None)
    if not jmh_home:
        candidate = join(dirname(_suite.dir), 'java-benchmarks')
        if exists(candidate):
            jmh_home = candidate
    if not jmh_home:
        mx.abort("Please set the JMH_BENCHMARKS environment variable to point to the java-benchmarks workspace")
    if not exists(jmh_home):
        mx.abort("The directory denoted by the JMH_BENCHMARKS environment variable does not exist: " + jmh_home)
    return jmh_home
def makejmhdeps(args):
    """creates and installs Maven dependencies required by the JMH benchmarks

    The dependencies are specified by files named pom.mxdeps in the
    JMH directory tree. Each such file contains a list of dependencies
    defined in JSON format. For example:

    '[{"artifactId" : "compiler.test", "groupId" : "com.oracle.graal", "deps" : ["com.oracle.graal.compiler.test"]}]'

    will result in a dependency being installed in the local Maven repository
    that can be referenced in a pom.xml file as follows:

        <dependency>
            <groupId>com.oracle.graal</groupId>
            <artifactId>compiler.test</artifactId>
            <version>1.0-SNAPSHOT</version>
        </dependency>"""

    parser = ArgumentParser(prog='mx makejmhdeps')
    parser.add_argument('-s', '--settings', help='alternative path for Maven user settings file', metavar='<path>')
    parser.add_argument('-p', '--permissive', action='store_true', help='issue note instead of error if a Maven dependency cannot be built due to missing projects/libraries')
    args = parser.parse_args(args)

    def makejmhdep(artifactId, groupId, deps):
        # Builds a jar from the named mx dependencies and installs it into
        # the local Maven repository via 'mvn install:install-file'.
        path = artifactId + '.jar'
        allDeps = []
        if args.permissive:
            # NOTE(review): as visible here, the dependency resolution loop
            # (and hence population of allDeps) only runs in permissive mode;
            # confirm against upstream whether non-permissive invocations are
            # expected to resolve dependencies elsewhere.
            for name, dep in [(d, mx.dependency(d, fatalIfMissing=False)) for d in deps]:
                if dep is None:
                    mx.log('Skipping dependency ' + groupId + '.' + artifactId + ' as ' + name + ' cannot be resolved')
                    return
                if dep.isDistribution():
                    # Flatten distributions into their archived Java projects.
                    allDeps = allDeps + [d for d in dep.archived_deps() if d.isJavaProject()]
                else:
                    allDeps.append(dep)
        d = mx.JARDistribution(_suite, name=artifactId, subDir=_suite.dir, path=path, sourcesPath=path, deps=allDeps, \
            mainClass=None, excludedLibs=[], distDependencies=[], javaCompliance=None, platformDependent=False, theLicense=None)
        d.make_archive()
        env = os.environ.copy()
        jdkDir = get_jvmci_jdk_dir()
        check_VM_exists('server', jdkDir)
        env['JAVA_HOME'] = jdkDir
        env['MAVEN_OPTS'] = '-server -XX:-UseJVMCIClassLoader'
        cmd = ['mvn', 'install:install-file', '-DgroupId=' + groupId, '-DartifactId=' + artifactId,
               '-Dversion=1.0-SNAPSHOT', '-Dpackaging=jar', '-Dfile=' + d.path]
        if not mx._opts.verbose:
            cmd.append('-q')
        if args.settings:
            cmd = cmd + ['-s', args.settings]
        mx.run(cmd, env=env)
        # The temporary jar is only needed for the Maven install step.
        os.unlink(d.path)

    jmhPath = _get_jmh_path()
    # Process every pom.mxdeps file anywhere under the JMH workspace.
    for root, _, filenames in os.walk(jmhPath):
        for f in [join(root, n) for n in filenames if n == 'pom.mxdeps']:
            mx.logv('[processing ' + f + ']')
            try:
                with open(f) as fp:
                    for d in json.load(fp):
                        artifactId = d['artifactId']
                        groupId = d['groupId']
                        deps = d['deps']
                        makejmhdep(artifactId, groupId, deps)
            except ValueError as e:
                mx.abort('Error parsing {0}:\n{1}'.format(f, e))
def buildjmh(args):
    """build the JMH benchmarks"""
    parser = ArgumentParser(prog='mx buildjmh')
    parser.add_argument('-s', '--settings', help='alternative path for Maven user settings file', metavar='<path>')
    parser.add_argument('-c', action='store_true', dest='clean', help='clean before building')
    args = parser.parse_args(args)

    jmhPath = _get_jmh_path()
    mx.log('JMH benchmarks: ' + jmhPath)

    # Ensure the mx injected dependencies are up to date
    makejmhdeps(['-p'] + (['-s', args.settings] if args.settings else []))

    # A timestamp file records the last successful build; the Maven build is
    # skipped when no tracked source file is newer than it.
    timestamp = mx.TimeStampFile(join(_suite.mxDir, 'jmh', jmhPath.replace(os.sep, '_') + '.timestamp'))
    mustBuild = args.clean
    if not mustBuild:
        try:
            hgfiles = [join(jmhPath, f) for f in subprocess.check_output(['hg', '-R', jmhPath, 'locate']).split('\n')]
            mustBuild = timestamp.isOlderThan(hgfiles)
        except:
            # not a Mercurial repository or hg commands are not available.
            mustBuild = True

    if mustBuild:
        buildOutput = []

        def _redirect(x):
            # In verbose mode stream Maven output; otherwise buffer it and
            # only dump it if the build fails.
            if mx._opts.verbose:
                mx.log(x[:-1])
            else:
                buildOutput.append(x)

        env = os.environ.copy()
        jdkDir = get_jvmci_jdk_dir()
        check_VM_exists('server', jdkDir)
        env['JAVA_HOME'] = jdkDir
        env['MAVEN_OPTS'] = '-server -XX:-UseJVMCIClassLoader'
        mx.log("Building benchmarks...")
        cmd = ['mvn']
        if args.settings:
            cmd = cmd + ['-s', args.settings]
        if args.clean:
            cmd.append('clean')
        cmd.append('package')
        retcode = mx.run(cmd, cwd=jmhPath, out=_redirect, env=env, nonZeroIsFatal=False)
        if retcode != 0:
            mx.log(''.join(buildOutput))
            mx.abort(retcode)
        timestamp.touch()
    else:
        mx.logv('[all Mercurial controlled files in ' + jmhPath + ' are older than ' + timestamp.path + ' - skipping build]')
def jmh(args):
    """run the JMH benchmarks

    This command respects the standard --vm and --vmbuild options
    for choosing which VM to run the benchmarks with."""
    if '-h' in args:
        mx.help_(['jmh'])
        mx.abort(1)

    vmArgs, benchmarksAndJsons = mx.extract_VM_args(args)
    # The JMH harness itself must not be loaded by the JVMCI class loader.
    if isJVMCIEnabled(get_vm()) and '-XX:-UseJVMCIClassLoader' not in vmArgs:
        vmArgs = ['-XX:-UseJVMCIClassLoader'] + vmArgs

    # Arguments starting with '{' are JSON snippets overriding JMH options;
    # everything else is a benchmark name filter.
    benchmarks = [b for b in benchmarksAndJsons if not b.startswith('{')]
    jmhArgJsons = [b for b in benchmarksAndJsons if b.startswith('{')]
    jmhOutDir = join(_suite.mxDir, 'jmh')
    if not exists(jmhOutDir):
        os.makedirs(jmhOutDir)
    jmhOut = join(jmhOutDir, 'jmh.out')
    jmhArgs = {'-rff' : jmhOut, '-v' : 'EXTRA' if mx._opts.verbose else 'NORMAL'}

    # e.g. '{"-wi" : 20}'
    for j in jmhArgJsons:
        try:
            for n, v in json.loads(j).iteritems():
                if v is None:
                    # A null value removes a default JMH argument.
                    del jmhArgs[n]
                else:
                    jmhArgs[n] = v
        except ValueError as e:
            mx.abort('error parsing JSON input: {0}\n{1}'.format(j, e))

    jmhPath = _get_jmh_path()
    mx.log('Using benchmarks in ' + jmhPath)

    matchedSuites = set()
    numBench = [0]  # one-element list so the nested closure can mutate the count
    for micros in os.listdir(jmhPath):
        absoluteMicro = os.path.join(jmhPath, micros)
        if not os.path.isdir(absoluteMicro):
            continue
        if not micros.startswith("micros-"):
            mx.logv('JMH: ignored ' + absoluteMicro + " because it doesn't start with 'micros-'")
            continue

        microJar = os.path.join(absoluteMicro, "target", "microbenchmarks.jar")
        if not exists(microJar):
            mx.log('Missing ' + microJar + ' - please run "mx buildjmh"')
            continue
        if benchmarks:
            def _addBenchmark(x):
                # Count benchmarks in this suite matching any requested filter.
                if x.startswith("Benchmark:"):
                    return
                match = False
                for b in benchmarks:
                    match = match or (b in x)
                if match:
                    numBench[0] += 1
                    matchedSuites.add(micros)
            # '-l' lists the available benchmarks without running them.
            mx.run_java(['-jar', microJar, "-l"], cwd=jmhPath, out=_addBenchmark, addDefaultArgs=False)
        else:
            matchedSuites.add(micros)

    mx.logv("matchedSuites: " + str(matchedSuites))
    plural = 's' if not benchmarks or numBench[0] > 1 else ''
    number = str(numBench[0]) if benchmarks else "all"
    mx.log("Running " + number + " benchmark" + plural + '...')

    regex = []
    if benchmarks:
        regex.append(r".*(" + "|".join(benchmarks) + ").*")

    for suite in matchedSuites:
        absoluteMicro = os.path.join(jmhPath, suite)
        jdk = get_jvmci_jdk()
        vm = get_vm()
        pfx = get_vm_prefix()
        forkedVmArgs = jdk.parseVmArgs(vmArgs)

        def quoteSpace(s):
            # JMH receives the forked-VM args as one space-joined string, so
            # args containing spaces must be quoted.
            if " " in s:
                return '"' + s + '"'
            return s

        forkedVmArgs = map(quoteSpace, forkedVmArgs)
        if pfx:
            # The prefix (e.g. gdb) cannot be applied to the forked JMH VMs.
            mx.log("JMH ignores prefix: \"" + ' '.join(pfx) + "\"")
        javaArgs = ['-jar', os.path.join(absoluteMicro, "target", "microbenchmarks.jar"),
                    '--jvm', jdk.java,
                    '--jvmArgs', ' '.join(["-" + vm] + forkedVmArgs)]
        for k, v in jmhArgs.iteritems():
            javaArgs.append(k)
            # Flag-style options have empty values and are passed bare.
            if len(str(v)):
                javaArgs.append(str(v))
        mx.run_java(javaArgs + regex, addDefaultArgs=False, cwd=jmhPath)
def hsdis(args, copyToDir=None):
    """download the hsdis library

    This is needed to support HotSpot's assembly dumping features.
    By default it downloads the Intel syntax version, use the 'att' argument to install AT&T syntax."""
    # Pick the disassembler syntax flavor; sparcv9 has only one build.
    flavor = 'att' if 'att' in args else 'intel'
    if mx.get_arch() == "sparcv9":
        flavor = "sparcv9"

    libName = mx.add_lib_suffix('hsdis-' + mx.get_arch())
    libPath = join(_suite.dir, 'lib', libName)

    # Known-good checksums for the published hsdis binaries.
    sha1s = {
        'att/hsdis-amd64.dll' : 'bcbd535a9568b5075ab41e96205e26a2bac64f72',
        'att/hsdis-amd64.so' : '58919ba085d4ef7a513f25bae75e7e54ee73c049',
        'intel/hsdis-amd64.dll' : '6a388372cdd5fe905c1a26ced614334e405d1f30',
        'intel/hsdis-amd64.so' : '844ed9ffed64fe9599638f29a8450c50140e3192',
        'intel/hsdis-amd64.dylib' : 'fdb13ef0d7d23d93dacaae9c98837bea0d4fc5a2',
        'sparcv9/hsdis-sparcv9.so': '970640a9af0bd63641f9063c11275b371a59ee60',
    }

    flavoredLib = flavor + "/" + libName
    if flavoredLib not in sha1s:
        mx.logv("hsdis not supported on this plattform or architecture")
        return

    if not exists(libPath):
        expectedSha1 = sha1s[flavoredLib]
        mx.download_file_with_sha1('hsdis', libPath, ['https://lafo.ssw.uni-linz.ac.at/pub/hsdis/' + flavoredLib], expectedSha1, libPath + '.sha1', True, True, sources=False)
    if copyToDir is not None and exists(copyToDir):
        shutil.copy(libPath, copyToDir)
def hcfdis(args):
    """disassemble HexCodeFiles embedded in text files

    Run a tool over the input files to convert all embedded HexCodeFiles
    to a disassembled format."""
    parser = ArgumentParser(prog='mx hcfdis')
    parser.add_argument('-m', '--map', help='address to symbol map applied to disassembler output')
    parser.add_argument('files', nargs=REMAINDER, metavar='files...')

    args = parser.parse_args(args)

    path = mx.library('HCFDIS').get_path(resolve=True)
    mx.run_java(['-cp', path, 'com.oracle.max.hcfdis.HexCodeFileDis'] + args.files)

    if args.map is not None:
        # Post-process the disassembler output, replacing hex addresses with
        # symbol names taken from the map file ('0xADDR SYMBOL' per line).
        addressRE = re.compile(r'0[xX]([A-Fa-f0-9]+)')
        with open(args.map) as fp:
            lines = fp.read().splitlines()
        symbols = dict()
        for l in lines:
            addressAndSymbol = l.split(' ', 1)
            if len(addressAndSymbol) == 2:
                address, symbol = addressAndSymbol
                if address.startswith('0x'):
                    # Keys are numeric so differing hex spellings still match.
                    address = long(address, 16)
                    symbols[address] = symbol
        for f in args.files:
            with open(f) as fp:
                lines = fp.read().splitlines()
            updated = False
            for i in range(0, len(lines)):
                l = lines[i]
                for m in addressRE.finditer(l):
                    sval = m.group(0)
                    val = long(sval, 16)
                    sym = symbols.get(val)
                    if sym:
                        l = l.replace(sval, sym)
                        updated = True
                lines[i] = l
            if updated:
                # The original file is left untouched; results go to 'new_<f>'.
                mx.log('updating ' + f)
                with open('new_' + f, "w") as fp:
                    for l in lines:
                        print >> fp, l
def isJVMCIEnabled(vm):
    """Return True if the named VM configuration has JVMCI support."""
    # Only the pristine HotSpot ('original') and explicit '...nojvmci'
    # variants lack JVMCI.
    if vm == 'original':
        return False
    return not vm.endswith('nojvmci')
def jol(args):
    """Java Object Layout"""
    joljar = mx.library('JOL_INTERNALS').get_path(resolve=True)

    def _matches(simple, classname):
        # Accept exact matches plus suffix matches at '.' or '$' boundaries.
        return simple == classname or classname.endswith('.' + simple) or classname.endswith('$' + simple)

    found = mx.findclass(args, logToConsole=False, matcher=_matches)
    if found:
        targets = mx.select_items(sorted(found))
    else:
        # mx.findclass can be mistaken, don't give up yet: pass the raw args through.
        targets = args
    run_vm(['-javaagent:' + joljar, '-cp', os.pathsep.join([mx.classpath(), joljar]), "org.openjdk.jol.MainObjectInternals"] + targets)
# Register the suite-specific mx commands: each entry maps a command name to
# [function, usage string, optional help suffix, optional properties].
mx.update_commands(_suite, {
    'build': [build, ''],
    'buildjmh': [buildjmh, '[-options]'],
    'buildvars': [buildvars, ''],
    'buildvms': [buildvms, '[-options]'],
    'c1visualizer' : [c1visualizer, ''],
    'export': [export, '[-options] [zipfile]'],
    'hsdis': [hsdis, '[att]'],
    'hcfdis': [hcfdis, ''],
    'igv' : [igv, ''],
    'jdkhome': [print_jdkhome, ''],
    'jmh': [jmh, '[VM options] [filters|JMH-args-as-json...]'],
    'makejmhdeps' : [makejmhdeps, ''],
    'shortunittest' : [shortunittest, '[unittest options] [--] [VM options] [filters...]', mx_unittest.unittestHelpSuffix],
    'vm': [run_vm, '[-options] class [args...]'],
    'deoptalot' : [deoptalot, '[n]'],
    'longtests' : [longtests, ''],
    'jol' : [jol, ''],
    'makefile' : [mx_jvmci_makefile.build_makefile, 'build makefiles for JDK build', None, {'keepUnsatisfiedDependencies': True}],
})

# Global command line options understood by this suite; the parsed values are
# transferred into module globals in mx_post_parse_cmd_line below.
mx.add_argument('--vmcwd', dest='vm_cwd', help='current directory will be changed to <path> before the VM is executed', default=None, metavar='<path>')
mx.add_argument('--installed-jdks', help='the base directory in which the JDKs cloned from $JAVA_HOME exist. ' +
                'The VM selected by --vm and --vmbuild options is under this directory (i.e., ' +
                join('<path>', '<jdk-version>', '<vmbuild>', 'jre', 'lib', '<vm>', mx.add_lib_prefix(mx.add_lib_suffix('jvm'))) + ')', default=None, metavar='<path>')
mx.add_argument('--vm', action='store', dest='vm', choices=_vmChoices.keys() + _vmAliases.keys(), help='the VM type to build/run')
mx.add_argument('--vmbuild', action='store', dest='vmbuild', choices=_vmbuildChoices, help='the VM build to build/run (default: ' + _vmbuildChoices[0] + ')')
mx.add_argument('--ecl', action='store_true', dest='make_eclipse_launch', help='create launch configuration for running VM execution(s) in Eclipse')
mx.add_argument('--vmprefix', action='store', dest='vm_prefix', help='prefix for running the VM (e.g. "/usr/bin/gdb --args")', metavar='<prefix>')
mx.add_argument('--gdb', action='store_const', const='/usr/bin/gdb --args', dest='vm_prefix', help='alias for --vmprefix "/usr/bin/gdb --args"')
mx.add_argument('--lldb', action='store_const', const='lldb --', dest='vm_prefix', help='alias for --vmprefix "lldb --"')
class JVMCIArchiveParticipant:
    """Archive participant plugged into mx's jar creation for JVMCI distributions.

    The __opened__/__add__/__addsrc__/__closing__ methods are the callback
    protocol mx invokes while writing an archive; __add__ here is NOT the
    arithmetic operator.
    """

    def __init__(self, dist):
        self.dist = dist
        # service name -> list of provider class names, accumulated while the
        # archive is written and flushed to service files in __closing__.
        self.jvmciServices = {}

    def __opened__(self, arc, srcArc, services):
        self.services = services
        self.arc = arc

    def __add__(self, arcname, contents):
        # Returning True tells mx this participant consumed the entry (it is
        # not written verbatim); False lets mx add the entry unchanged.
        if arcname.startswith('META-INF/jvmci.services/'):
            service = arcname[len('META-INF/jvmci.services/'):]
            self.jvmciServices.setdefault(service, []).extend([provider for provider in contents.split('\n')])
            return True
        if arcname.startswith('META-INF/jvmci.providers/'):
            # One file per provider class; its content lists the services provided.
            provider = arcname[len('META-INF/jvmci.providers/'):]
            for service in contents.strip().split(os.linesep):
                assert service
                self.jvmciServices.setdefault(service, []).append(provider)
            return True
        elif arcname.endswith('_OptionDescriptors.class'):
            # Need to create service files for the providers of the
            # jdk.internal.jvmci.options.Options service created by
            # jdk.internal.jvmci.options.processor.OptionProcessor.
            provider = arcname[:-len('.class'):].replace('/', '.')
            self.services.setdefault('jdk.internal.jvmci.options.OptionDescriptors', []).append(provider)
        return False

    def __addsrc__(self, arcname, contents):
        return False

    def __closing__(self):
        # Emit one service registry file per collected service.
        for service, providers in self.jvmciServices.iteritems():
            arcname = 'META-INF/jvmci.services/' + service
            # Convert providers to a set before printing to remove duplicates
            self.arc.zf.writestr(arcname, '\n'.join(frozenset(providers))+ '\n')
# Lazily initialized cache for the bootstrap JDK (see get_jvmci_bootstrap_jdk).
_jvmci_bootstrap_jdk = None

def get_jvmci_bootstrap_jdk():
    """
    Gets the JDK from which a JVMCI JDK is created.

    The result is cached in _jvmci_bootstrap_jdk after the first lookup.
    """
    global _jvmci_bootstrap_jdk
    if not _jvmci_bootstrap_jdk:
        def _versionCheck(version):
            # Accept versions in the range [_minVersion, _untilVersion]; the
            # upper bound only applies when _untilVersion is set.
            # Bug fix: this previously tested 'version >= _untilVersion',
            # contradicting the '<=' advertised in versionDesc below and
            # accepting JDKs newer than the supported range.
            return version >= _minVersion and (not _untilVersion or version <= _untilVersion)
        versionDesc = ">=" + str(_minVersion)
        if _untilVersion:
            versionDesc += " and <=" + str(_untilVersion)
        _jvmci_bootstrap_jdk = mx.get_jdk(_versionCheck, versionDescription=versionDesc, tag='default')
    return _jvmci_bootstrap_jdk
# Dependencies whose classpath entries are prepended to -Xbootclasspath/p:
# when the JVMCI JDK parses VM arguments (see JVMCIJDKConfig.parseVmArgs).
_jvmci_bootclasspath_prepends = []

def add_bootclasspath_prepend(dep):
    """Register a classpath dependency to be prepended to the boot class path."""
    assert isinstance(dep, mx.ClasspathDependency)
    _jvmci_bootclasspath_prepends.append(dep)
class JVMCIJDKConfig(mx.JDKConfig):
    """JDKConfig for a JVMCI-enabled JDK of a given vmbuild flavor."""

    def __init__(self, vmbuild):
        # Ignore the deployable distributions here - they are only deployed during building.
        # This significantly reduces the latency of the "mx java" command.
        self.vmbuild = vmbuild
        jdkDir = get_jvmci_jdk_dir(build=self.vmbuild, create=True, deployDists=False)
        mx.JDKConfig.__init__(self, jdkDir, tag=_JVMCI_JDK_TAG)

    def parseVmArgs(self, args, addDefaultArgs=True):
        """Expand project references, -G: options and defaults in VM args."""
        args = mx.expand_project_in_args(args, insitu=False)
        jacocoArgs = mx_gate.get_jacoco_agent_args()
        if jacocoArgs:
            args = jacocoArgs + args

        # Support for -G: options
        def translateGOption(arg):
            # '-G:+Name' / '-G:-Name' toggle boolean options; '-G:Name=v' sets a value.
            if arg.startswith('-G:+'):
                if '=' in arg:
                    mx.abort('Mixing + and = in -G: option specification: ' + arg)
                arg = '-Djvmci.option.' + arg[len('-G:+'):] + '=true'
            elif arg.startswith('-G:-'):
                if '=' in arg:
                    mx.abort('Mixing - and = in -G: option specification: ' + arg)
                # NOTE: len('-G:+') == len('-G:-'), so this slice strips the
                # intended prefix even though it names the '+' form.
                arg = '-Djvmci.option.' + arg[len('-G:+'):] + '=false'
            elif arg.startswith('-G:'):
                arg = '-Djvmci.option.' + arg[len('-G:'):]
            return arg
        args = map(translateGOption, args)

        # Registered boot classpath prepends go ahead of user args.
        args = ['-Xbootclasspath/p:' + dep.classpath_repr() for dep in _jvmci_bootclasspath_prepends] + args

        # Set the default JVMCI compiler
        for jdkDist in reversed(jdkDeployedDists):
            if isinstance(jdkDist, JvmciJDKDeployedDist):
                if jdkDist._compilers:
                    jvmciCompiler = jdkDist._compilers[-1]
                    args = ['-Djvmci.compiler=' + jvmciCompiler] + args
                    break

        if '-version' in args:
            # 'java -version' exits before processing trailing args; warn so
            # users are not surprised that those options had no effect.
            ignoredArgs = args[args.index('-version') + 1:]
            if len(ignoredArgs) > 0:
                mx.log("Warning: The following options will be ignored by the vm because they come after the '-version' argument: " + ' '.join(ignoredArgs))
        return self.processArgs(args, addDefaultArgs=addDefaultArgs)

    # Overrides JDKConfig
    def run_java(self, args, vm=None, nonZeroIsFatal=True, out=None, err=None, cwd=None, timeout=None, env=None, addDefaultArgs=True):
        if vm is None:
            vm = get_vm()

        if not isVMSupported(vm):
            mx.abort('The ' + vm + ' is not supported on this platform')

        if cwd is None:
            cwd = _vm_cwd
        elif _vm_cwd is not None and _vm_cwd != cwd:
            mx.abort("conflicting working directories: do not set --vmcwd for this command")

        _updateInstalledJVMCIOptionsFile(self.home)
        args = self.parseVmArgs(args, addDefaultArgs=addDefaultArgs)
        if _make_eclipse_launch:
            # NOTE(review): 'build' here resolves to the module-level build
            # command function, so the string concatenation below would raise
            # a TypeError if --ecl is used - confirm intended name.
            mx.make_eclipse_launch(_suite, args, _suite.name + '-' + build, name=None, deps=mx.dependencies())
        pfx = _vm_prefix.split() if _vm_prefix is not None else []
        cmd = pfx + [self.java] + ['-' + vm] + args
        return mx.run(cmd, nonZeroIsFatal=nonZeroIsFatal, out=out, err=err, cwd=cwd)
"""
The dict of JVMCI JDKs indexed by vmbuild names.
"""
_jvmci_jdks = {}
def get_jvmci_jdk(vmbuild=None):
"""
Gets the JVMCI JDK corresponding to 'vmbuild'.
"""
if not vmbuild:
vmbuild = _vmbuild
jdk = _jvmci_jdks.get(vmbuild)
if jdk is None:
jdk = JVMCIJDKConfig(vmbuild)
_jvmci_jdks[vmbuild] = jdk
return jdk
class JVMCIJDKFactory(mx.JDKFactory):
    """mx JDK factory that hands out the JVMCI JDK for the current vmbuild."""

    def getJDKConfig(self):
        config = get_jvmci_jdk(_vmbuild)
        # Fail fast if the selected VM has not been built into this JDK.
        check_VM_exists(get_vm(), config.home)
        return config

    def description(self):
        return "JVMCI JDK"
def mx_post_parse_cmd_line(opts):
    """mx hook invoked after command line parsing; publishes option values
    into module globals and wires up distribution deployment hooks."""
    # Make the JVMCI JDK available under its tag and use it by default.
    mx.addJDKFactory(_JVMCI_JDK_TAG, mx.JavaCompliance('8'), JVMCIJDKFactory())
    mx.set_java_command_default_jdk_tag(_JVMCI_JDK_TAG)

    # Execute for the side-effect of checking that the
    # boot strap JDK has a compatible version
    get_jvmci_bootstrap_jdk()

    jdkTag = mx.get_jdk_option().tag
    if hasattr(opts, 'vm') and opts.vm is not None:
        global _vm
        _vm = dealiased_vm(opts.vm)
        if jdkTag and jdkTag != _JVMCI_JDK_TAG:
            mx.warn('Ignoring "--vm" option as "--jdk" tag is not "' + _JVMCI_JDK_TAG + '"')
    if hasattr(opts, 'vmbuild') and opts.vmbuild is not None:
        global _vmbuild
        _vmbuild = opts.vmbuild
        if jdkTag and jdkTag != _JVMCI_JDK_TAG:
            mx.warn('Ignoring "--vmbuild" option as "--jdk" tag is not "' + _JVMCI_JDK_TAG + '"')

    global _make_eclipse_launch
    _make_eclipse_launch = getattr(opts, 'make_eclipse_launch', False)
    global _vm_cwd
    _vm_cwd = opts.vm_cwd
    global _installed_jdks
    _installed_jdks = opts.installed_jdks
    global _vm_prefix
    _vm_prefix = opts.vm_prefix

    mx.instantiateDistribution('JVM_<vmbuild>_<vm>', dict(vmbuild=_vmbuild, vm=get_vm()))

    for jdkDist in jdkDeployedDists:
        # _close captures jdkDist by value, avoiding the late-binding
        # closure pitfall in this loop.
        def _close(jdkDeployable):
            def _install(dist):
                _installDistInJdks(jdkDeployable)
            return _install
        dist = jdkDist.dist()
        dist.add_update_listener(_close(jdkDist))
        if isinstance(jdkDist, JvmciJDKDeployedDist):
            dist.set_archiveparticipant(JVMCIArchiveParticipant(dist))
|
smarr/GraalJVMCI8
|
mx.jvmci/mx_jvmci.py
|
Python
|
gpl-2.0
| 76,362
|
[
"VisIt"
] |
62a8f28c0aecceba748062aee466afb87b9210a442894e60cd3daa90e29bb60a
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import tempfile
import subprocess
from sys import argv
class Defines:
    """Query a C/C++ compiler's preprocessor for the macros it defines."""

    def __init__(self, compiler, flags=()):
        self._compiler = compiler
        assert isinstance(flags, (list, tuple))
        # '-E -dM' makes the compiler emit only '#define NAME VALUE' lines.
        self._flags = list(flags) + ["-E", "-dM"]
        # Macros defined even for an empty translation unit.
        self._buildin = set(self._build_in_defs())

    def _run_compiler(self, filename):
        """Preprocess *filename* and return the output as a list of lines."""
        command = [self._compiler] + self._flags + [filename]
        raw_output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        return raw_output.decode('ascii').splitlines()

    def _remove_define(self, line):
        # Strip the literal '#define' prefix, leaving 'NAME VALUE'.
        return line[len("#define"):].strip()

    def _get_defs(self, filename):
        return map(self._remove_define, self._run_compiler(filename))

    def _build_in_defs(self):
        # Preprocessing an empty file yields exactly the built-in macros.
        with tempfile.NamedTemporaryFile(delete=True, suffix='.cpp') as empty_file:
            return self._get_defs(empty_file.name)

    def build_in_defines(self):
        """Return the set of compiler built-in defines."""
        return self._buildin

    def defines(self, filename, include_build_in=False):
        """Return the set of defines in *filename*, optionally with built-ins."""
        all_defs = set(self._get_defs(filename))
        return all_defs if include_build_in else all_defs - self._buildin
if __name__ == "__main__":
compiler = argv[1]
filename = argv[2]
flags = argv[3:]
parser = Defines(compiler, flags)
map(print, parser.defines(filename))
|
KaiSzuttor/espresso
|
src/config/defines.py
|
Python
|
gpl-3.0
| 2,100
|
[
"ESPResSo"
] |
c73c432566e8ca8b481cf83ae22576c068b934a31553f3e6ecb99748ba20459c
|
"""
Script for testing brian on NSG portal.
Still trying to figure out how it all works.
"""
from brian import (Network, NeuronGroup, StateMonitor, SpikeMonitor,
PoissonInput,
mV, ms, second, Hz)
import numpy as np
# Build an empty network and add every component to it explicitly below.
network = Network()

# Leaky integrate-and-fire dynamics: V decays towards 0 with time constant tau;
# neurons fire at 20 mV and reset to 0 mV.
tau = 20*ms
eqs = "dV/dt = -V/tau : volt"
lifgroup = NeuronGroup(10, eqs, threshold="V>=(20*mV)", reset=0*mV)

# Per-neuron Poisson drive: weights ramp 0.1-1 mV across the group while
# input rates ramp 10-90 Hz.
# NOTE(review): arange(10, 100, 10) yields only 9 rates, so zip() drops the
# last weight and neuron 9 receives no input - confirm this is intended.
weights = np.linspace(0.1, 1, 10)
rates = np.arange(10, 100, 10)
inputgroups = []
for idx, (w, r) in enumerate(zip(weights, rates)):
    # 20 independent Poisson sources per target neuron, injected into V.
    inpgrp = PoissonInput(lifgroup[idx], 20, r*Hz, w*mV, state="V")
    inputgroups.append(inpgrp)
network.add(lifgroup)
network.add(*inputgroups)

# Record spike times and the membrane potential of every neuron.
spikemon = SpikeMonitor(lifgroup)
vmon = StateMonitor(lifgroup, "V", record=True)
network.add(spikemon, vmon)

network.run(10*second, report="stdout")

# Persist results for offline analysis after the NSG run completes.
spikes = spikemon.spiketimes.values()
voltage = vmon.values
np.savez("results.npz",
         spikes=spikes,
         voltages=voltage)
print("DONE")
|
achilleas-k/brian-scripts
|
nsgtest.py
|
Python
|
apache-2.0
| 968
|
[
"Brian"
] |
552983fecc98d1e34e9ab115448a7a829281d83f0f51b1cdd26f9f3e2c9902d4
|
#==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
# This file demonstrates how to connect VTK and ITK pipelines together
# in scripted generators with the new ConnectVTKITK wrapping functionality.
# Data is loaded in with VTK, processed with ITK and written back to disc
# with
#
# For this to work, you have to build InsightApplications/ConnectVTKITK
# as well.
#
# It also demonstrates the use of the python-specific itkPyCommand object.
#
# -- Charl P. Botha <cpbotha AT ieee.org>
import itk, itkvtk
import sys
from vtk import *
itk.auto_progress = True

# VTK will read the PNG image for us
reader = vtkPNGReader()
reader.SetFileName(sys.argv[1])

# it has to be a single component, itk::VTKImageImport doesn't support more
lum = vtkImageLuminance()
lum.SetInput(reader.GetOutput())

# let's cast the output to float
imageCast = vtkImageCast()
imageCast.SetOutputScalarTypeToFloat()
imageCast.SetInput(lum.GetOutput())

# Hand the VTK image to ITK, run Canny edge detection on the float image,
# then rescale the result to unsigned short for PNG output.
cannyImgType = itk.Image[itk.F, 2]
vtk2itk = itk.VTKImageToImageFilter[cannyImgType].New(imageCast)
canny = itk.CannyEdgeDetectionImageFilter[cannyImgType, cannyImgType].New(vtk2itk)
writerImgType = itk.Image[itk.US, 2]
rescaler = itk.RescaleIntensityImageFilter[cannyImgType, writerImgType].New(canny)
# Bug fix: 'iImageToVTKImageFilter' was an undefined name (NameError at
# runtime); the ItkVtkGlue bridge class is itk.ImageToVTKImageFilter.
itk2vtk = itk.ImageToVTKImageFilter[writerImgType].New(rescaler)

# finally write the image to disk using VTK
writer = vtkPNGWriter()
writer.SetFileName(sys.argv[2])
writer.SetInput(itk2vtk.GetOutput())

# before we call Write() on the writer, it is prudent to give
# our ITK pipeline an Update() call... this is not necessary
# for normal error-less operation, but ensures that exceptions
# thrown by ITK get through to us in the case of an error;
# This is because the VTK wrapping system does not support
# C++ exceptions.
rescaler.Update()

# write the file to disk...
writer.Write()
|
151706061/ITK
|
Wrapping/ExternalProjects/ItkVtkGlue/Wrapping/Python/Tests/CannyEdgeDetectionImageFilter.py
|
Python
|
apache-2.0
| 2,540
|
[
"VTK"
] |
883327a64052cb04a3f053828531cd9322f968a4ac6c7c26097f484e91bdf5cf
|
from gpaw import GPAW
from ase import Atoms
from gpaw.lrtddft import LrTDDFT
# Sodium dimer with a 3.12 Angstrom bond along x, centred in a cell with
# 6 Angstrom of vacuum on all sides.
molecule = Atoms('Na2', positions=((0.0, 0.0, 0.0), (3.12, 0.0, 0.0)))
molecule.center(vacuum=6.0)

# Ground-state DFT calculation with the PBE exchange-correlation functional.
calc = GPAW(xc='PBE')
molecule.set_calculator(calc)
molecule.get_potential_energy()

# Linear-response TDDFT on top of the converged ground state, using the LDA
# kernel with two spin channels; istart/jend bound the Kohn-Sham transition
# window (presumably bands 0 through 10 - confirm against the LrTDDFT docs).
lr = LrTDDFT(calc, xc='LDA', istart=0, jend=10, nspins=2)
# Save the Omega matrix so the spectrum can be diagonalized later.
lr.write('Omega_Na2.gz')
|
robwarm/gpaw-symm
|
doc/exercises/lrtddft/Na2TDDFT.py
|
Python
|
gpl-3.0
| 349
|
[
"ASE",
"GPAW"
] |
684cd14a4b250892b036a20c424559ed07e84123523ef1839107f2e146834d50
|
##############################################################################
# The Combustion Flame Engine - pyflam3ng
# http://combustion.sourceforge.net
# http://github.com/bobbyrward/pyflam3ng/tree/master
#
# Copyright (C) 2007-2008 by Bobby R. Ward <bobbyrward@gmail.com>
#
# The Combustion Flame Engine is free software; you can redistribute
# it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library; see the file COPYING.LIB. If not, write to
# the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
##############################################################################
import sys
import itertools
import random
import math
from collections import defaultdict
import numpy
import Image
import copy
from lxml import etree
from func import *
from .variations import variation_registry
from . import flam3
from . import util
from . import vector_utils as vu
# Relative tolerance used for floating-point comparisons.
EPSILON = 0.0000000000001

def float_equality(x, y):
    """Return True if x and y are equal up to a relative error of EPSILON.

    Bug fix: the original test 'abs(x - y) < (x * EPSILON)' broke down
    when x was zero (tolerance collapsed to 0, so float_equality(0.0, 0.0)
    was False) and when x was negative (negative tolerance, so equal
    negative values compared unequal). The tolerance is now symmetric in
    both arguments and exact equality is always accepted.
    """
    if x == y:
        return True
    return abs(x - y) < EPSILON * max(abs(x), abs(y))
def load_flame(xml_source=None, fd=None, filename=None):
    """Load a set of genomes from an xml document

    If filename is specified:
        Loads the genomes from the file
    If fd is specified:
        Loads the genomes from the filelike object
    If xml_source is specified:
        Loads the genomes directly from the string

    Parameters are tested in this order
    """
    # Only close the handle if this function opened it.
    opened_here = filename is not None
    if opened_here:
        fd = open(filename)
    try:
        if fd is not None:
            xml_source = fd.read()
    finally:
        if opened_here:
            fd.close()
    root = etree.fromstring(xml_source)
    return [load_genome(flame_node=node) for node in root.xpath('//flame')]
def load_genome(flame_node=None, xml_source=None, genome_handle=None):
    """Load a genome from a variety of sources

    If xml_source is specified:
        Loads the genome directly from the string
    If flame_node is speficied:
        Loads the genome from the lxml.etree.Element
    If gnome_handle is specified:
        Loads the genomes from the pyflam3ng.flam3.GenomeHandle
    If none are specified, returns None

    Parameters are tested in this order
    """
    # A raw XML string takes precedence: parse it into a flame node.
    if xml_source:
        flame_node = etree.fromstring(xml_source).xpath('//flame')[0]
    if flame_node is not None:
        source_kwargs = dict(flame_node=flame_node)
    elif genome_handle is not None:
        source_kwargs = dict(genome_handle=genome_handle)
    else:
        return None
    return Genome(**source_kwargs)
class Point(object):
    """A 2d point in cartesian space"""

    def __init__(self, x=0.0, y=0.0, seq=None, scalar=None):
        # Alternative initializers: a 2-sequence, or one scalar for both axes.
        if seq is not None:
            self.x = seq[0]
            self.y = seq[1]
        elif scalar is not None:
            self.x = scalar
            self.y = scalar
        else:
            self.x = x
            self.y = y

    def clone(self):
        """Return an independent copy of this point."""
        return Point(self.x, self.y)

    def __iter__(self):
        yield self.x
        yield self.y

    def __getitem__(self, key):
        if key == 0:
            return self.x
        elif key == 1:
            return self.y
        else:
            raise IndexError()

    def __len__(self):
        return 2

    def __repr__(self):
        return '<Point x=%f y=%f>' % (self.x, self.y)

    def magnitude_squared(self):
        """Return the squared distance from the origin."""
        return self.x**2 + self.y**2

    def magnitude(self):
        """Return the distance from the origin."""
        return math.sqrt(self.magnitude_squared())

    def angle_radians(self):
        """Return the angle from the positive x axis, in radians."""
        return math.atan2(self.y, self.x)

    def angle(self):
        """Return the angle from the positive x axis, in degrees."""
        return self.angle_radians() * (180.0/math.pi)

    def _get_polar(self):
        # Polar form as a (length, angle-in-degrees) pair.
        return self.magnitude(), self.angle()

    def _set_polar(self, value):
        """Set the point from a ``(length, angle)`` pair (angle in degrees).

        Bug fix: this setter previously declared two positional parameters
        (angle, length), so any ``p.polar = ...`` assignment raised a
        TypeError (a property setter receives exactly one value), and the
        length was ignored entirely. It now accepts the same
        ``(length, angle)`` ordering that the getter returns.
        """
        length, angle = value
        radians = angle * math.pi / 180.0
        self.x = length * math.cos(radians)
        self.y = length * math.sin(radians)

    polar = property(_get_polar, _set_polar)

    # Arithmetic accepts any 2-sequence on the right-hand side.
    def __add__(self, rhs):
        return Point(self.x + rhs[0], self.y + rhs[1])

    def __sub__(self, rhs):
        return Point(self.x - rhs[0], self.y - rhs[1])

    def __mul__(self, rhs):
        return Point(self.x * rhs[0], self.y * rhs[1])

    def __div__(self, rhs):
        return Point(self.x / rhs[0], self.y / rhs[1])

    def __iadd__(self, rhs):
        self.x += rhs[0]
        self.y += rhs[1]
        return self

    def __isub__(self, rhs):
        self.x -= rhs[0]
        self.y -= rhs[1]
        return self

    def __imul__(self, rhs):
        self.x *= rhs[0]
        self.y *= rhs[1]
        return self

    def __idiv__(self, rhs):
        self.x /= rhs[0]
        self.y /= rhs[1]
        return self

    def __eq__(self, rhs):
        # Component-wise comparison within the module's float tolerance.
        if rhs is None:
            return False
        if float_equality(self.x, rhs[0]) and float_equality(self.y, rhs[1]):
            return True
        return False

    def in_circle(self, center, radius):
        """Return True if this point lies inside or on the given circle."""
        # find the difference between the two points
        u = self - center
        # find the magnitude squared of the difference
        mm = u.magnitude_squared()
        # if the magnitude is less than the radius it is inside the circle
        return (radius*radius - mm) >= 0.0

    def in_triangle(self, v0, v1, v2):
        """Return True if this point lies inside the triangle (v0, v1, v2).

        Solves for barycentric-style coordinates (u, v) of the point in the
        edge basis (e1, e2); degenerate (zero-area) triangles return False.
        """
        e0 = self - v0
        e1 = v1 - v0
        e2 = v2 - v0
        if float_equality(e1.x, 0.):
            # e1 is vertical: solve u from the x component of e2 first.
            if float_equality(e2.x, 0.):
                return False
            u = e0.x / e2.x
            if u < 0 or u > 1:
                return False
            if float_equality(e1.y, 0.):
                return False
            v = (e0.y - e2.y * u) / e1.y
            if v < 0:
                return False
        else:
            # General case: d is the (signed, doubled) triangle area.
            d = e2.y * e1.x - e2.x * e1.y
            if float_equality(d, 0.):
                return False
            u = (e0.y * e1.x - e0.x * e1.y) / d
            if u < 0 or u > 1:
                return False
            v = (e0.x - e2.x * u) / e1.x
            if v < 0:
                return False
        return u + v <= 1.0

    def in_rect(self, left, top, right, bottom):
        """Return True if this point lies inside the given edge-bounded rect."""
        if self.x < left or self.x > right:
            return False
        if self.y < top or self.y > bottom:
            return False
        return True

    def in_rect_wh(self, left, top, width, height):
        """Return True if this point lies inside the rect given by origin+size."""
        return self.in_rect(left, top, left + width, top + height)
class Matrix(object):
    """A 3x3 affine transform over 2D points.

    Uses the row-vector convention (points transform as ``p' = p * M``),
    so translation components live in the bottom row of the matrix.
    """

    def __init__(self):
        # Start from the identity transform.
        self._matrix = numpy.matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]])

    def clone(self):
        """Return an independent copy of this matrix."""
        m = Matrix()
        m._matrix = self._matrix.copy()
        return m

    def transform(self, p):
        """Apply the full affine transform (including translation) to p."""
        return Point(self._matrix[0, 0] * p[0] + self._matrix[1, 0] * p[1] + self._matrix[2, 0],
                     self._matrix[0, 1] * p[0] + self._matrix[1, 1] * p[1] + self._matrix[2, 1])

    def transform_distance(self, p):
        """Apply only the linear part (rotation/scale), ignoring translation."""
        return Point(self._matrix[0, 0] * p[0] + self._matrix[1, 0] * p[1],
                     self._matrix[0, 1] * p[0] + self._matrix[1, 1] * p[1])

    def rotate(self, degrees):
        """Compose a rotation by ``degrees`` into this matrix.

        Bug fix: degrees were previously converted with
        ``degrees * (180.0 / math.pi)``, which is the radians-to-degrees
        factor; the correct degrees-to-radians conversion is
        ``degrees * (math.pi / 180.0)``.
        """
        radians = degrees * (math.pi / 180.0)
        c = math.cos(radians)
        s = math.sin(radians)
        m = numpy.matrix([[c, -s, 0], [s, c, 0], [0, 0, 1]])
        self._matrix *= m

    def translate(self, pos):
        """Compose a translation by ``pos`` (any 2-sequence)."""
        m = numpy.matrix([[1, 0, 0], [0, 1, 0], [pos[0], pos[1], 1]])
        self._matrix *= m

    def scale(self, point):
        """Compose a (possibly non-uniform) scale by ``point`` (any 2-sequence)."""
        m = numpy.matrix([[point[0], 0, 0], [0, point[1], 0], [0, 0, 1]])
        self._matrix *= m

    def inverse(self):
        """Return a new Matrix holding the inverse transform."""
        m = Matrix()
        m._matrix = numpy.linalg.inv(self._matrix)
        return m

    def __mul__(self, rhs):
        # NOTE: returns a raw numpy matrix, not a Matrix instance.
        if hasattr(rhs, '_matrix'):
            rhs = rhs._matrix
        return self._matrix * rhs
class Variations(object):
"""Wraps the variations in use by an XForm"""
def __init__(self):
self._values = defaultdict(lambda: 0.0)
self._variables = {}
def __getitem__(self, key):
return self.values[key]
def __setitem__(self, key, value):
self.values[key] = value
def __delitem__(self, key):
del self.values[key]
def __contains__(self, key):
return key in self._values
def variation_vars(self, variation_name=None):
if variation_name in self._variables:
return self._variables[variation_name]
else:
return None
def set_variable(self, variation_name, variable_name, value):
if variation_name in self._variables:
vars = self._variables[variation_name]
if variable_name in vars:
vars[variable_name] = value
else:
raise KeyError('Unknown variable')
else:
raise KeyError('Unknown variation')
def get_variable(self, variation_name, variable_name):
if variation_name in self._variables:
vars = self._variables[variation_name]
if variable_name in vars:
return vars[variable_name]
else:
raise KeyError('Unknown variable')
else:
raise KeyError('Unknown variation')
def set_variation(self, variation_name, value=1.0):
if variation_name not in variation_registry.keys():
return KeyError('Unknown variation')
if variation_name in self._values:
if value == 0.0:
self._values.pop(variation_name)
else:
self._values[variation_name] = value
else:
self._values.setdefault(variation_name, value)
if variation_registry[variation_name]:
if value == 0.0:
self._variables.pop(variation_name)
else:
self._variables.setdefault(variation_name,
variation_registry[variation_name])
def _get_values(self):
return self._values
values = property(_get_values)
def _get_variables(self):
return self._variables
variables = property(_get_variables)
class Palette(object):
def __init__(self):
self.array = numpy.zeros((256,3), numpy.float32)
def smooth(self, ntries=50, trysize=10000):
self.array = util.palette_improve(self.array, ntries, trysize)
def adjust_hue(self, val):
for i in xrange(256):
color = self.array[i]
color = rgb2hls(color[0], color[1], color[2])
self.array[i] = hls2rgb(color[0]+val, color[1], color[2])
def adjust_sat(self, val):
for i in xrange(256):
color = self.array[i]
color = rgb2hls(color[0], color[1], color[2])
self.array[i] = hls2rgb(color[0], color[1], color[2]+val)
def adjust_bright(self, val):
for i in xrange(256):
color = self.array[i]
color = rgb2hls(color[0], color[1], color[2])
self.array[i] = hls2rgb(color[0], color[1]+val, color[2])
def rotate(self, slots):
tmp = numpy.zeros((256,3))
tmp[:slots] = self.array[-slots:]
tmp[slots:] = self.array[:-slots]
self.array[:] = tmp[:]
def random(self, h_ranges=[(0,1)], l_ranges=[(0,1)], s_ranges=[(0,1)],
blocks=(32,64)):
if type(blocks) == int:
nblocks = blocks
elif type(blocks) == tuple and len(blocks) == 2:
nblocks = random.randint(blocks[0], blocks[1])
else:
raise TypeError('blocks must be int or 2-tuple range')
mbs = 256/nblocks
mbr = 256%nblocks
bsv = mbs/2
bs = []
for i in xrange(nblocks):
v = random.randint(-bsv, bsv)
mbr -= v
bs.append(mbs+v)
if mbr > 0:
r = len(bs)/mbr
for i in xrange(mbr):
bs[(i*r)+random.randrange(r)] += 1
elif mbr < 0:
r = -len(bs)/mbr
for i in xrange(-mbr):
bs[(i*r)+random.randrange(r)] -= 1
index = 0
for b in bs:
hr = h_ranges[random.randint(0, len(h_ranges)-1)]
h = random.uniform(hr[0], hr[1])
lr = l_ranges[random.randint(0, len(l_ranges)-1)]
l = random.uniform(lr[0], lr[1])
sr = s_ranges[random.randint(0, len(s_ranges)-1)]
s = random.uniform(sr[0], sr[1])
for i in xrange(b):
self.array[index] = hls2rgb(h,l,s)
index += 1
def from_seed(self, seed, c_split=0, split=90, dist=64, space='rgb', curve='hcos'):
c_split /= 360.0
split /= 360.0
if space=='rgb':
h,l,s = rgb2hls(*seed)
comp = hls2rgb(h+c_split+0.5,l,s)
lspl = hls2rgb(h-split,l,s)
rspl = hls2rgb(h+split,l,s)
elif space=='hls':
seed = rgb2hls(*seed)
comp = [clip(h+c_split+0.5,0,1,True),l,s]
lspl = [clip(h-split,0,1,True),l,s]
rspl = [clip(h+split,0,1,True),l,s]
else:
raise ValueError('Invalid color space')
tmp = numpy.zeros((256,3))
tmp[:dist] = vu.get_spline([vu.CP(comp), vu.CP(lspl)], dist, curve=curve)
tmp[dist:128] = vu.get_spline([vu.CP(lspl), vu.CP(seed)], 128-dist, curve=curve)
tmp[128:256-dist] = vu.get_spline([vu.CP(seed), vu.CP(rspl)], 128-dist, curve=curve)
tmp[256-dist:] = vu.get_spline([vu.CP(rspl), vu.CP(comp)], dist, curve=curve)
if space=='hls':
for i in xrange(256):
tmp[i] = hls2rgb(*tmp[i])
self.array[:] = tmp[:]
def from_seeds(self, seeds, space='rgb', curve='hcos'):
ns = len(seeds)
d = 256/ns
r = 256%ns
ds = []
for i in xrange(ns):
if space=='hls':
seeds[i] = rgb2hls(*seeds[i])
if i+1<=r: ds.append(d+1)
else: ds.append(d)
tmp = numpy.zeros((256,3))
for i in xrange(ns):
v = vu.get_spline([vu.CP(seeds[i-1]), vu.CP(seeds[i])], ds[i], curve=curve)
tmp[sum(ds[0:i]):sum(ds[0:i+1])] = v
if space=='hls':
for i in xrange(256):
tmp[i] = hls2rgb(*tmp[i])
self.array[:] = tmp[:]
def from_file(self, filename):
img = Image.open(filename)
bin = map(ord, img.tostring())
for i in xrange(256):
x = random.randint(0, img.size[0]-1)
y = random.randint(0, img.size[1]-1)
idx = 3*(x + img.size[0]*y)
self.array[i] = bin[idx:idx+3]
self.smooth()
class Xform(object):
def __init__(self, xml_node=None, **kwargs):
self._weight = kwargs.get('weight', 0.0)
self._color = kwargs.get('color', 0.0)
self._symmetry = kwargs.get('symmetry', 0.0)
self._opacity = kwargs.get('opacity', 1.0)
self.animate = 0.0
self._x = Point()
self._y = Point()
self._o = Point()
self._px = Point()
self._py = Point()
self._po = Point()
self.coefs = kwargs.get('coefs', [0.0, 1.0, 1.0, 0.0, 0.0, 0.0])
self.post = kwargs.get('post', [0.0, 1.0, 1.0, 0.0, 0.0, 0.0])
#self.coefs = [(x,y for x,y in [self.x, self.y, self.o])] #?
#self.post = [(x,y for x,y in [self.px, self.py, self.po])]
self.vars = Variations()
if xml_node is not None:
self._load_xml(xml_node)
def copy(self):
return copy.deepcopy(self)
def get_pad(self):
hole_vars = ['spherical', 'ngon', 'julian', 'juliascope', 'polar'
,'wedge_sph', 'wedge_julia']
pad = self.copy()
pad.coefs = [0.0, 1.0, 1.0, 0.0, 0.0, 0.0]
pad.weight = 0.0
pad.symmetry = 1.0
if len(set(pad.vars.values.keys()).intersection(hole_vars)) > 0:
pad.coefs = [-1.0, 0.0, 0.0, -1.0, 0.0, 0.0]
pad.vars.set_variation('linear', -1.0)
if 'rectangles' in pad.vars.values.keys():
pad.vars.set_variation('rectangles', 1.0)
pad.vars.set_variable('rectangles', 'x', 0.0)
pad.vars.set_variable('rectangles', 'y', 0.0)
if 'rings2' in pad.vars.values.keys():
pad.vars.set_variation('rings2', 1.0)
pad.vars.set_variable('rings2', 'val', 0.0)
if 'fan2' in pad.vars.values.keys():
pad.vars.set_variation('fan2', 1.0)
pad.vars.set_variable('fan2', 'x', 0.0)
pad.vars.set_variable('fan2', 'y', 0.0)
if 'blob' in pad.vars.values.keys():
pad.vars.set_variation('blob', 1.0)
pad.vars.set_variable('blob', 'low', 1.0)
pad.vars.set_variable('blob', 'high', 1.0)
pad.vars.set_variable('blob', 'waves', 1.0)
if 'perspective' in pad.vars.values.keys():
pad.vars.set_variation('perspective', 1.0)
pad.vars.set_variable('perspective', 'angle' , 0.0)
if 'curl' in pad.vars.values.keys():
pad.vars.set_variation('curl', 1.0)
pad.vars.set_variable('curl', 'c1', 0.0)
pad.vars.set_variable('curl', 'c2', 0.0)
if 'super_shape' in pad.vars.values.keys():
pad.vars.set_variation('super_shape', 1.0)
pad.vars.set_variable('super_shape', 'n1', 2.0)
pad.vars.set_variable('super_shape', 'n2', 2.0)
pad.vars.set_variable('super_shape', 'n3', 2.0)
pad.vars.set_variable('super_shape', 'rnd', 0.0)
pad.vars.set_variable('super_shape', 'holes', 0.0)
if 'fan' in pad.vars.values.keys():
pad.vars.set_variation('fan', 1.0)
if 'rings' in pad.vars.values.keys():
pad.vars.set_variation('rings', 1.0)
#tot = sum(pad.vars.values.values())
#for v in pad.vars.values: pad.vars.values[v] /= tot
return
def rotate_x(self, deg):
self._x.ang += deg
def rotate_y(self, deg):
self._y.ang += deg
def scale_x(self, scale):
self._x.len *= scale
def scale_y(self, scale):
self._y.len *= scale
def rotate(self, deg):
self.rotate_x(deg)
self.rotate_y(deg)
def pivot(self, deg):
self._o.ang += deg
def scale(self, scale):
self.scale_x(scale)
self.scale_y(scale)
#properties
def _get_weight(self):
return self._weight
def _set_weight(self, weight):
self._weight = clip(weight, mini=0.0)
weight = property(_get_weight, _set_weight)
def _get_color(self):
return self._color
def _set_color(self, color):
self._color = clip(color, mini=0.0, maxi=1.0)
color = property(_get_color, _set_color)
def _get_symmetry(self):
return self._symmetry
def _set_symmetry(self, symmetry):
self._symmetry = clip(symmetry, mini=-1.0, maxi=1.0)
symmetry = property(_get_symmetry, _set_symmetry)
def _get_opacity(self):
return self._opacity
def _set_opacity(self, opacity):
self._opacity = clip(opacity, mini=0.0, maxi=1.0)
opacity = property(_get_opacity, _set_opacity)
def _get_x(self):
return self._x
def _set_x(self, x):
if not isinstance(x, Point):
if len(x) <> 2:
raise ValueError('Need x,y point for x')
else:
self._x = Point(x[0], x[1])
else:
self._x = x
x = property(_get_x, _set_x)
def _get_y(self):
return self._y
def _set_y(self, y):
if not isinstance(y, Point):
if len(y) <> 2:
raise ValueError('Need x,y point for y')
else:
self._y = Point(y[0], y[1])
else:
self._y = y
y = property(_get_y, _set_y)
def _get_o(self):
return self._o
def _set_o(self, o):
if not isinstance(o, Point):
if len(o) <> 2:
raise ValueError('Need x,y point for o')
else:
self._o = Point(o[0], o[1])
else:
self._o = o
o = property(_get_o, _set_o)
def _get_px(self):
return self._px
def _set_px(self, px):
if not isinstance(x, Point):
if len(px) <> 2:
raise ValueError('Need x,y point for px')
else:
self._px = Point(px[0], px[1])
else:
self._px = px
px = property(_get_px, _set_px)
def _get_py(self):
return self._py
def _set_py(self, py):
if not isinstance(y, Point):
if len(py) <> 2:
raise ValueError('Need x,y point for py')
else:
self._py = Point(py[0], py[1])
else:
self._py = py
py = property(_get_py, _set_py)
def _get_po(self):
return self._po
def _set_po(self, po):
if not isinstance(po, Point):
if len(po) <> 2:
raise ValueError('Need x,y point for po')
else:
self._po = Point(po[0], po[1])
else:
self._po = po
po = property(_get_po, _set_po)
def _get_coefs(self):
return [self._x.x, self._x.y, self._y.x, self._y.y, self._o.x, self._o.y]
def _set_coefs(self, coefs):
if type(coefs)==list or type(coefs)==tuple:
if len(coefs)==3 and isinstance(coefs[0],Point):
self._x = coefs[0]
self._y = coefs[1]
self._o = coefs[2]
elif len(coefs)==6:
self._x = Point(coefs[0], coefs[1])
self._y = Point(coefs[2], coefs[3])
self._o = Point(coefs[4], coefs[5])
else:
raise TypeError('need list of 3 Points or 6 vals')
coefs = property(_get_coefs, _set_coefs)
def _get_post(self):
return [self._px.x, self._px.y, self._py.x, self._py.y, self._po.x, self._po.y]
def _set_post(self, coefs):
if type(coefs)==list or type(coefs)==tuple:
if len(coefs)==3 and isinstance(coefs[0],Point):
self._px = coefs[0]
self._py = coefs[1]
self._po = coefs[2]
elif len(coefs)==6:
self._px = Point(coefs[0], coefs[1])
self._py = Point(coefs[2], coefs[3])
self._po = Point(coefs[4], coefs[5])
else:
raise TypeError('need list of 3 Points or 6 vals')
post = property(_get_post, _set_post)
"""This is for interpo for now"""
def get_attribs(self):
attribs = {'x': self.x, 'y': self.y, 'o': self.o
,'weight': self.weight, 'color': self.color
,'symmetry': self.symmetry, 'opacity': self.opacity
,'vars': self.vars.values, 'variables': self.vars.variables}
if self.post <> [0, 1, 1, 0, 0, 0]:
attribs.setdefault('px', self.px)
attribs.setdefault('py', self.py)
attribs.setdefault('po', self.po)
return attribs
def _load_xml(self, xform_node):
def scalar_attrib(src_name, dest_name=None, coerce_type=float, node=xform_node):
if src_name in node.attrib:
setattr(self, dest_name if dest_name else src_name,
coerce_type(node.attrib[src_name]))
def whitespace_array(src_name, coerce_type=float, node=xform_node):
return map(coerce_type, node.attrib.get(src_name).split(' '))
scalar_attrib('weight')
self.color = 0.0
scalar_attrib('color')
scalar_attrib('symmetry', 'symmetry')
scalar_attrib('color_speed', 'symmetry')
scalar_attrib('animate')
self.opacity = 1.0
scalar_attrib('opacity')
coefs_list = whitespace_array('coefs')
self._x = Point(coefs_list[0], coefs_list[1])
self._y = Point(coefs_list[2], coefs_list[3])
self._o = Point(coefs_list[4], coefs_list[5])
if 'post' in xform_node.attrib:
post_list = whitespace_array('post')
self._px = Point(post_list[0], post_list[1])
self._py = Point(post_list[2], post_list[3])
self._po = Point(post_list[4], post_list[5])
for name, value in xform_node.attrib.iteritems():
if name in variations.variation_registry:
self.vars.set_variation(name, float(value))
continue
parts = name.split('_')
if len(parts) == 2 and parts[0] in variations.variation_registry:
self.vars.set_variable(parts[0], parts[1], value)
#TODO: chaos
#---end Xform
class Genome(object):
def __init__(self, random=True, flame_node=None, genome_handle=None):
self.set_defaults()
if flame_node is not None:
self._init_from_node(flame_node)
elif genome_handle is not None:
self._init_from_handle(genome_handle)
else:
self.genome_handle = flam3.GenomeHandle()
if random: self.random()
def set_defaults(self):
self.time = 0.0
self._finalx = False
self._final = Xform()
self._final.vars.set_variation('linear', 1.0)
self.palette = Palette()
self.center = numpy.zeros(1, [('x', numpy.float32), ('y', numpy.float32)])
self.gamma = 4.0
self.vibrancy = 1.0
self.contrast = 1.0
self.brightness = 4.0
self.symmetry = 0
self.hue_rotation = 0.0
self.rotate = 0.0
self.pixels_per_unit = 50
self.interpolation = flam3.flam3_interpolation_linear
self.palette_interpolation = flam3.flam3_palette_interpolation_hsv
self.highlight_power = -1.0
self.background = numpy.zeros(1, [('r', numpy.uint8), ('g', numpy.uint8), ('b', numpy.uint8)])
self.width = 100
self.height = 100
self.spatial_oversample = 1
self.spatial_filter_radius = 0.5
self.zoom = 0.0
self.sample_density = 1
self.estimator = 9.0
self.estimator_minimum = 0.0
self.estimator_curve = 0.4
self.gam_lin_thresh = 0.01
self.nbatches = 1
self.ntemporal_samples = 1000
self.spatial_filter_select = flam3.flam3_gaussian_kernel
self.interpolation_type = flam3.flam3_inttype_log
self.temporal_filter_type = flam3.flam3_temporal_box
self.temporal_filter_width = 1.0
self.temporal_filter_exp = 0.0
self.palette_mode = flam3.flam3_palette_mode_step
self.xforms = []
"""This is for interpo for now"""
def get_attribs(self):
attribs = {'brightness': self.brightness, 'contrast': self.contrast
,'gamma': self.gamma, 'vibrancy': self.vibrancy
,'rotate': self.rotate, 'scale': self.pixels_per_unit
,'symmetry': self.symmetry, 'center': self.center
,'palette': self.palette.array}
xattribs = {}
for i in xrange(len(self.xforms)):
xattribs.setdefault('xf'+str(i), self.xforms[i].get_attribs())
attribs.setdefault('xforms', xattribs)
return attribs
def has_final(self):
return self._finalx
def enable_final(self):
self._finalx = True
def disable_final(self):
self._finalx = False
def _get_final(self):
if not self._finalx: return None
else: return self._final
def _set_final(self, xform):
if not xform:
self._finalx = False
else:
self._final = xform
final = property(_get_final, _set_final,
doc='Returns the final xform (None if disabled).')
def _get_flame_node(self):
self._flame_node = self.to_xml()
return self._flame_node
flame_node = property(_get_flame_node)
def _init_from_node(self, flame_node):
self._flame_node = flame_node
self._refresh_handle_from_self()
self._refresh_self_from_handle()
def _init_from_handle(self, genome_handle):
self.genome_handle = genome_handle
self._refresh_self_from_handle()
#start xml
def to_xml(self):
root = etree.Element('flame')
root.set('name', self.name)
root.set('time', str(self.time))
root.set('size', '%d %d' % (self.width, self.height))
root.set('center', '%f %f' % tuple(self.center[0]))
root.set('rotate', str(self.rotate))
root.set('scale', str(self.pixels_per_unit))
if self.zoom <> 0:
root.set('zoom', str(self.zoom))
root.set('oversample', str(self.spatial_oversample))
root.set('filter', str(self.spatial_filter_radius))
shapes = ['gaussian', 'hermite', 'box', 'triangle', 'bell', 'bspline',
'lanczos3', 'lanczos2', 'mitchell', 'blackman', 'catrom',
'hamming', 'hanning', 'quadratic']
root.set('filter_shape', shapes[self.spatial_filter_select])
root.set('quality', str(self.nbatches))
root.set('brightness', str(self.brightness))
root.set('gamma', str(self.gamma))
root.set('vibrancy', str(self.vibrancy))
root.set('contrast', str(self.contrast))
root.set('highlight_power', str(self.highlight_power))
root.set('background', '%d %d %d' % tuple(self.background[0]))
if self.symmetry <> 0:
root.set('symmetry', str(self.symmetry))
if self.interpolation == 1:
root.set('interpolation', 'smooth')
if self.palette_interpolation == 1:
root.set('palette_interpolation', 'sweep')
types = ['linear', 'log', 'old', 'older']
if self.interpolation_type <> 0:
root.set('interpolation_type', types[self.interpolation_type])
root.set('estimator_radius', str(self.estimator))
root.set('estimator_minimum', str(self.estimator_minimum))
root.set('estimator_curve', str(self.estimator_curve))
if self.ntemporal_samples <> 0:
root.set('temporal_samples', '%d' % (self.ntemporal_samples))
if self.temporal_filter_type <> 0:
root.set('temporal_filter_type', str(self.temporal_filter_type))
if self.temporal_filter_width <> 0:
root.set('temporal_filter_width', str(self.temporal_filter_width))
if self.temporal_filter_exp <> 0:
root.set('temporal_filter_exp', str(self.temporal_filter_exp))
for xf in self.xforms:
xroot = etree.SubElement(root, "xform")
xroot.set('weight', str(xf.weight))
xroot.set('color', str(xf.color))
xroot.set('symmetry', str(xf.symmetry))
#xroot.set('chaos',
xroot.set('coefs', '%f %f %f %f %f %f' % tuple(xf.coefs))
if xf.post <> [0, 1, 1, 0, 0, 0]:
xroot.set('post', '%f %f %f %f %f %f' % tuple(xf.post))
for var, weight in xf.vars.values.items():
xroot.set(var, str(weight))
if var in xf.vars.variables.keys():
for vari, valu in xf.vars.variables[var].items():
xroot.set('%s_%s' % (var,vari), str(valu))
if self.has_final():
froot = etree.SubElement(root, "finalxform")
froot.set('color', str(self.final.color))
froot.set('symmetry', str(self.final.symmetry))
froot.set('coefs', '%f %f %f %f %f %f' % tuple(self.final.coefs))
if self.final.post <> [0, 1, 1, 0, 0, 0]:
froot.set('post', '%f %f %f %f %f %f' % tuple(self.final.post))
for var, weight in self.final.vars.values.items():
froot.set(var, str(weight))
if var in self.final.vars.variables.keys():
for vari, valu in self.final.vars.variables[var].items():
froot.set('%s_%s' % (var,vari), str(valu))
for i in xrange(256):
croot = etree.SubElement(root, "color")
croot.set('index', str(i))
croot.set('rgb', '%.2f %.2f %.2f' % tuple(self.palette.array[i]))
return root
#end xml
def clone(self):
return load_genome(xml_source=etree.tostring(self._flame_node))
def render(self, buffer, **kwargs):
return self.genome_handle.render(buffer, **kwargs)
def random(self, variations=None, symmetry=False, num_xforms=2):
if variations is None:
variations = flam3.get_variation_list()
self.genome_handle.random(variations, symmetry, num_xforms)
self._refresh_self_from_handle()
def _refresh_handle_from_self(self):
self.genome_handle = flam3.from_xml(etree.tostring(self._flame_node))[0]
def _refresh_self_from_handle(self):
xml_source = flam3.to_xml(self.genome_handle)
self._flame_node = etree.fromstring(xml_source)
attrib = self._flame_node.attrib
def scalar_attrib(src_name, dest_name=None, coerce_type=float, node=self._flame_node):
if src_name in node.attrib:
setattr(self, dest_name if dest_name else src_name,
coerce_type(node.attrib[src_name]))
def whitespace_array(src_name, coerce_type=float, node=self._flame_node):
return map(coerce_type, node.attrib.get(src_name).split(' '))
def mapped_attrib(src_name, dest_name=None, mapping={}, node=self._flame_node):
if src_name in node.attrib:
setattr(self, dest_name if dest_name else src_name,
mapping[node.attrib[src_name]])
self.width, self.height = whitespace_array('size', int)
self.center.fill(buffer(numpy.array(whitespace_array('center'))))
self.background.fill(buffer(numpy.array(whitespace_array('background'))))
self.name = 'unknown'
scalar_attrib('name', coerce_type=str)
scalar_attrib('time')
scalar_attrib('scale', 'pixels_per_unit')
scalar_attrib('zoom')
scalar_attrib('rotate')
scalar_attrib('filter', 'spatial_filter_radius')
scalar_attrib('temporal_filter_width')
scalar_attrib('quality', 'sample_density')
scalar_attrib('passes', 'nbatches')
scalar_attrib('temporal_samples', 'ntemporal_samples')
scalar_attrib('brightness')
scalar_attrib('gamma')
scalar_attrib('highlight_power')
scalar_attrib('vibrancy')
scalar_attrib('estimator_radius', 'estimator')
scalar_attrib('estimator_minimum')
scalar_attrib('estimator_curve')
scalar_attrib('gamma_threshold', 'gam_lin_thresh')
scalar_attrib('supersample', 'spatial_oversample', int)
mapped_attrib('interpolation', mapping={
'linear': flam3.flam3_interpolation_linear,
'smooth': flam3.flam3_interpolation_smooth,
})
mapped_attrib('palette_interpolation', mapping={
'hsv': flam3.flam3_palette_interpolation_hsv,
'sweep': flam3.flam3_palette_interpolation_sweep,
})
mapped_attrib('filter_shape', 'spatial_filter_select', mapping={
'gaussian': flam3.flam3_gaussian_kernel,
'hermite': flam3.flam3_hermite_kernel,
'box': flam3.flam3_box_kernel,
'triangle': flam3.flam3_triangle_kernel,
'bell': flam3.flam3_bell_kernel,
'bspline': flam3.flam3_b_spline_kernel,
'lanczos3': flam3.flam3_lanczos3_kernel,
'lanczos2': flam3.flam3_lanczos2_kernel,
'mitchell': flam3.flam3_mitchell_kernel,
'blackman': flam3.flam3_blackman_kernel,
'catrom': flam3.flam3_catrom_kernel,
'hamming': flam3.flam3_hamming_kernel,
'hanning': flam3.flam3_hanning_kernel,
'quadratic': flam3.flam3_quadratic_kernel,
})
mapped_attrib('temporal_filter_type', mapping={
'box': flam3.flam3_temporal_box,
'gaussian': flam3.flam3_temporal_gaussian,
'exp': flam3.flam3_temporal_exp,
})
mapped_attrib('palette_mode', mapping={
'step': flam3.flam3_palette_mode_step,
'linear': flam3.flam3_palette_mode_linear,
})
mapped_attrib('interpolation', mapping={
'linear': flam3.flam3_interpolation_linear,
'smooth': flam3.flam3_interpolation_smooth,
})
mapped_attrib('interpolation_type', mapping={
'linear': flam3.flam3_inttype_linear,
'log': flam3.flam3_inttype_log,
'old': flam3.flam3_inttype_compat,
'older': flam3.flam3_inttype_older,
})
mapped_attrib('palette_interpolation', mapping={
'hsv': flam3.flam3_palette_interpolation_hsv,
'sweep': flam3.flam3_palette_interpolation_sweep ,
})
sym_node = self._flame_node.xpath('//symmetry')
if sym_node:
scalar_attrib('symmetry', coerce_type=int, node=sym_node[0])
self.palette = Palette()
for color_node in self._flame_node.xpath('//color'):
# should this be int(math.floor(float(... ?
#TODO: This loses all the float palette entries from flam3. Is this what we want?
index = int(float(color_node.attrib['index']))
rgb = map(int, whitespace_array('rgb', node=color_node))
self.palette.array[index] = rgb
self.xforms = []
for xform_node in self._flame_node.xpath('//xform'):
self.xforms.append(Xform(xml_node=xform_node))
|
bobbyrward/pyflam3ng
|
pyflam3ng/__init__.py
|
Python
|
gpl-3.0
| 37,943
|
[
"Gaussian"
] |
7fdaf99d86dcd24e2c12c5bbd720fe25c3d560f496dc6871ed750d5d5f7b9e11
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
from glob import glob
class Bowtie2(Package):
"""Bowtie 2 is an ultrafast and memory-efficient tool for aligning
sequencing reads to long reference sequences"""
homepage = "bowtie-bio.sourceforge.net/bowtie2/index.shtml"
url = "http://downloads.sourceforge.net/project/bowtie-bio/bowtie2/2.3.1/bowtie2-2.3.1-source.zip"
version('2.3.1', 'b4efa22612e98e0c23de3d2c9f2f2478')
version('2.3.0', '3ab33f30f00f3c30fec1355b4e569ea2')
version('2.2.5', '51fa97a862d248d7ee660efc1147c75f')
depends_on('tbb', when='@2.3.0:')
depends_on('readline', when='@2.3.1:')
depends_on('perl', type='run')
depends_on('python', type='run')
depends_on('zlib', when='@2.3.1:')
patch('bowtie2-2.2.5.patch', when='@2.2.5', level=0)
patch('bowtie2-2.3.1.patch', when='@2.3.1', level=0)
patch('bowtie2-2.3.0.patch', when='@2.3.0', level=0)
# seems to have trouble with 6's -std=gnu++14
conflicts('%gcc@6:')
@run_before('install')
def filter_sbang(self):
"""Run before install so that the standard Spack sbang install hook
can fix up the path to the perl|python binary.
"""
with working_dir(self.stage.source_path):
kwargs = {'ignore_absent': True, 'backup': False, 'string': False}
match = '^#!/usr/bin/env perl'
perl = self.spec['perl'].command
substitute = "#!{perl}".format(perl=perl)
files = ['bowtie2', ]
filter_file(match, substitute, *files, **kwargs)
match = '^#!/usr/bin/env python'
python = self.spec['python'].command
substitute = "#!{python}".format(python=python)
files = ['bowtie2-build', 'bowtie2-inspect']
filter_file(match, substitute, *files, **kwargs)
def install(self, spec, prefix):
make()
mkdirp(prefix.bin)
for bow in glob("bowtie2*"):
install(bow, prefix.bin)
# install('bowtie2',prefix.bin)
# install('bowtie2-align-l',prefix.bin)
# install('bowtie2-align-s',prefix.bin)
# install('bowtie2-build',prefix.bin)
# install('bowtie2-build-l',prefix.bin)
# install('bowtie2-build-s',prefix.bin)
# install('bowtie2-inspect',prefix.bin)
# install('bowtie2-inspect-l',prefix.bin)
# install('bowtie2-inspect-s',prefix.bin)
|
skosukhin/spack
|
var/spack/repos/builtin/packages/bowtie2/package.py
|
Python
|
lgpl-2.1
| 3,638
|
[
"Bowtie"
] |
72e79672ecd1a60f27776492b28116373f31515438accbd3e54d68c4fc8747c1
|
r"""OS routines for Mac, NT, or Posix depending on what system we're on.
This exports:
- all functions from posix, nt, os2, or ce, e.g. unlink, stat, etc.
- os.path is one of the modules posixpath, or ntpath
- os.name is 'posix', 'nt', 'os2', 'ce' or 'riscos'
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.extsep is the extension separator ('.' or '/')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
#'
import sys, errno
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "extsep", "pathsep", "linesep",
"defpath", "name", "path", "devnull",
"SEEK_SET", "SEEK_CUR", "SEEK_END"]
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
if 'posix' in _names:
name = 'posix'
linesep = '\n'
from posix import *
try:
from posix import _exit
except ImportError:
pass
import posixpath as path
import posix
__all__.extend(_get_exports_list(posix))
del posix
elif 'nt' in _names:
name = 'nt'
linesep = '\r\n'
from nt import *
try:
from nt import _exit
except ImportError:
pass
import ntpath as path
import nt
__all__.extend(_get_exports_list(nt))
del nt
elif 'os2' in _names:
name = 'os2'
linesep = '\r\n'
from os2 import *
try:
from os2 import _exit
except ImportError:
pass
if sys.version.find('EMX GCC') == -1:
import ntpath as path
else:
import os2emxpath as path
from _emx_link import link
import os2
__all__.extend(_get_exports_list(os2))
del os2
elif 'ce' in _names:
name = 'ce'
linesep = '\r\n'
from ce import *
try:
from ce import _exit
except ImportError:
pass
# We can use the standard Windows path.
import ntpath as path
import ce
__all__.extend(_get_exports_list(ce))
del ce
elif 'riscos' in _names:
name = 'riscos'
linesep = '\n'
from riscos import *
try:
from riscos import _exit
except ImportError:
pass
import riscospath as path
import riscos
__all__.extend(_get_exports_list(riscos))
del riscos
else:
raise ImportError, 'no os specific module found'
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
del _names
# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
#'
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0777):
"""makedirs(path [, mode=0777])
Super-mkdir; create a leaf directory and all intermediate ones.
Works like mkdir, except that any intermediate path segment (not
just the rightmost) will be created if it does not exist. This is
recursive.
"""
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
if head and tail and not path.exists(head):
try:
makedirs(head, mode)
except OSError, e:
# be happy if someone already created the path
if e.errno != errno.EEXIST:
raise
if tail == curdir: # xxx/newdir/. exists if xxx/newdir exists
return
mkdir(name, mode)
def removedirs(name):
"""removedirs(path)
Super-rmdir; remove a leaf directory and all empty intermediate
ones. Works like rmdir except that, if the leaf directory is
successfully removed, directories corresponding to rightmost path
segments will be pruned away until either the whole path is
consumed or an error occurs. Errors during this latter phase are
ignored -- they generally mean that a directory was not empty.
"""
rmdir(name)
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
while head and tail:
try:
rmdir(head)
except error:
break
head, tail = path.split(head)
def renames(old, new):
    """renames(old, new)

    Super-rename; create directories as necessary and delete any left
    empty.  Works like rename, except creation of any intermediate
    directories needed to make the new pathname good is attempted
    first.  After the rename, directories corresponding to rightmost
    path segments of the old name will be pruned way until either the
    whole path is consumed or a nonempty directory is found.

    Note: this function can fail with the new directory structure made
    if you lack permissions needed to unlink the leaf directory or
    file.
    """
    head, tail = path.split(new)
    # Create the destination's parent directories if missing.
    if head and tail and not path.exists(head):
        makedirs(head)
    rename(old, new)
    head, tail = path.split(old)
    # Best-effort cleanup of now-empty source directories.
    if head and tail:
        try:
            removedirs(head)
        except error:
            pass
__all__.extend(["makedirs", "removedirs", "renames"])
def walk(top, topdown=True, onerror=None, followlinks=False):
    """Directory tree generator.

    For each directory in the directory tree rooted at top (including top
    itself, but excluding '.' and '..'), yields a 3-tuple

        dirpath, dirnames, filenames

    dirpath is a string, the path to the directory.  dirnames is a list of
    the names of the subdirectories in dirpath (excluding '.' and '..').
    filenames is a list of the names of the non-directory files in dirpath.
    Note that the names in the lists are just names, with no path components.
    To get a full path (which begins with top) to a file or directory in
    dirpath, do os.path.join(dirpath, name).

    If optional arg 'topdown' is true or not specified, the triple for a
    directory is generated before the triples for any of its subdirectories
    (directories are generated top down).  If topdown is false, the triple
    for a directory is generated after the triples for all of its
    subdirectories (directories are generated bottom up).

    When topdown is true, the caller can modify the dirnames list in-place
    (e.g., via del or slice assignment), and walk will only recurse into the
    subdirectories whose names remain in dirnames; this can be used to prune
    the search, or to impose a specific order of visiting.  Modifying
    dirnames when topdown is false is ineffective, since the directories in
    dirnames have already been generated by the time dirnames itself is
    generated.

    By default errors from the os.listdir() call are ignored.  If
    optional arg 'onerror' is specified, it should be a function; it
    will be called with one argument, an os.error instance.  It can
    report the error to continue with the walk, or raise the exception
    to abort the walk.  Note that the filename is available as the
    filename attribute of the exception object.

    By default, os.walk does not follow symbolic links to subdirectories on
    systems that support them.  In order to get this functionality, set the
    optional argument 'followlinks' to true.

    Caution:  if you pass a relative pathname for top, don't change the
    current working directory between resumptions of walk.  walk never
    changes the current directory, and assumes that the client doesn't
    either.

    Example:

    import os
    from os.path import join, getsize
    for root, dirs, files in os.walk('python/Lib/email'):
        print root, "consumes",
        print sum([getsize(join(root, name)) for name in files]),
        print "bytes in", len(files), "non-directory files"
        if 'CVS' in dirs:
            dirs.remove('CVS')  # don't visit CVS directories
    """
    from os.path import join, isdir, islink
    # We may not have read permission for top, in which case we can't
    # get a list of the files the directory contains.  os.path.walk
    # always suppressed the exception then, rather than blow up for a
    # minor reason when (say) a thousand readable directories are still
    # left to visit.  That logic is copied here.
    try:
        # Note that listdir and error are globals in this module due
        # to earlier import-*.
        names = listdir(top)
    except error, err:
        if onerror is not None:
            onerror(err)
        return
    # Partition the entries into subdirectories and everything else.
    dirs, nondirs = [], []
    for name in names:
        if isdir(join(top, name)):
            dirs.append(name)
        else:
            nondirs.append(name)
    if topdown:
        yield top, dirs, nondirs
    for name in dirs:
        # NOTE: this local "path" deliberately shadows the module-level
        # "path" (os.path) for the remainder of the loop body.
        path = join(top, name)
        if followlinks or not islink(path):
            for x in walk(path, topdown, onerror, followlinks):
                yield x
    if not topdown:
        yield top, dirs, nondirs
__all__.append("walk")
# Make sure os.environ exists, at least
# (the platform module imported via "from ... import *" above normally
# provides it; fall back to an empty mapping if it did not).
try:
    environ
except NameError:
    environ = {}
# Convenience wrappers around the low-level execv/execve primitives.
# The "l" variants take arguments inline; the "p" variants search $PATH;
# the "e" variants take the environment as the *last* positional argument.
def execl(file, *args):
    """execl(file, *args)

    Execute the executable file with argument list args, replacing the
    current process. """
    execv(file, args)
def execle(file, *args):
    """execle(file, *args, env)

    Execute the executable file with argument list args and
    environment env, replacing the current process. """
    env = args[-1]
    execve(file, args[:-1], env)
def execlp(file, *args):
    """execlp(file, *args)

    Execute the executable file (which is searched for along $PATH)
    with argument list args, replacing the current process. """
    execvp(file, args)
def execlpe(file, *args):
    """execlpe(file, *args, env)

    Execute the executable file (which is searched for along $PATH)
    with argument list args and environment env, replacing the current
    process. """
    env = args[-1]
    execvpe(file, args[:-1], env)
def execvp(file, args):
    """execvp(file, args)

    Execute the executable file (which is searched for along $PATH)
    with argument list args, replacing the current process.
    args may be a list or tuple of strings. """
    _execvpe(file, args)
def execvpe(file, args, env):
    """execvpe(file, args, env)

    Execute the executable file (which is searched for along $PATH)
    with argument list args and environment env, replacing the
    current process.
    args may be a list or tuple of strings. """
    _execvpe(file, args, env)
__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
def _execvpe(file, args, env=None):
    # Shared implementation for execvp/execvpe: try "file" against every
    # directory on $PATH until one of the exec calls succeeds (and thus
    # never returns).
    if env is not None:
        func = execve
        argrest = (args, env)
    else:
        func = execv
        argrest = (args,)
        env = environ
    head, tail = path.split(file)
    # If the name contains a directory component, do not search $PATH.
    if head:
        func(file, *argrest)
        return
    if 'PATH' in env:
        envpath = env['PATH']
    else:
        envpath = defpath
    PATH = envpath.split(pathsep)
    saved_exc = None
    saved_tb = None
    for dir in PATH:
        fullname = path.join(dir, file)
        try:
            func(fullname, *argrest)
        except error, e:
            tb = sys.exc_info()[2]
            # Remember the first "interesting" failure (not a plain
            # file-not-found) so it can be re-raised after the scan.
            if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR
                and saved_exc is None):
                saved_exc = e
                saved_tb = tb
    if saved_exc:
        raise error, saved_exc, saved_tb
    # NOTE(review): if PATH was empty, "e" and "tb" are unbound here and
    # this raises NameError instead of the intended error -- confirm.
    raise error, e, tb
# Change environ to automatically call putenv() if it exists
try:
    # This will fail if there's no putenv
    putenv
except NameError:
    pass
else:
    # putenv exists: wrap the raw environ dict in a mapping whose mutations
    # are mirrored into the C-level environment via putenv()/unsetenv().
    import UserDict
    # Fake unsetenv() for Windows
    # not sure about os2 here but
    # I'm guessing they are the same.
    if name in ('os2', 'nt'):
        def unsetenv(key):
            putenv(key, "")
    if name == "riscos":
        # On RISC OS, all env access goes through getenv and putenv
        from riscosenviron import _Environ
    elif name in ('os2', 'nt'): # Where Env Var Names Must Be UPPERCASE
        # But we store them as upper case
        class _Environ(UserDict.IterableUserDict):
            def __init__(self, environ):
                UserDict.UserDict.__init__(self)
                data = self.data
                for k, v in environ.items():
                    data[k.upper()] = v
            def __setitem__(self, key, item):
                putenv(key, item)
                self.data[key.upper()] = item
            def __getitem__(self, key):
                return self.data[key.upper()]
            # Pick a __delitem__ implementation at class-creation time,
            # depending on whether unsetenv is available.
            try:
                unsetenv
            except NameError:
                def __delitem__(self, key):
                    del self.data[key.upper()]
            else:
                def __delitem__(self, key):
                    unsetenv(key)
                    del self.data[key.upper()]
                def clear(self):
                    for key in self.data.keys():
                        unsetenv(key)
                        del self.data[key]
                def pop(self, key, *args):
                    unsetenv(key)
                    return self.data.pop(key.upper(), *args)
            def has_key(self, key):
                return key.upper() in self.data
            def __contains__(self, key):
                return key.upper() in self.data
            def get(self, key, failobj=None):
                return self.data.get(key.upper(), failobj)
            def update(self, dict=None, **kwargs):
                if dict:
                    try:
                        keys = dict.keys()
                    except AttributeError:
                        # List of (key, value)
                        for k, v in dict:
                            self[k] = v
                    else:
                        # got keys
                        # cannot use items(), since mappings
                        # may not have them.
                        for k in keys:
                            self[k] = dict[k]
                if kwargs:
                    self.update(kwargs)
            def copy(self):
                return dict(self)
    else: # Where Env Var Names Can Be Mixed Case
        class _Environ(UserDict.IterableUserDict):
            def __init__(self, environ):
                UserDict.UserDict.__init__(self)
                self.data = environ
            def __setitem__(self, key, item):
                putenv(key, item)
                self.data[key] = item
            def update(self, dict=None, **kwargs):
                if dict:
                    try:
                        keys = dict.keys()
                    except AttributeError:
                        # List of (key, value)
                        for k, v in dict:
                            self[k] = v
                    else:
                        # got keys
                        # cannot use items(), since mappings
                        # may not have them.
                        for k in keys:
                            self[k] = dict[k]
                if kwargs:
                    self.update(kwargs)
            # Deletion helpers only exist when unsetenv is available.
            try:
                unsetenv
            except NameError:
                pass
            else:
                def __delitem__(self, key):
                    unsetenv(key)
                    del self.data[key]
                def clear(self):
                    for key in self.data.keys():
                        unsetenv(key)
                        del self.data[key]
                def pop(self, key, *args):
                    unsetenv(key)
                    return self.data.pop(key, *args)
            def copy(self):
                return dict(self)
    environ = _Environ(environ)
# Thin convenience wrapper over the environ mapping defined above.
def getenv(key, default=None):
    """Get an environment variable, return None if it doesn't exist.
    The optional second argument can specify an alternate default."""
    return environ.get(key, default)
__all__.append("getenv")
def _exists(name):
    """Return True if *name* is bound in this module's namespace.

    Used below to probe which platform primitives (fork, spawnv, ...)
    the earlier "from <platform module> import *" actually provided.
    """
    try:
        eval(name)
    except NameError:
        return False
    return True
# Supply spawn*() (probably only for Unix)
# Emulated on top of fork()/exec*() when the platform module did not
# provide a native spawnv.
if _exists("fork") and not _exists("spawnv") and _exists("execv"):
    P_WAIT = 0
    P_NOWAIT = P_NOWAITO = 1
    # XXX Should we support P_DETACH? I suppose it could fork()**2
    # and close the std I/O streams. Also, P_OVERLAY is the same
    # as execv*()?
    def _spawnvef(mode, file, args, env, func):
        # Internal helper; func is the exec*() function to use
        pid = fork()
        if not pid:
            # Child
            try:
                if env is None:
                    func(file, args)
                else:
                    func(file, args, env)
            except:
                # exec failed: exit the child with a conventional code.
                _exit(127)
        else:
            # Parent
            if mode == P_NOWAIT:
                return pid # Caller is responsible for waiting!
            # P_WAIT: reap the child and translate its status.
            while 1:
                wpid, sts = waitpid(pid, 0)
                if WIFSTOPPED(sts):
                    continue
                elif WIFSIGNALED(sts):
                    return -WTERMSIG(sts)
                elif WIFEXITED(sts):
                    return WEXITSTATUS(sts)
                else:
                    raise error, "Not stopped, signaled or exited???"
    def spawnv(mode, file, args):
        """spawnv(mode, file, args) -> integer

        Execute file with arguments from args in a subprocess.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, None, execv)
    def spawnve(mode, file, args, env):
        """spawnve(mode, file, args, env) -> integer

        Execute file with arguments from args in a subprocess with the
        specified environment.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, env, execve)
    # Note: spawnvp[e] isn't currently supported on Windows
    def spawnvp(mode, file, args):
        """spawnvp(mode, file, args) -> integer

        Execute file (which is looked for along $PATH) with arguments from
        args in a subprocess.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, None, execvp)
    def spawnvpe(mode, file, args, env):
        """spawnvpe(mode, file, args, env) -> integer

        Execute file (which is looked for along $PATH) with arguments from
        args in a subprocess with the supplied environment.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, env, execvpe)
if _exists("spawnv"):
    # These aren't supplied by the basic Windows code
    # but can be easily implemented in Python
    def spawnl(mode, file, *args):
        """spawnl(mode, file, *args) -> integer

        Execute file with arguments from args in a subprocess.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return spawnv(mode, file, args)
    def spawnle(mode, file, *args):
        """spawnle(mode, file, *args, env) -> integer

        Execute file with arguments from args in a subprocess with the
        supplied environment.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        env = args[-1]
        return spawnve(mode, file, args[:-1], env)
    __all__.extend(["spawnv", "spawnve", "spawnl", "spawnle",])
if _exists("spawnvp"):
    # At the moment, Windows doesn't implement spawnvp[e],
    # so it won't have spawnlp[e] either.
    def spawnlp(mode, file, *args):
        """spawnlp(mode, file, *args) -> integer

        Execute file (which is looked for along $PATH) with arguments from
        args in a subprocess with the supplied environment.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return spawnvp(mode, file, args)
    def spawnlpe(mode, file, *args):
        """spawnlpe(mode, file, *args, env) -> integer

        Execute file (which is looked for along $PATH) with arguments from
        args in a subprocess with the supplied environment.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        env = args[-1]
        return spawnvpe(mode, file, args[:-1], env)
    __all__.extend(["spawnvp", "spawnvpe", "spawnlp", "spawnlpe",])
# Supply popen2 etc. (for Unix)
# Deprecated shims implemented on top of the subprocess module.
if _exists("fork"):
    if not _exists("popen2"):
        def popen2(cmd, mode="t", bufsize=-1):
            """Execute the shell command 'cmd' in a sub-process.  On UNIX, 'cmd'
            may be a sequence, in which case arguments will be passed directly to
            the program without shell intervention (as with os.spawnv()).  If 'cmd'
            is a string it will be passed to the shell (as with os.system()).  If
            'bufsize' is specified, it sets the buffer size for the I/O pipes.  The
            file objects (child_stdin, child_stdout) are returned."""
            import warnings
            msg = "os.popen2 is deprecated. Use the subprocess module."
            warnings.warn(msg, DeprecationWarning, stacklevel=2)
            import subprocess
            PIPE = subprocess.PIPE
            p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
                                 bufsize=bufsize, stdin=PIPE, stdout=PIPE,
                                 close_fds=True)
            return p.stdin, p.stdout
        __all__.append("popen2")
    if not _exists("popen3"):
        def popen3(cmd, mode="t", bufsize=-1):
            """Execute the shell command 'cmd' in a sub-process.  On UNIX, 'cmd'
            may be a sequence, in which case arguments will be passed directly to
            the program without shell intervention (as with os.spawnv()).  If 'cmd'
            is a string it will be passed to the shell (as with os.system()).  If
            'bufsize' is specified, it sets the buffer size for the I/O pipes.  The
            file objects (child_stdin, child_stdout, child_stderr) are returned."""
            import warnings
            msg = "os.popen3 is deprecated. Use the subprocess module."
            warnings.warn(msg, DeprecationWarning, stacklevel=2)
            import subprocess
            PIPE = subprocess.PIPE
            p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
                                 bufsize=bufsize, stdin=PIPE, stdout=PIPE,
                                 stderr=PIPE, close_fds=True)
            return p.stdin, p.stdout, p.stderr
        __all__.append("popen3")
    if not _exists("popen4"):
        def popen4(cmd, mode="t", bufsize=-1):
            """Execute the shell command 'cmd' in a sub-process.  On UNIX, 'cmd'
            may be a sequence, in which case arguments will be passed directly to
            the program without shell intervention (as with os.spawnv()).  If 'cmd'
            is a string it will be passed to the shell (as with os.system()).  If
            'bufsize' is specified, it sets the buffer size for the I/O pipes.  The
            file objects (child_stdin, child_stdout_stderr) are returned."""
            import warnings
            msg = "os.popen4 is deprecated. Use the subprocess module."
            warnings.warn(msg, DeprecationWarning, stacklevel=2)
            import subprocess
            PIPE = subprocess.PIPE
            # stderr is merged into stdout (the "4" variant's contract).
            p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
                                 bufsize=bufsize, stdin=PIPE, stdout=PIPE,
                                 stderr=subprocess.STDOUT, close_fds=True)
            return p.stdin, p.stdout
        __all__.append("popen4")
# Register pickle support for the stat_result/statvfs_result structseq
# types (they cannot be pickled without explicit reduction helpers).
import copy_reg as _copy_reg
def _make_stat_result(tup, dict):
    # Reconstructor invoked at unpickle time.
    return stat_result(tup, dict)
def _pickle_stat_result(sr):
    (type, args) = sr.__reduce__()
    return (_make_stat_result, args)
try:
    _copy_reg.pickle(stat_result, _pickle_stat_result, _make_stat_result)
except NameError: # stat_result may not exist
    pass
def _make_statvfs_result(tup, dict):
    # Reconstructor invoked at unpickle time.
    return statvfs_result(tup, dict)
def _pickle_statvfs_result(sr):
    (type, args) = sr.__reduce__()
    return (_make_statvfs_result, args)
try:
    _copy_reg.pickle(statvfs_result, _pickle_statvfs_result,
                     _make_statvfs_result)
except NameError: # statvfs_result may not exist
    pass
# Fallback urandom() reading /dev/urandom directly, used only when the
# platform module did not supply a native implementation.
if not _exists("urandom"):
    def urandom(n):
        """urandom(n) -> str

        Return a string of n random bytes suitable for cryptographic use.
        """
        try:
            _urandomfd = open("/dev/urandom", O_RDONLY)
        except (OSError, IOError):
            raise NotImplementedError("/dev/urandom (or equivalent) not found")
        try:
            bs = b""
            # Accumulate until n bytes are read; read() may return short.
            # NOTE(review): if read() ever returns b"" (EOF), this loops
            # forever -- /dev/urandom should never hit EOF, but confirm.
            while n - len(bs) >= 1:
                bs += read(_urandomfd, n - len(bs))
        finally:
            close(_urandomfd)
        return bs
|
MicroTrustRepos/microkernel
|
src/l4/pkg/python/contrib/Lib/os.py
|
Python
|
gpl-2.0
| 26,337
|
[
"VisIt"
] |
d49d3b0d4a54fd83062bb8737184d545dcbc9e4ac5bd82af4bd057b5f9890b1d
|
"""
A script to prepare/execute/analyse FHI-aims polarisation calculations
@author Tomas Lazauskas, 2016
@web www.lazauskas.net
@email tomas.lazauskas[a]gmail.com
"""
import os
from optparse import OptionParser
import source.IO as IO
import source.Messages as Messages
from source.Messages import log
# a directory where systems in the xyz format and control.in file should be put
_input_directory = "input"
# input file extension
_input_extension = "xyz"
# a directory to save the prepared input files for the simulations
_output_directory = "output"
# prefix for the per-spin output subdirectories (e.g. "spin_ini_2")
_output_prefix = "spin_ini_"
# fhi-aims control file name
_aims_control = "control.in"
# fhi-aims geometry file name
_aims_geometry = "geometry.in"
# default_initial_moment keyword in the control.in file
_aims_keyword_def_ini_moment = "default_initial_moment"
# fhi-aims execution command (as an example; adjust for the local install)
_aims_exe_cmd = "source /opt/intel/composer_xe_2015/mkl/bin/mklvars.sh intel64; mpirun -n 8 /Users/Tomas/Software/fhi-aims.160328/bin/aims.160328_1.mpi.x > fhiaims.out"
def cmd_line_args():
    """
    Handles command line arguments and options.

    Returns:
      (options, args): the parsed optparse options object (spinfr, spinto,
      prepare, execute) and the list of remaining positional arguments.
    """
    usage = "usage: %prog "
    parser = OptionParser(usage=usage)
    parser.add_option("-a", "--spinfr", dest="spinfr", default=0.0, type="float",
                      help="Initial spin values from")
    # fixed typo in the user-facing help text: "spinn" -> "spin"
    parser.add_option("-b", "--spinto", dest="spinto", default=0.0, type="float",
                      help="Initial spin values to")
    parser.add_option("-p", "--prepare", dest="prepare", action="store_true", default=False,
                      help="Prepares directories for the spin calculations")
    parser.add_option("-x", "--execute", dest="execute", action="store_true", default=False,
                      help="Executes the simulations")
    # parser.add_option("-l", "--analyse", dest="analyse", action="store_true", default=False,
    #                   help="Analyses the simulations")
    # stop option parsing at the first positional argument
    parser.disable_interspersed_args()
    (options, args) = parser.parse_args()
    return options, args
def execute():
    """
    Executes the FHI-aims calculations.

    Walks every directory (found via IO.get_dir_list) that contains a
    geometry.in file, runs the configured _aims_exe_cmd inside it, and
    returns to the original working directory after each run.
    """
    Messages.log(__name__, "Running the simulations in: %s" % (_output_directory))
    dir_list = IO.get_dir_list(_aims_geometry)
    cwd = os.getcwd()
    dir_tot_str = str(len(dir_list))
    # enumerate replaces the manual dir_cnt counter; the unused
    # main_dir_path local from the original has been removed.
    for dir_cnt, dir_path in enumerate(dir_list, start=1):
        Messages.log(__name__, "%s/%s Executing FHI-aims in: %s " % (str(dir_cnt), dir_tot_str, dir_path), 1)
        os.chdir(dir_path)
        # blocking shell invocation of the aims binary
        os.system(_aims_exe_cmd)
        os.chdir(cwd)
    Messages.log(__name__, "Finished executing FHI-aims calculations")
def prepare_control_file(ini_spin_value):
    """
    Rewrites the control.in file in the current directory, replacing the
    value of the "default_initial_moment" keyword with *ini_spin_value*.

    A temporary copy is written first and then atomically moved over the
    original, so a crash mid-write cannot truncate control.in.
    """
    aims_control_temp = "_%s" % (_aims_control)
    # context managers guarantee both handles are closed even on error
    # (the original left them open if a write failed)
    with open(_aims_control, "r") as f_cntlr_in:
        with open(aims_control_temp, "w") as f_cntlr_out:
            for line in f_cntlr_in:
                # change the default initial spin value
                if _aims_keyword_def_ini_moment in line:
                    f_cntlr_out.write("%s %s\n" % (_aims_keyword_def_ini_moment, str(ini_spin_value)))
                # write the rest of the lines as they are
                else:
                    f_cntlr_out.write(line)
    # os.rename replaces the original shell "mv -f" call: no shell process,
    # and it overwrites the destination on POSIX just like mv -f did.
    os.rename(aims_control_temp, _aims_control)
def prepare_directories(options):
    """
    The main method to prepare directories for the spin calculations.

    options: parsed command-line options (uses spinfr/spinto downstream).
    """
    Messages.log(__name__, "Preparing directories for the spin calculations")
    # reading the input systems
    systems_list = read_systems(_input_extension)
    # preparing directories with adjusted spin
    prepare_spins(systems_list, options)
def prepare_spins(systems_list, options):
    """
    Prepares simulation directories for the read systems.

    One subdirectory per integer spin value in [spinfr, spinto] is created
    under the output directory; each contains per-system geometry.in and
    control.in files.
    """
    main_dir_path = os.getcwd()
    Messages.log(__name__, "Preparing the simulation files. The files will be saved in: %s" % (_output_directory))
    IO.checkDirectory(_output_directory, createMd=1)
    os.chdir(_output_directory)
    output_dir_path = os.getcwd()
    spins_from = options.spinfr
    spins_to = options.spinto
    # NOTE: fractional bounds are truncated by int(); the upper bound is
    # inclusive because of the +1.
    for spin in range(int(spins_from), int(spins_to)+1):
        Messages.log(__name__, "Preparing: %s %s" % (_aims_keyword_def_ini_moment, str(spin)), 1)
        # creates directories for the systems
        spin_dir_name = "%s%s" % (_output_prefix, str(spin))
        IO.checkDirectory(spin_dir_name, createMd=1)
        os.chdir(spin_dir_name)
        # prepares control.in and geometry.in files
        write_systems(systems_list, spin, main_dir_path)
        os.chdir(output_dir_path)
    os.chdir(main_dir_path)
def read_systems(file_extension):
    """
    Read every file with *file_extension* from the input directory and
    return the resulting list of system objects.
    """
    Messages.log(__name__, "Reading in the systems from: %s" % (_input_directory), 1)
    previous_dir = os.getcwd()
    # the IO helpers operate on the current directory, so hop in and out
    os.chdir(_input_directory)
    systems_list = [IO.readSystemFromFileXYZ(file_name)
                    for file_name in IO.get_file_list(file_extension)]
    os.chdir(previous_dir)
    Messages.log(__name__, "Read %s systems." % (str(len(systems_list))), 1)
    return systems_list
def write_systems(systems_list, spin_file, main_dir_path):
    """
    Writes each system as a geometry.in file and copies + adjusts a
    control.in file into that system's directory.

    systems_list: system objects read from the input directory.
    spin_file: the default_initial_moment value to write into control.in.
    main_dir_path: absolute path of the project root (where input/ lives).
    """
    cwd = os.getcwd()
    for system in systems_list:
        system_name = system.name
        IO.checkDirectory(system_name, createMd=1)
        os.chdir(system_name)
        # writes system as a geometry.in file
        _, _ = IO.writeAimsGeometry(system, _aims_geometry)
        # copies the control.in file
        cmd_line = "cp %s/%s/%s . " % (main_dir_path, _input_directory, _aims_control)
        os.system(cmd_line)
        # adjusts the control.in file
        prepare_control_file(spin_file)
        os.chdir(cwd)
# Script entry point: parse options, then run the requested phases.
if __name__ == "__main__":
    Messages.log(__name__, "Running..")
    # reading the command line arguments and options
    options, _ = cmd_line_args()
    # prepare the directories?
    if options.prepare:
        prepare_directories(options)
    # execute the simulations?
    if options.execute:
        execute()
    Messages.log(__name__, "Finished.")
    Messages.printAuthor()
|
tomaslaz/KLMC_Analysis
|
DM_FHIaims_Spin_Analysis.py
|
Python
|
gpl-3.0
| 6,238
|
[
"FHI-aims"
] |
3f3432b428f6568e7e91a143515704096eaca9bfb5130300fbd802c35c0b590e
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# context: context needed to annotate a file
from __future__ import absolute_import
import collections
import contextlib
import hashlib
import os
from edenscm.mercurial import (
error,
lock as lockmod,
mdiff,
node,
progress,
pycompat,
scmutil,
util,
)
from edenscm.mercurial.i18n import _
from edenscm.mercurial.pycompat import range
# pyre-fixme[21]: Could not find name `linelog` in `edenscmnative`.
from edenscmnative import linelog as linelogmod
from . import error as faerror, revmap as revmapmod
# given path, get filelog, cached
# (lru-cached so repeated annotate passes over the same file do not
# re-open its filelog)
@util.lrucachefunc
def _getflog(repo, path):
    return repo.file(path)
# extracted from mercurial.context.basefilectx.annotate
def _parents(f, follow=True):
    """Return the parent filectxs of filectx *f*.

    If *follow* is False, parents reached through a rename (i.e. with a
    different path) are dropped.  Each returned parent gets its _filelog
    populated from the lru cache to avoid redundant filelog opens.
    """
    # Cut _descendantrev here to mitigate the penalty of lazy linkrev
    # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
    # from the topmost introrev (= srcrev) down to p.linkrev() if it
    # isn't an ancestor of the srcrev.
    f._changeid
    pl = f.parents()
    # Don't return renamed parents if we aren't following.
    if not follow:
        pl = [p for p in pl if p.path() == f.path()]
    # renamed filectx won't have a filelog yet, so set it
    # from the cache to save time
    for p in pl:
        # idiom fix (PEP 8 / E713): "not in" instead of "not ... in"
        if "_filelog" not in p.__dict__:
            p._filelog = _getflog(f._repo, p.path())
    return pl
# extracted from mercurial.context.basefilectx.annotate. slightly modified
# so it takes a fctx instead of a pair of text and fctx.
def _decorate(fctx):
    """Return ([(fctx, lineno), ...], data) pairing every line with fctx."""
    body = fctx.data()
    nlines = body.count(b"\n")
    # a trailing line without a final newline still counts as a line
    if body and not body.endswith(b"\n"):
        nlines += 1
    annotated = [(fctx, lineno) for lineno in range(nlines)]
    return (annotated, body)
# extracted from mercurial.context.basefilectx.annotate. slightly modified
# so it takes an extra "blocks" parameter calculated elsewhere, instead of
# calculating diff here.
def _pair(parent, child, blocks):
    """Copy parent's annotations into child for every unchanged diff block."""
    for hunk, blocktype in blocks:
        # Changed blocks ('!') or blocks made only of blank lines ('~')
        # belong to the child.
        if blocktype != "=":
            continue
        a1, a2, b1, b2 = hunk
        child[0][b1:b2] = parent[0][a1:a2]
    return child
# like scmutil.revsingle, but with an lru cache, so resolved contexts
# (and their loaded state, like manifests) can be reused across calls
_revsingle = util.lrucachefunc(scmutil.revsingle)
def resolvefctx(repo, rev, path, resolverev=False, adjustctx=None):
    """(repo, str, str) -> fctx

    get the filectx object from repo, rev, path, in an efficient way.

    if resolverev is True, "rev" is a revision specified by the revset
    language, otherwise "rev" is a nodeid, or a revision number that can
    be consumed by repo.__getitem__.

    if adjustctx is not None, the returned fctx will point to a changeset
    that introduces the change (last modified the file). if adjustctx
    is 'linkrev', trust the linkrev and do not adjust it. this is noticeably
    faster for big repos but is incorrect for some cases.
    """
    # revset resolution is cached via _revsingle; ints/nodeids go straight
    # through repo.__getitem__
    if resolverev and not isinstance(rev, int) and rev is not None:
        ctx = _revsingle(repo, rev)
    else:
        ctx = repo[rev]
    # If we don't need to adjust the linkrev, create the filectx using the
    # changectx instead of using ctx[path]. This means it already has the
    # changectx information, so blame -u will be able to look directly at the
    # commitctx object instead of having to resolve it by going through the
    # manifest. In a lazy-manifest world this can prevent us from downloading a
    # lot of data.
    if adjustctx is None:
        # ctx.rev() is None means it's the working copy, which is a special
        # case.
        if ctx.rev() is None:
            fctx = ctx[path]
        else:
            fctx = repo.filectx(path, changeid=ctx.rev())
    else:
        fctx = ctx[path]
        if adjustctx == "linkrev":
            introrev = fctx.linkrev()
        else:
            introrev = fctx.introrev()
        # re-point the fctx at the changeset that introduced the change
        if introrev != ctx.rev():
            fctx._changeid = introrev
            fctx._changectx = repo[introrev]
    return fctx
# like mercurial.store.encodedir, but use linelog suffixes: .m, .l, .lock
def encodedir(path):
    """Escape directory names that would collide with cache-file suffixes.

    Any path component ending in .hg, .l, .m or .lock gets ".hg" appended
    so the on-disk cache layout cannot clash with those reserved suffixes.
    """
    # order matters: ".hg/" must be escaped before the escapes below
    # introduce new ".hg/" substrings of their own
    for reserved in (".hg", ".l", ".m", ".lock"):
        path = path.replace(reserved + "/", reserved + ".hg/")
    return path
def hashdiffopts(diffopts):
    """Return a short (6 hex chars) deterministic hash of a diffopts object."""
    # sort by option name so the hash is independent of dict ordering
    diffoptstr = str(
        sorted((k, getattr(diffopts, k)) for k in mdiff.diffopts.defaults.keys())
    ).encode("utf-8")
    return hashlib.sha1(diffoptstr).hexdigest()[:6]
# hash of the default diff options, used below to keep default cache
# directory names short
_defaultdiffopthash = hashdiffopts(mdiff.defaultopts)
class annotateopts(object):
    """like mercurial.mdiff.diffopts, but is for annotate

    followrename: follow renames, like "hg annotate -f"
    followmerge: follow p2 of a merge changeset, otherwise p2 is ignored
    """

    # option name -> default value; unknown keyword args are ignored
    defaults = {"diffopts": None, "followrename": True, "followmerge": True}
    def __init__(self, **opts):
        for k, v in pycompat.iteritems(self.defaults):
            setattr(self, k, opts.get(k, v))
    @util.propertycache
    def shortstr(self):
        """represent opts in a short string, suitable for a directory name"""
        result = ""
        if not self.followrename:
            result += "r0"
        if not self.followmerge:
            result += "m0"
        if self.diffopts is not None:
            assert isinstance(self.diffopts, mdiff.diffopts)
            diffopthash = hashdiffopts(self.diffopts)
            # only non-default diff options affect the directory name
            if diffopthash != _defaultdiffopthash:
                result += "i" + diffopthash
        return result or "default"
# shared instance holding all-default annotate options
defaultopts = annotateopts()
class _annotatecontext(object):
"""do not use this class directly as it does not use lock to protect
writes. use "with annotatecontext(...)" instead.
"""
    def __init__(self, repo, path, linelogpath, revmappath, opts):
        # repo / ui handles for data access and logging
        self.repo = repo
        self.ui = repo.ui
        self.path = path
        self.opts = opts
        # on-disk locations of the linelog and revmap cache files
        self.linelogpath = linelogpath
        self.revmappath = revmappath
        # lazily opened cache objects; see the linelog/revmap properties
        self._linelog = None
        self._revmap = None
        self._node2path = {} # {str: str}
    @property
    def linelog(self):
        # open (and cache) the on-disk linelog on first access
        if self._linelog is None:
            self._linelog = linelogmod.linelog(pycompat.encodeutf8(self.linelogpath))
        return self._linelog
    @property
    def revmap(self):
        # open (and cache) the on-disk revmap on first access
        if self._revmap is None:
            self._revmap = revmapmod.revmap(pycompat.encodeutf8(self.revmappath))
        return self._revmap
    def close(self):
        # flush pending revmap writes before dropping the handle
        if self._revmap is not None:
            self._revmap.flush()
            self._revmap = None
        if self._linelog is not None:
            self._linelog.close()
            self._linelog = None
    # release resources when the context object is garbage-collected too
    __del__ = close
    def rebuild(self):
        """delete linelog and revmap, useful for rebuilding"""
        self.close()
        self._node2path.clear()
        # _unlinkpaths (helper defined elsewhere in this module) removes
        # the on-disk cache files
        _unlinkpaths([self.revmappath, self.linelogpath])
    @property
    def lastnode(self):
        """return last node in revmap, or None if revmap is empty"""
        if self._revmap is None:
            # fast path, read revmap without loading its full content
            return revmapmod.getlastnode(self.revmappath)
        else:
            return self._revmap.rev2hsh(self._revmap.maxrev)
    def isuptodate(self, master, strict=True):
        """return True if the revmap / linelog is up-to-date, or the file
        does not exist in the master revision. False otherwise.

        it tries to be fast and could return false negatives, because of the
        use of linkrev instead of introrev.

        useful for both server and client to decide whether to update
        fastannotate cache or not.

        if strict is True, even if fctx exists in the revmap, but is not the
        last node, isuptodate will return False. it's good for performance - no
        expensive check was done.

        if strict is False, if fctx exists in the revmap, this function may
        return True. this is useful for the client to skip downloading the
        cache if the client's master is behind the server's.
        """
        lastnode = self.lastnode
        try:
            f = self._resolvefctx(master, resolverev=True)
            # choose linkrev instead of introrev as the check is meant to be
            # *fast*.
            linknode = self.repo.changelog.node(f.linkrev())
            if not strict and lastnode and linknode != lastnode:
                # check if f.node() is in the revmap. note: this loads the
                # revmap and can be slow.
                return self.revmap.hsh2rev(linknode) is not None
            # avoid resolving old manifest, or slow adjustlinkrev to be fast,
            # false negatives are acceptable in this case.
            return linknode == lastnode
        except LookupError:
            # master does not have the file, or the revmap is ahead
            return True
    def annotate(self, rev, master=None, showpath=False, showlines=False):
        """incrementally update the cache so it includes revisions in the main
        branch till 'master'. and run annotate on 'rev', which may or may not be
        included in the main branch.

        if master is None, do not update linelog.

        the first value returned is the annotate result, it is [(node, linenum)]
        by default. [(node, linenum, path)] if showpath is True.

        if showlines is True, a second value will be returned, it is a list of
        corresponding line contents.

        :param rev: revision to annotate (int revnum or a commit hash)
        :param master: main-branch revision to sync the linelog up to, or None
        :param showpath: append the file path to each result tuple
        :param showlines: also return the resolved line contents
        """
        # the fast path test requires commit hash, convert rev number to hash,
        # so it may hit the fast path. note: in the "fctx" mode, the "annotate"
        # command could give us a revision number even if the user passes a
        # commit hash.
        if isinstance(rev, int):
            rev = node.hex(self.repo.changelog.node(rev))

        # fast path: if rev is in the main branch already
        directly, revfctx = self.canannotatedirectly(rev)
        if directly:
            if self.ui.debugflag:
                self.ui.debug(
                    "fastannotate: %s: using fast path "
                    "(resolved fctx: %s)\n"
                    % (self.path, util.safehasattr(revfctx, "node"))
                )
            return self.annotatedirectly(revfctx, showpath, showlines)

        # resolve master
        masterfctx = None
        if master:
            try:
                masterfctx = self._resolvefctx(master, resolverev=True, adjustctx=True)
            except LookupError:  # master does not have the file
                pass
            else:
                if masterfctx in self.revmap:  # no need to update linelog
                    masterfctx = None

        #                  ... - @ <- rev (can be an arbitrary changeset,
        #                 /            not necessarily a descendant
        #      master -> o             of master)
        #                |
        #     a merge -> o         'o': new changesets in the main branch
        #                |\        '#': revisions in the main branch that
        #                o *            exist in linelog / revmap
        #                | .       '*': changesets in side branches, or
        # last master -> # .            descendants of master
        #                | .
        #                # *       joint: '#', and is a parent of a '*'
        #                |/
        #     a joint -> # ^^^^ --- side branches
        #                |
        #                ^ --- main branch (in linelog)

        # these DFSes are similar to the traditional annotate algorithm.
        # we cannot really reuse the code for perf reason.

        # 1st DFS calculates merges, joint points, and needed.
        # "needed" is a simple reference counting dict to free items in
        # "hist", reducing its memory usage otherwise could be huge.
        initvisit = [revfctx]
        if masterfctx:
            if masterfctx.rev() is None:
                raise error.Abort(
                    _("cannot update linelog to wdir()"),
                    hint=_("set fastannotate.mainbranch"),
                )
            initvisit.append(masterfctx)
        visit = initvisit[:]
        pcache = {}
        needed = {revfctx: 1}
        hist = {}  # {fctx: ([(llrev or fctx, linenum)], text)}
        while visit:
            f = visit.pop()
            if f in pcache or f in hist:
                continue
            if f in self.revmap:  # in the old main branch, it's a joint
                llrev = self.revmap.hsh2rev(f.node())
                self.linelog.annotate(llrev)
                result = self.linelog.annotateresult
                hist[f] = (result, f.data())
                continue
            pl = self._parentfunc(f)
            pcache[f] = pl
            for p in pl:
                needed[p] = needed.get(p, 0) + 1
                if p not in pcache:
                    visit.append(p)

        # 2nd (simple) DFS calculates new changesets in the main branch
        # ('o' nodes in # the above graph), so we know when to update linelog.
        newmainbranch = set()
        f = masterfctx
        while f and f not in self.revmap:
            newmainbranch.add(f)
            pl = pcache[f]
            if pl:
                f = pl[0]
            else:
                f = None
                break

        # f, if present, is the position where the last build stopped at, and
        # should be the "master" last time. check to see if we can continue
        # building the linelog incrementally. (we cannot if diverged)
        if masterfctx is not None:
            self._checklastmasterhead(f)

        if self.ui.debugflag:
            if newmainbranch:
                self.ui.debug(
                    "fastannotate: %s: %d new changesets in the main"
                    " branch\n" % (self.path, len(newmainbranch))
                )
            elif not hist:  # no joints, no updates
                self.ui.debug(
                    "fastannotate: %s: linelog cannot help in "
                    "annotating this revision\n" % self.path
                )

        # prepare annotateresult so we can update linelog incrementally
        self.linelog.annotate(self.linelog.maxrev)

        # 3rd DFS does the actual annotate
        visit = initvisit[:]
        with progress.bar(
            self.ui, _("building cache"), total=len(newmainbranch)
        ) as prog:
            while visit:
                f = visit[-1]
                # already annotated, nothing to do
                if f in hist:
                    visit.pop()
                    continue

                # annotate parents first (post-order traversal)
                ready = True
                pl = pcache[f]
                for p in pl:
                    if p not in hist:
                        ready = False
                        visit.append(p)
                if not ready:
                    continue

                visit.pop()
                blocks = None  # mdiff blocks, used for appending linelog
                ismainbranch = f in newmainbranch
                # curr is the same as the traditional annotate algorithm,
                # if we only care about linear history (do not follow merge),
                # then curr is not actually used.
                assert f not in hist
                curr = _decorate(f)
                for i, p in enumerate(pl):
                    bs = list(self._diffblocks(hist[p][1], curr[1]))
                    if i == 0 and ismainbranch:
                        blocks = bs
                    curr = _pair(hist[p], curr, bs)
                    # reference-count parents out of "hist" to bound memory
                    if needed[p] == 1:
                        del hist[p]
                        del needed[p]
                    else:
                        needed[p] -= 1
                hist[f] = curr
                del pcache[f]

                if ismainbranch:  # need to write to linelog
                    prog.value += 1
                    bannotated = None
                    if len(pl) == 2 and self.opts.followmerge:  # merge
                        bannotated = curr[0]
                    if blocks is None:  # no parents, add an empty one
                        blocks = list(self._diffblocks(b"", curr[1]))
                    self._appendrev(f, blocks, bannotated)
                elif showpath:  # not append linelog, but we need to record path
                    self._node2path[f.node()] = f.path()

        result = [
            ((self.revmap.rev2hsh(fr) if isinstance(fr, int) else fr.node()), l)
            for fr, l in hist[revfctx][0]
        ]  # [(node, linenumber)]
        return self._refineannotateresult(result, revfctx, showpath, showlines)
def canannotatedirectly(self, rev):
"""(str) -> bool, fctx or node.
return (True, f) if we can annotate without updating the linelog, pass
f to annotatedirectly.
return (False, f) if we need extra calculation. f is the fctx resolved
from rev.
"""
result = True
f = None
if not isinstance(rev, int) and rev is not None:
hsh = {20: bytes, 40: node.bin}.get(len(rev), lambda x: None)(rev)
if hsh is not None and (hsh, self.path) in self.revmap:
f = hsh
if f is None:
adjustctx = "linkrev" if self._perfhack else True
f = self._resolvefctx(rev, adjustctx=adjustctx, resolverev=True)
result = f in self.revmap
if not result and self._perfhack:
# redo the resolution without perfhack - as we are going to
# do write operations, we need a correct fctx.
f = self._resolvefctx(rev, adjustctx=True, resolverev=True)
return result, f
    def annotatealllines(self, rev, showpath=False, showlines=False):
        """(rev : str) -> [(node : str, linenum : int, path : str)]

        the result has the same format with annotate, but include all (including
        deleted) lines up to rev. call this after calling annotate(rev, ...) for
        better performance and accuracy.
        """
        revfctx = self._resolvefctx(rev, resolverev=True, adjustctx=True)

        # find a chain from rev to anything in the mainbranch
        if revfctx not in self.revmap:
            chain = [revfctx]
            a = ""
            while True:
                f = chain[-1]
                pl = self._parentfunc(f)
                if not pl:
                    # reached a root without hitting the main branch
                    break
                if pl[0] in self.revmap:
                    # joint with the main branch found; its text is the diff base
                    a = pl[0].data()
                    break
                chain.append(pl[0])

            # both self.linelog and self.revmap is backed by filesystem. now
            # we want to modify them but do not want to write changes back to
            # files. so we create in-memory objects and copy them. it's like
            # a "fork".
            linelog = linelogmod.linelog()
            linelog.copyfrom(self.linelog)
            linelog.annotate(linelog.maxrev)
            revmap = revmapmod.revmap()
            revmap.copyfrom(self.revmap)

            # replay the side chain oldest-first onto the in-memory copies
            for f in reversed(chain):
                b = f.data()
                blocks = list(self._diffblocks(a, b))
                self._doappendrev(linelog, revmap, f, blocks)
                a = b
        else:
            # fastpath: use existing linelog, revmap as we don't write to them
            linelog = self.linelog
            revmap = self.revmap

        lines = linelog.getalllines()
        hsh = revfctx.node()
        llrev = revmap.hsh2rev(hsh)
        # keep only lines introduced at or before rev (including deleted ones)
        result = [(revmap.rev2hsh(r), l) for r, l in lines if r <= llrev]
        # cannot use _refineannotateresult since we need custom logic for
        # resolving line contents
        if showpath:
            result = self._addpathtoresult(result, revmap)
        if showlines:
            linecontents = self._resolvelines(result, revmap, linelog)
            result = (result, linecontents)

        return result
    def _resolvelines(self, annotateresult, revmap, linelog):
        """(annotateresult) -> [line]. designed for annotatealllines.

        this is probably the most inefficient code in the whole fastannotate
        directory. but we have made a decision that the linelog does not
        store line contents. so getting them requires random accesses to
        the revlog data, since they can be many, it can be very slow.
        """
        # [llrev]
        revs = [revmap.hsh2rev(l[0]) for l in annotateresult]
        result = [None] * len(annotateresult)
        # {(rev, linenum): [lineindex]}
        key2idxs = collections.defaultdict(list)
        for i in range(len(result)):
            key2idxs[(revs[i], annotateresult[i][1])].append(i)
        while key2idxs:
            # find an unresolved line and its linelog rev to annotate
            hsh = None
            try:
                # NOTE(review): a for-loop does not raise StopIteration on
                # exhaustion, so the except branch below looks unreachable;
                # the "no more main-branch lines" case is actually handled by
                # the `hsh is None` branch. Also note `rev` deliberately leaks
                # out of this loop and is reused further down. Verify before
                # restructuring.
                for (rev, _linenum), idxs in pycompat.iteritems(key2idxs):
                    if revmap.rev2flag(rev) & revmapmod.sidebranchflag:
                        continue
                    hsh = annotateresult[idxs[0]][0]
                    break
            except StopIteration:  # no more unresolved lines
                return result
            if hsh is None:
                # the remaining key2idxs are not in main branch, resolving them
                # using the hard way...
                revlines = {}
                for (rev, linenum), idxs in pycompat.iteritems(key2idxs):
                    if rev not in revlines:
                        hsh = annotateresult[idxs[0]][0]
                        if self.ui.debugflag:
                            self.ui.debug(
                                "fastannotate: reading %s line #%d "
                                "to resolve lines %r\n"
                                % (node.short(hsh), linenum, idxs)
                            )
                        fctx = self._resolvefctx(hsh, revmap.rev2path(rev))
                        lines = mdiff.splitnewlines(fctx.data())
                        revlines[rev] = lines
                    for idx in idxs:
                        result[idx] = revlines[rev][linenum]
                assert all(x is not None for x in result)
                return result

            # run the annotate and the lines should match to the file content
            self.ui.debug(
                "fastannotate: annotate %s to resolve lines\n" % node.short(hsh)
            )
            linelog.annotate(rev)
            fctx = self._resolvefctx(hsh, revmap.rev2path(rev))
            annotated = linelog.annotateresult
            lines = mdiff.splitnewlines(fctx.data())
            if len(lines) != len(annotated):
                raise faerror.CorruptedFileError("unexpected annotated lines")
            # resolve lines from the annotate result
            for i, line in enumerate(lines):
                k = annotated[i]
                if k in key2idxs:
                    for idx in key2idxs[k]:
                        result[idx] = line
                    del key2idxs[k]
        return result
def annotatedirectly(self, f, showpath, showlines):
"""like annotate, but when we know that f is in linelog.
f can be either a 20-char str (node) or a fctx. this is for perf - in
the best case, the user provides a node and we don't need to read the
filelog or construct any filecontext.
"""
if isinstance(f, bytes):
hsh = f
else:
hsh = f.node()
llrev = self.revmap.hsh2rev(hsh)
if not llrev:
raise faerror.CorruptedFileError("%s is not in revmap" % node.hex(hsh))
if (self.revmap.rev2flag(llrev) & revmapmod.sidebranchflag) != 0:
raise faerror.CorruptedFileError(
"%s is not in revmap mainbranch" % node.hex(hsh)
)
self.linelog.annotate(llrev)
result = [(self.revmap.rev2hsh(r), l) for r, l in self.linelog.annotateresult]
return self._refineannotateresult(result, f, showpath, showlines)
def _refineannotateresult(self, result, f, showpath, showlines):
"""add the missing path or line contents, they can be expensive.
f could be either node or fctx.
"""
if showpath:
result = self._addpathtoresult(result)
if showlines:
if isinstance(f, bytes): # f: node or fctx
llrev = self.revmap.hsh2rev(f)
fctx = self._resolvefctx(f, self.revmap.rev2path(llrev))
else:
fctx = f
lines = mdiff.splitnewlines(fctx.data())
if len(lines) != len(result): # linelog is probably corrupted
raise faerror.CorruptedFileError()
result = (result, lines)
return result
    def _appendrev(self, fctx, blocks, bannotated=None):
        """Append fctx to this context's own linelog and revmap (see
        _doappendrev for the actual protocol)."""
        self._doappendrev(self.linelog, self.revmap, fctx, blocks, bannotated)
    def _diffblocks(self, a, b):
        """Return mdiff blocks between texts a and b, honoring our diffopts."""
        return mdiff.allblocks(a, b, self.opts.diffopts)
    @staticmethod
    def _doappendrev(linelog, revmap, fctx, blocks, bannotated=None):
        """append a revision to linelog and revmap

        :param linelog: linelog to append to (may be an in-memory copy)
        :param revmap: revmap to append to (may be an in-memory copy)
        :param fctx: the main-branch filectx being appended (could be a merge)
        :param blocks: mdiff blocks between the first parent's text and fctx's
        :param bannotated: pre-merged annotate result for fctx's lines when
            following merges, or None for the linear (single-parent) case
        """

        def getllrev(f):
            """(fctx) -> int"""
            # f should not be a linelog revision
            if isinstance(f, int):
                raise error.ProgrammingError("f should not be an int")
            # f is a fctx, allocate linelog rev on demand
            hsh = f.node()
            rev = revmap.hsh2rev(hsh)
            if rev is None:
                rev = revmap.append(hsh, sidebranch=True, path=f.path())
            return rev

        # append sidebranch revisions to revmap
        siderevs = []
        siderevmap = {}  # node: int
        if bannotated is not None:
            for (a1, a2, b1, b2), op in blocks:
                if op != "=":
                    # f could be either linelog rev, or fctx.
                    siderevs += [
                        f for f, l in bannotated[b1:b2] if not isinstance(f, int)
                    ]
            siderevs = set(siderevs)
            if fctx in siderevs:  # mainnode must be appended separately
                siderevs.remove(fctx)
            for f in siderevs:
                siderevmap[f] = getllrev(f)

        # the changeset in the main branch, could be a merge
        llrev = revmap.append(fctx.node(), path=fctx.path())
        siderevmap[fctx] = llrev

        # apply changed blocks in reverse so earlier replacements do not
        # shift the line numbers of later ones
        for (a1, a2, b1, b2), op in reversed(blocks):
            if op == "=":
                continue
            if bannotated is None:
                linelog.replacelines(llrev, a1, a2, b1, b2)
            else:
                blines = [
                    ((r if isinstance(r, int) else siderevmap[r]), l)
                    for r, l in bannotated[b1:b2]
                ]
                linelog.replacelines_vec(llrev, a1, a2, blines)
def _addpathtoresult(self, annotateresult, revmap=None):
"""(revmap, [(node, linenum)]) -> [(node, linenum, path)]"""
if revmap is None:
revmap = self.revmap
def _getpath(nodeid):
path = self._node2path.get(nodeid)
if path is None:
path = revmap.rev2path(revmap.hsh2rev(nodeid))
self._node2path[nodeid] = path
return path
return [(n, l, _getpath(n)) for n, l in annotateresult]
def _checklastmasterhead(self, fctx):
"""check if fctx is the master's head last time, raise if not"""
if fctx is None:
llrev = 0
else:
llrev = self.revmap.hsh2rev(fctx.node())
if not llrev:
raise faerror.CannotReuseError()
if self.linelog.maxrev != llrev:
raise faerror.CannotReuseError()
@util.propertycache
def _parentfunc(self):
"""-> (fctx) -> [fctx]"""
followrename = self.opts.followrename
followmerge = self.opts.followmerge
def parents(f):
pl = _parents(f, follow=followrename)
if not followmerge:
pl = pl[:1]
return pl
return parents
    @util.propertycache
    def _perfhack(self):
        # config knob: trade annotate accuracy (linkrev instead of an adjusted
        # ctx) for speed; see canannotatedirectly for how it is used
        return self.ui.configbool("fastannotate", "perfhack")
    def _resolvefctx(self, rev, path=None, **kwds):
        """Resolve rev (and optional path, defaulting to self.path) to a
        filectx via the module-level resolvefctx helper."""
        return resolvefctx(self.repo, rev, (path or self.path), **kwds)
def _unlinkpaths(paths):
    """Best-effort removal of the given paths; OSError (e.g. a missing
    file) is silently ignored."""
    for p in paths:
        try:
            util.unlink(p)
        except OSError:
            pass
class pathhelper(object):
    """Compute the on-disk locations (lockfile, linelog, revmap) used to
    cache fastannotate data for a single tracked file."""

    def __init__(self, repo, path, opts=defaultopts):
        # different option sets get different directories so their caches
        # never collide
        self._vfspath = os.path.join("fastannotate", opts.shortstr, encodedir(path))
        self._repo = repo

    @property
    def dirname(self):
        """Directory holding the cache files for this path."""
        return os.path.dirname(self._repo.localvfs.join(self._vfspath))

    @property
    def linelogpath(self):
        """Absolute path of the linelog file."""
        return self._repo.localvfs.join(self._vfspath + ".l")

    @property
    def revmappath(self):
        """Absolute path of the revmap file."""
        return self._repo.localvfs.join(self._vfspath + ".m")

    def lock(self):
        """Acquire the per-file lock guarding linelog/revmap writes."""
        return lockmod.lock(
            self._repo.localvfs, self._vfspath + ".lock", ui=self._repo.ui
        )

    @contextlib.contextmanager
    def _lockflock(self):
        """Same as lock(), but using flock instead of lockmod.lock, to avoid
        creating temporary symlinks."""
        import fcntl

        lockpath = self.linelogpath
        util.makedirs(os.path.dirname(lockpath))
        fd = os.open(lockpath, os.O_RDONLY | os.O_CREAT, 0o664)
        fcntl.flock(fd, fcntl.LOCK_EX)
        try:
            yield
        finally:
            fcntl.flock(fd, fcntl.LOCK_UN)
            os.close(fd)
@contextlib.contextmanager
def annotatecontext(repo, path, opts=defaultopts, rebuild=False):
    """context needed to perform (fast) annotate on a file

    an annotatecontext of a single file consists of two structures: the
    linelog and the revmap. this function takes care of locking. only 1
    process is allowed to write that file's linelog and revmap at a time.

    when something goes wrong, this function will assume the linelog and the
    revmap are in a bad state, and remove them from disk.

    use this function in the following way:

        with annotatecontext(...) as actx:
            actx. ....

    :param repo: repository object
    :param path: tracked file path to annotate
    :param opts: annotateopts controlling diff/follow behavior
    :param rebuild: if True, delete any existing cache before yielding
    """
    helper = pathhelper(repo, path, opts)
    util.makedirs(helper.dirname)
    revmappath = helper.revmappath
    linelogpath = helper.linelogpath
    actx = None
    try:
        with helper.lock():
            actx = _annotatecontext(repo, path, linelogpath, revmappath, opts)
            if rebuild:
                actx.rebuild()
            yield actx
    except Exception:
        # any error (including one raised by the caller inside the with
        # block) is treated as potential cache corruption: wipe and re-raise
        if actx is not None:
            actx.rebuild()
        repo.ui.debug("fastannotate: %s: cache broken and deleted\n" % path)
        raise
    finally:
        # always flush/close handles, whether we succeeded or rebuilt
        if actx is not None:
            actx.close()
def fctxannotatecontext(fctx, follow=True, diffopts=None, rebuild=False):
    """like annotatecontext but get the context from a fctx. convenient when
    used in fctx.annotate
    """
    repo = fctx._repo
    # the config knob can force rename-following regardless of the caller
    if repo.ui.configbool("fastannotate", "forcefollow", False):
        follow = True
    opts = annotateopts(diffopts=diffopts, followrename=follow)
    return annotatecontext(repo, fctx._path, opts, rebuild)
|
facebookexperimental/eden
|
eden/scm/edenscm/hgext/fastannotate/context.py
|
Python
|
gpl-2.0
| 31,541
|
[
"VisIt"
] |
96232788d50f7b9a15c54eb6722e543aad6f5c59e5603f012f576f2caa532cf8
|
"""
.. module:: Pfn
:synopsis: ProcessPool and related classes
ProcessPool
ProcessPool creates a pool of worker subprocesses to handle a queue of tasks
much like the producers/consumers paradigm. Users just need to fill the queue
with tasks to be executed and worker tasks will execute them.
To construct ProcessPool one first should call its constructor::
pool = ProcessPool( minSize, maxSize, maxQueuedRequests )
where parameters are:
:param int minSize: at least <minSize> workers will be alive all the time
:param int maxSize: no more than <maxSize> workers will be alive all the time
:param int maxQueuedRequests: size for request waiting in a queue to be executed
In case another request is added to the full queue, the execution will
lock until another request is taken out. The ProcessPool will automatically increase and
decrease the pool of workers as needed, of course not exceeding above limits.
To add a task to the queue one should execute:::
pool.createAndQueueTask( funcDef,
args = ( arg1, arg2, ... ),
kwargs = { "kwarg1" : value1, "kwarg2" : value2 },
callback = callbackDef,
exceptionCallback = exceptionCallBackDef )
or alternatively by using ProcessTask instance:::
task = ProcessTask( funcDef,
args = ( arg1, arg2, ... )
kwargs = { "kwarg1" : value1, .. },
callback = callbackDef,
exceptionCallback = exceptionCallbackDef )
pool.queueTask( task )
where parameters are:
:param funcDef: callable by object definition (function, lambda, class with __call__ slot defined
:param list args: argument list
:param dict kwargs: keyword arguments dictionary
:param callback: callback function definition
:param exceptionCallback: exception callback function definition
The callback, exceptionCallback and the parameters are all optional. Once task has been added to the pool,
it will be executed as soon as possible. Worker subprocesses automatically return the return value of the task.
To obtain those results one has to execute::
pool.processRequests()
This method will process the existing return values of the task, even if the task does not return
anything. This method has to be called to clean the result queues. To wait until all the requests are finished
and process their result call::
pool.processAllRequests()
This function will block until all requests are finished and their result values have been processed.
It is also possible to set the ProcessPool in daemon mode, in which all results are automatically
processed as soon they are available, just after finalization of task execution. To enable this mode one
has to call::
pool.daemonize()
Callback functions
There are two types of callbacks that can be executed for each tasks: exception callback function and
results callback function. The first one is executed when unhandled exception has been raised during
task processing, and hence no task results are available, otherwise the execution of second callback type
is performed.
The callbacks could be attached in a two places:
- directly in ProcessTask, in that case those have to be shelvable/picklable, so they should be defined as
global functions with the signature :callback( task, taskResult ): where :task: is a :ProcessTask:
reference and :taskResult: is whatever the task callable returns, for the results callback; and
:exceptionCallback( task, exc_info): where exc_info is a
:S_ERROR( "Exception": { "Value" : exceptionName, "Exc_info" : exceptionInfo ):
- in ProcessPool, in that case there is no limitation on the function type, except the signature, which
should follow :callback( task ): or :exceptionCallback( task ):, as those callbacks definitions
are not put into the queues
The first types of callbacks could be used in case various callable objects are put into the ProcessPool,
so you probably want to handle them differently depending on their results, while the second types are for
executing same type of callables in subprocesses and hence you are expecting the same type of results
everywhere.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import errno
import inspect
import multiprocessing
import os
import signal
import sys
import threading
import time
import queue
try:
    from DIRAC.FrameworkSystem.Client.Logger import gLogger
except ImportError:
    gLogger = None
try:
    from DIRAC.Core.Utilities.LockRing import LockRing
except ImportError:
    LockRing = None
try:
    from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR
except ImportError:

    def S_OK(val=""):
        """dummy S_OK mimicking DIRAC.Core.Utilities.ReturnValues.S_OK"""
        return {"OK": True, "Value": val}

    def S_ERROR(*args):
        """dummy S_ERROR mimicking DIRAC.Core.Utilities.ReturnValues.S_ERROR

        Accepts both S_ERROR(message) and S_ERROR(errno, message): this module
        calls it both ways (see the "Timed out" error in WorkingProcess.run),
        so a single-argument signature would raise TypeError when DIRAC is
        unavailable.
        """
        return {"OK": False, "Message": " ".join(str(a) for a in args)}


# gLogger is None when DIRAC is not importable; guard to avoid an
# AttributeError at import time
sLog = gLogger.getSubLogger(__name__) if gLogger else None
class WorkingProcess(multiprocessing.Process):
    """
    .. class:: WorkingProcess

    WorkingProcess is a class that represents activity that runs in a separate process.

    It is running main thread (process) in daemon mode, reading tasks from :pendingQueue:, executing
    them and pushing back tasks with results to the :resultsQueue:. If task has got a timeout value
    defined a separate threading.Timer thread is started killing execution (and destroying worker)
    after :ProcessTask.__timeOut: seconds.

    Main execution could also terminate in a few different ways:

    * on every failed read attempt (from empty :pendingQueue:), the idle loop counter is increased,
      worker is terminated when counter is reaching a value of 10;
    * when stopEvent is set (so ProcessPool is in draining mode),
    * when parent process PID is set to 1 (init process, parent process with ProcessPool is dead).
    """

    def __init__(self, pendingQueue, resultsQueue, stopEvent, keepRunning):
        """c'tor

        :param self: self reference
        :param pendingQueue: queue storing ProcessTask before execution
        :type pendingQueue: multiprocessing.Queue
        :param resultsQueue: queue storing callbacks and exceptionCallbacks
        :type resultsQueue: multiprocessing.Queue
        :param stopEvent: event to stop processing
        :type stopEvent: multiprocessing.Event
        :param bool keepRunning: when True the worker ignores the 10-idle-loops exit rule
        """
        multiprocessing.Process.__init__(self)
        # daemonize
        self.daemon = True
        # flag to see if task is being treated
        self.__working = multiprocessing.Value("i", 0)
        # task counter (shared Value so the parent process can observe it)
        self.__taskCounter = multiprocessing.Value("i", 0)
        # task queue
        self.__pendingQueue = pendingQueue
        # results queue
        self.__resultsQueue = resultsQueue
        # stop event
        self.__stopEvent = stopEvent
        # keep process running until stop event
        self.__keepRunning = keepRunning
        # placeholder for watchdog thread
        self.__watchdogThread = None
        # placeholder for process thread
        self.__processThread = None
        # placeholder for current task
        self.task = None
        # start yourself at least
        self.start()

    def __watchdog(self):
        """
        Watchdog thread target

        Terminating/killing WorkingProcess when parent process is dead

        :param self: self reference
        """
        while True:
            # parent is dead, commit suicide
            if os.getppid() == 1:
                os.kill(self.pid, signal.SIGTERM)
                # wait for half a minute and if worker is still alive use REAL silencer
                time.sleep(30)
                # now you're dead
                os.kill(self.pid, signal.SIGKILL)
            # wake me up in 5 seconds
            time.sleep(5)

    def isWorking(self):
        """
        Check if process is being executed

        :param self: self reference
        """
        return self.__working.value == 1

    def taskProcessed(self):
        """
        Tell how many tasks have been processed so far

        :param self: self reference
        :return: the shared multiprocessing.Value counter; read its ``.value``
                 attribute for the integer count
        """
        return self.__taskCounter

    def __processTask(self):
        """
        processThread target

        :param self: self reference
        """
        if self.task:
            self.task.process()

    def run(self):
        """
        Task execution

        Reads and executes ProcessTask :task: out of pending queue and then pushes it
        to the results queue for callback execution.

        :param self: self reference
        """
        # start watchdog thread
        self.__watchdogThread = threading.Thread(target=self.__watchdog)
        self.__watchdogThread.daemon = True
        self.__watchdogThread.start()
        if LockRing:
            # Reset all locks
            lr = LockRing()
            lr._openAll()
            lr._setAllEvents()
        # zero processed task counter
        taskCounter = 0
        # zero idle loop counter
        idleLoopCount = 0
        # main loop
        while True:
            # draining, stopEvent is set, exiting
            if self.__stopEvent.is_set():
                return
            # clear task
            self.task = None
            # read from queue
            try:
                task = self.__pendingQueue.get(block=True, timeout=10)
            except queue.Empty:
                # idle loop?
                idleLoopCount += 1
                # 10th idle loop - exit, nothing to do
                if idleLoopCount == 10 and not self.__keepRunning:
                    return
                continue
            # toggle __working flag
            self.__working.value = 1
            # save task
            self.task = task
            # reset idle loop counter
            idleLoopCount = 0
            # process task in a separate thread
            self.__processThread = threading.Thread(target=self.__processTask)
            self.__processThread.start()
            timeout = False
            noResults = False
            # join processThread with or without timeout
            if self.task.getTimeOut():
                self.__processThread.join(self.task.getTimeOut() + 10)
            else:
                self.__processThread.join()
            # processThread is still alive? stop it!
            if self.__processThread.is_alive():
                self.task.setResult(S_ERROR(errno.ETIME, "Timed out"))
                timeout = True
            # if the task finished with no results, something bad happened, e.g.
            # undetected timeout
            if not self.task.taskResults() and not self.task.taskException():
                self.task.setResult(S_ERROR("Task produced no results"))
                noResults = True
            # check results and callbacks presence, put task to results queue
            if self.task.hasCallback() or self.task.hasPoolCallback():
                self.__resultsQueue.put(task)
            if timeout or noResults:
                # The task execution timed out, stop the process to prevent it from running
                # in the background
                time.sleep(1)
                os.kill(self.pid, signal.SIGKILL)
                return
            # increase task counter
            taskCounter += 1
            # fix: update the shared Value's payload instead of rebinding the
            # attribute to a plain int -- a rebind inside the child process is
            # never visible to the parent, so the counter would always read 0
            self.__taskCounter.value = taskCounter
            # toggle __working flag
            self.__working.value = 0
class ProcessTask(object):
    """Defines task to be executed in WorkingProcess together with its callbacks."""

    # taskID
    taskID = 0

    def __init__(
        self,
        taskFunction,
        args=None,
        kwargs=None,
        taskID=None,
        callback=None,
        exceptionCallback=None,
        usePoolCallbacks=False,
        timeOut=0,
    ):
        """c'tor

        :warning: taskFunction has to be callable: it could be a function, lambda OR a class with
        __call__ operator defined. But be careful with interpretation of args and kwargs, as they
        are passed to different places in above cases:

        1. for functions, lambdas and other plain callables args and kwargs are just treated as
           call parameters

        2. for callable classes (say MyTask) args and kwargs are passed to the class constructor
           (MyTask.__init__) and MyTask.__call__ should be a method without parameters, i.e.
           MyTask definition should be::

          class MyTask:
            def __init__( self, *args, **kwargs ):
              ...
            def __call__( self ):
              ...

        :warning: depending on :timeOut: value, taskFunction execution can be forcefully terminated
        using SIGALRM after :timeOut: seconds spent, :timeOut: equal to zero means there is no any
        time out at all, except those during :ProcessPool: finalization

        :param self: self reference
        :param mixed taskFunction: definition of callable object to be executed in this task
        :param tuple args: non-keyword arguments
        :param dict kwargs: keyword arguments
        :param int taskID: task id, if not set,
        :param int timeOut: estimated time to execute taskFunction in seconds (default = 0, no timeOut at all)
        :param mixed callback: result callback function
        :param mixed exceptionCallback: callback function to be fired upon exception in taskFunction
        """
        self.__taskFunction = taskFunction
        self.__taskArgs = args or []
        self.__taskKwArgs = kwargs or {}
        self.__taskID = taskID
        self.__resultCallback = callback
        self.__exceptionCallback = exceptionCallback
        self.__timeOut = 0
        # set time out
        self.setTimeOut(timeOut)
        self.__done = False
        self.__exceptionRaised = False
        self.__taskException = None
        self.__taskResult = None
        self.__usePoolCallbacks = usePoolCallbacks

    def taskResults(self):
        """
        Get task results

        :param self: self reference
        """
        return self.__taskResult

    def taskException(self):
        """
        Get task exception

        :param self: self reference
        """
        return self.__taskException

    def enablePoolCallbacks(self):
        """
        (re)enable use of ProcessPool callbacks
        """
        self.__usePoolCallbacks = True

    def disablePoolCallbacks(self):
        """
        Disable execution of ProcessPool callbacks
        """
        self.__usePoolCallbacks = False

    def usePoolCallbacks(self):
        """
        Check if results should be processed by callbacks defined in the :ProcessPool:

        :param self: self reference
        """
        return self.__usePoolCallbacks

    def hasPoolCallback(self):
        """
        Check if asked to execute :ProcessPool: callbacks

        :param self: self reference
        """
        return self.__usePoolCallbacks

    def setTimeOut(self, timeOut):
        """
        Set time out (in seconds)

        :param self: self reference
        :param int timeOut: new time out value
        """
        try:
            self.__timeOut = int(timeOut)
            return S_OK(self.__timeOut)
        except (TypeError, ValueError) as error:
            return S_ERROR(str(error))

    def getTimeOut(self):
        """
        Get timeOut value

        :param self: self reference
        """
        return self.__timeOut

    def hasTimeOutSet(self):
        """
        Check if timeout is set

        :param self: self reference
        """
        return bool(self.__timeOut != 0)

    def getTaskID(self):
        """
        TaskID getter

        :param self: self reference
        """
        return self.__taskID

    def hasCallback(self):
        """
        Callback existence checking

        :param self: self reference
        :return: True if callback or exceptionCallback has been defined, False otherwise
        """
        # bool() so callers always get a real boolean, not a callback object
        return bool(self.__resultCallback or self.__exceptionCallback or self.__usePoolCallbacks)

    def exceptionRaised(self):
        """
        Flag to determine exception in process

        :param self: self reference
        """
        return self.__exceptionRaised

    def doExceptionCallback(self):
        """
        Execute exceptionCallback

        :param self: self reference
        """
        if self.__done and self.__exceptionRaised and self.__exceptionCallback:
            self.__exceptionCallback(self, self.__taskException)

    def doCallback(self):
        """
        Execute result callback function

        :param self: self reference
        """
        if self.__done and not self.__exceptionRaised and self.__resultCallback:
            self.__resultCallback(self, self.__taskResult)

    def setResult(self, result):
        """
        Set taskResult to result
        """
        self.__taskResult = result

    def process(self):
        """
        Execute task

        :param self: self reference
        """
        self.__done = True
        try:
            # it's a function?
            if inspect.isfunction(self.__taskFunction):
                self.__taskResult = self.__taskFunction(*self.__taskArgs, **self.__taskKwArgs)
            # or a class?
            elif inspect.isclass(self.__taskFunction):
                # create new instance
                taskObj = self.__taskFunction(*self.__taskArgs, **self.__taskKwArgs)
                # ## check if it is callable, raise TypeError if not
                if not callable(taskObj):
                    raise TypeError("__call__ operator not defined in %s class" % taskObj.__class__.__name__)
                # ## call it at least
                self.__taskResult = taskObj()
            # or any other callable (builtin, functools.partial, bound method,
            # callable instance)? these were previously skipped silently,
            # leaving the task with no result at all
            elif callable(self.__taskFunction):
                self.__taskResult = self.__taskFunction(*self.__taskArgs, **self.__taskKwArgs)
            else:
                # not callable at all: record it as a task exception instead
                # of silently producing no result
                raise TypeError("%r is not callable" % (self.__taskFunction,))
        except Exception as x:
            self.__exceptionRaised = True
            if gLogger:
                gLogger.exception("Exception in process of pool")
            if self.__exceptionCallback or self.usePoolCallbacks():
                retDict = S_ERROR("Exception")
                retDict["Value"] = str(x)
                retDict["Exc_info"] = sys.exc_info()[1]
                self.__taskException = retDict
class ProcessPool(object):
"""
.. class:: ProcessPool
ProcessPool
This class is managing multiprocessing execution of tasks (:ProcessTask: instances) in a separate
sub-processes (:WorkingProcess:).
Pool depth
The :ProcessPool: is keeping required number of active workers all the time: slave workers are only created
when pendingQueue is being filled with tasks, not exceeding defined min and max limits. When pendingQueue is
empty, active workers will be cleaned up by themselves, as each worker has got built in
self-destroy mechanism after 10 idle loops.
Processing and communication
The communication between :ProcessPool: instance and slaves is performed using two :multiprocessing.Queues:
* pendingQueue, used to push tasks to the workers,
* resultsQueue for revert direction;
and one :multiprocessing.Event: instance (stopEvent), which is working as a fuse to destroy idle workers
in a clean manner.
Processing of task begins with pushing it into :pendingQueue: using :ProcessPool.queueTask: or
:ProcessPool.createAndQueueTask:. Every time new task is queued, :ProcessPool: is checking existance of
active and idle workers and spawning new ones when required. The task is then read and processed on worker
side. If results are ready and callback functions are defined, task is put back to the resultsQueue and it is
ready to be picked up by ProcessPool again. To perform this last step one has to call :ProcessPool.processResults:,
or alternatively ask for daemon mode processing, when this function is called again and again in
separate background thread.
Finalisation
Finalization for task processing is done in several steps:
* if pool is working in daemon mode, background result processing thread is joined and stopped
* :pendingQueue: is emptied by :ProcessPool.processAllResults: function, all enqueued tasks are executed
* :stopEvent: is set, so all idle workers are exiting immediately
* non-hanging workers are joined and terminated politelty
* the rest of workers, if any, are forcefully retained by signals: first by SIGTERM, and if is doesn't work
by SIGKILL
:warn: Be carefull and choose wisely :timeout: argument to :ProcessPool.finalize:. Too short time period can
cause that all workers will be killed.
"""
def __init__(
self,
minSize=2,
maxSize=0,
maxQueuedRequests=10,
strictLimits=True,
poolCallback=None,
poolExceptionCallback=None,
keepProcessesRunning=True,
):
"""c'tor
:param self: self reference
:param int minSize: minimal number of simultaniously executed tasks
:param int maxSize: maximal number of simultaniously executed tasks
:param int maxQueueRequests: size of pending tasks queue
:param bool strictLimits: flag to workers overcommitment
:param callable poolCallbak: results callback
:param callable poolExceptionCallback: exception callback
"""
# min workers
self.__minSize = max(1, minSize)
# max workers
self.__maxSize = max(self.__minSize, maxSize)
# queue size
self.__maxQueuedRequests = maxQueuedRequests
# flag to worker overcommit
self.__strictLimits = strictLimits
# pool results callback
self.__poolCallback = poolCallback
# pool exception callback
self.__poolExceptionCallback = poolExceptionCallback
# pending queue
self.__pendingQueue = multiprocessing.Queue(self.__maxQueuedRequests)
# results queue
self.__resultsQueue = multiprocessing.Queue(0)
# stop event
self.__stopEvent = multiprocessing.Event()
# keep processes running flag
self.__keepRunning = keepProcessesRunning
# lock
self.__prListLock = threading.Lock()
# workers dict
self.__workersDict = {}
# flag to trigger workers draining
self.__draining = False
# placeholder for daemon results processing
self.__daemonProcess = False
# create initial workers
self.__spawnNeededWorkingProcesses()
def stopProcessing(self, timeout=10):
"""
Case fire
:param self: self reference
"""
self.finalize(timeout)
def startProcessing(self):
"""
Restart processing again
:param self: self reference
"""
self.__draining = False
self.__stopEvent.clear()
self.daemonize()
def setPoolCallback(self, callback):
"""
Set ProcessPool callback function
:param self: self reference
:param callable callback: callback function
"""
if callable(callback):
self.__poolCallback = callback
def setPoolExceptionCallback(self, exceptionCallback):
"""
Set ProcessPool exception callback function
:param self: self refernce
:param callable exceptionCallback: exsception callback function
"""
if callable(exceptionCallback):
self.__poolExceptionCallback = exceptionCallback
def getMaxSize(self):
"""
MaxSize getter
:param self: self reference
"""
return self.__maxSize
def getMinSize(self):
"""
MinSize getter
:param self: self reference
"""
return self.__minSize
def getNumWorkingProcesses(self):
"""
Count processes currently being executed
:param self: self reference
"""
counter = 0
self.__prListLock.acquire()
try:
counter = len([pid for pid, worker in self.__workersDict.items() if worker.isWorking()])
finally:
self.__prListLock.release()
return counter
def getNumIdleProcesses(self):
"""
Count processes being idle
:param self: self reference
"""
counter = 0
self.__prListLock.acquire()
try:
counter = len([pid for pid, worker in self.__workersDict.items() if not worker.isWorking()])
finally:
self.__prListLock.release()
return counter
def getFreeSlots(self):
"""get number of free slots available for workers
:param self: self reference
"""
return max(0, self.__maxSize - self.getNumWorkingProcesses())
def __spawnWorkingProcess(self):
"""
Create new process
:param self: self reference
"""
self.__prListLock.acquire()
try:
worker = WorkingProcess(self.__pendingQueue, self.__resultsQueue, self.__stopEvent, self.__keepRunning)
while worker.pid is None:
time.sleep(0.1)
self.__workersDict[worker.pid] = worker
finally:
self.__prListLock.release()
def __cleanDeadProcesses(self):
"""
Delete references of dead workingProcesses from ProcessPool.__workingProcessList
"""
# check wounded processes
self.__prListLock.acquire()
try:
for pid, worker in list(self.__workersDict.items()):
if not worker.is_alive():
del self.__workersDict[pid]
finally:
self.__prListLock.release()
def __spawnNeededWorkingProcesses(self):
"""
Create N working process (at least self.__minSize, but no more
than self.__maxSize)
:param self: self reference
"""
self.__cleanDeadProcesses()
# if we're draining do not spawn new workers
if self.__draining or self.__stopEvent.is_set():
return
while len(self.__workersDict) < self.__minSize:
if self.__draining or self.__stopEvent.is_set():
return
self.__spawnWorkingProcess()
while self.hasPendingTasks() and self.getNumIdleProcesses() == 0 and len(self.__workersDict) < self.__maxSize:
if self.__draining or self.__stopEvent.is_set():
return
self.__spawnWorkingProcess()
time.sleep(0.1)
def queueTask(self, task, blocking=True, usePoolCallbacks=False):
"""
Enqueue new task into pending queue
:param self: self reference
:param ProcessTask task: new task to execute
:param bool blocking: flag to block if necessary and new empty slot is available (default = block)
:param bool usePoolCallbacks: flag to trigger execution of pool callbacks (default = don't execute)
"""
if not isinstance(task, ProcessTask):
raise TypeError("Tasks added to the process pool must be ProcessTask instances")
if usePoolCallbacks and (self.__poolCallback or self.__poolExceptionCallback):
task.enablePoolCallbacks()
self.__prListLock.acquire()
try:
self.__pendingQueue.put(task, block=blocking)
except queue.Full:
self.__prListLock.release()
return S_ERROR("Queue is full")
finally:
self.__prListLock.release()
self.__spawnNeededWorkingProcesses()
# throttle a bit to allow task state propagation
time.sleep(0.1)
return S_OK()
def createAndQueueTask(
self,
taskFunction,
args=None,
kwargs=None,
taskID=None,
callback=None,
exceptionCallback=None,
blocking=True,
usePoolCallbacks=False,
timeOut=0,
):
"""
Create new processTask and enqueue it in pending task queue
:param self: self reference
:param mixed taskFunction: callable object definition (FunctionType, LambdaType, callable class)
:param tuple args: non-keyword arguments passed to taskFunction c'tor
:param dict kwargs: keyword arguments passed to taskFunction c'tor
:param int taskID: task Id
:param mixed callback: callback handler, callable object executed after task's execution
:param mixed exceptionCallback: callback handler executed if testFunction had raised an exception
:param bool blocking: flag to block queue if necessary until free slot is available
:param bool usePoolCallbacks: fire execution of pool defined callbacks after task callbacks
:param int timeOut: time you want to spend executing :taskFunction:
"""
task = ProcessTask(taskFunction, args, kwargs, taskID, callback, exceptionCallback, usePoolCallbacks, timeOut)
return self.queueTask(task, blocking)
def hasPendingTasks(self):
"""
Check if taks are present in pending queue
:param self: self reference
:warning: results may be misleading if elements put into the queue are big
"""
return not self.__pendingQueue.empty()
def isFull(self):
"""
Check in peding queue is full
:param self: self reference
:warning: results may be misleading if elements put into the queue are big
"""
return self.__pendingQueue.full()
def isWorking(self):
"""
Check existence of working subprocesses
:param self: self reference
"""
return not self.__pendingQueue.empty() or self.getNumWorkingProcesses()
def processResults(self):
"""
Execute tasks' callbacks removing them from results queue
:param self: self reference
"""
processed = 0
log = sLog.getSubLogger("WorkingProcess")
while True:
if (
not log.debug(
"Start loop (t=0) queue size = %d, processed = %d" % (self.__resultsQueue.qsize(), processed)
)
and processed == 0
and self.__resultsQueue.qsize()
):
log.debug("Process results, queue size = %d" % self.__resultsQueue.qsize())
start = time.time()
self.__cleanDeadProcesses()
log.debug("__cleanDeadProcesses", "t=%.2f" % (time.time() - start))
if not self.__pendingQueue.empty():
self.__spawnNeededWorkingProcesses()
log.debug("__spawnNeededWorkingProcesses", "t=%.2f" % (time.time() - start))
time.sleep(0.1)
if self.__resultsQueue.empty():
if self.__resultsQueue.qsize():
log.warn("Results queue is empty but has non zero size", "%d" % self.__resultsQueue.qsize())
# We only commit suicide if we reach a backlog greater than the maximum number of workers
if self.__resultsQueue.qsize() > self.__maxSize:
return -1
else:
return 0
if processed == 0:
log.debug("Process results, but queue is empty...")
break
# get task
task = self.__resultsQueue.get()
log.debug("__resultsQueue.get", "t=%.2f" % (time.time() - start))
# execute callbacks
try:
task.doExceptionCallback()
task.doCallback()
log.debug("doCallback", "t=%.2f" % (time.time() - start))
if task.usePoolCallbacks():
if self.__poolExceptionCallback and task.exceptionRaised():
self.__poolExceptionCallback(task.getTaskID(), task.taskException())
if self.__poolCallback and task.taskResults():
self.__poolCallback(task.getTaskID(), task.taskResults())
log.debug("__poolCallback", "t=%.2f" % (time.time() - start))
except Exception as error:
log.exception("Exception in callback", lException=error)
pass
processed += 1
if processed:
log.debug("Processed %d results" % processed)
else:
log.debug("No results processed")
return processed
def processAllResults(self, timeout=10):
"""
Process all enqueued tasks at once
:param self: self reference
"""
start = time.time()
while self.getNumWorkingProcesses() or not self.__pendingQueue.empty():
self.processResults()
time.sleep(1)
if time.time() - start > timeout:
break
self.processResults()
def finalize(self, timeout=60):
"""
Drain pool, shutdown processing in more or less clean way
:param self: self reference
:param timeout: seconds to wait before killing
"""
# start drainig
self.__draining = True
# join deamon process
if self.__daemonProcess:
self.__daemonProcess.join(timeout)
# process all tasks
self.processAllResults(timeout)
# set stop event, all idle workers should be terminated
self.__stopEvent.set()
# join idle workers
start = time.time()
log = sLog.getSubLogger("finalize")
nWorkers = 9999999
while self.__workersDict:
self.__cleanDeadProcesses()
if len(self.__workersDict) != nWorkers:
nWorkers = len(self.__workersDict)
log.debug("%d workers still active, timeout = %d" % (nWorkers, timeout))
if timeout <= 0 or time.time() - start >= timeout:
break
time.sleep(0.1)
# second clean up - join and terminate workers
if self.__workersDict:
log.debug(
"After cleaning dead processes, %d workers still active, timeout = %d"
% (len(self.__workersDict), timeout)
)
for worker in self.__workersDict.values():
if worker.is_alive():
worker.terminate()
worker.join(5)
self.__cleanDeadProcesses()
# third clean up - kill'em all!!!
if self.__workersDict:
log.debug(
"After terminating processes, %d workers still active, timeout = %d, kill them"
% (len(self.__workersDict), timeout)
)
self.__filicide()
def __filicide(self):
"""
Kill all workers, kill'em all!
:param self: self reference
"""
while self.__workersDict:
pid = list(self.__workersDict).pop(0)
worker = self.__workersDict[pid]
if worker.is_alive():
os.kill(pid, signal.SIGKILL)
del self.__workersDict[pid]
def daemonize(self):
"""
Make ProcessPool a finite being for opening and closing doors between
chambers.
Also just run it in a separate background thread to the death of
PID 0.
:param self: self reference
"""
if self.__daemonProcess:
return
self.__daemonProcess = threading.Thread(target=self.__backgroundProcess)
self.__daemonProcess.setDaemon(1)
self.__daemonProcess.start()
def __backgroundProcess(self):
"""
Daemon thread target
:param self: self reference
"""
while True:
if self.__draining:
return
self.processResults()
time.sleep(1)
def __del__(self):
    """
    Destructor: finalize the pool (drain results, stop and reap worker
    processes) with a 10-second timeout before the object is reclaimed.

    NOTE(review): ``__del__`` is not guaranteed to run at interpreter
    shutdown; callers should prefer calling finalize() explicitly.

    :param self: self reference
    """
    self.finalize(timeout=10)
|
ic-hep/DIRAC
|
src/DIRAC/Core/Utilities/ProcessPool.py
|
Python
|
gpl-3.0
| 35,860
|
[
"DIRAC"
] |
180ecafa21ab8ba11ef9aefda06557fec3ddcf00555674dcc0e294c5063eed97
|
# -*- coding: utf-8 -*-
{
"'Cancel' will indicate an asset log entry did not occur": "' Cancel ' будет указывать актива запись протокола не выполнено",
"A location that specifies the geographic area for this region. This can be a location from the location hierarchy, or a 'group location', or a location that has a boundary for the area.": "Положение, географический район для этого региона. Это может быть положение в иерархии положений, а группу ' location ', либо расположение, в граничные в области.",
"Acronym of the organization's name, eg. IFRC.": 'Акронима организации его имя, например: Мфокк.',
"Authenticate system's Twitter account": 'Аутентификация счета системы Twitter',
"Can't import tweepy": 'Невозможно импортировать tweepy',
"Caution: doesn't respect the framework rules!": 'Предостережение: не соблюдаются правила структуры!',
"Format the list of attribute values & the RGB value to use for these as a JSON object, e.g.: {Red: '#FF0000', Green: '#00FF00', Yellow: '#FFFF00'}": "Отформатируйте список значений атрибутов и значение RGB, чтобы использовать для этих, как объект JSON, например: {Red: '#FF0000', Green: '#00FF00', Yellow: '#FFFF00'}",
"If selected, then this Asset's Location will be updated whenever the Person's Location is updated.": 'Если выбрано, то расположение этого ресурса будет обновляться всякий раз когда обновляется расположение личности.',
"If this configuration represents a region for the Regions menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": 'Если эта конфигурация представляет регион в Области меню, присвойте ему имя используемое в меню. Имя для личного конфигурации отображения будет присвоено имя пользователя.',
"If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.": 'Если это поле заполняется то пользователь которых задает этот организацией при подписании $tag будет назначен в качестве сотрудник этой организации если их домена не соответствует домену поле.',
"If this is ticked, then this will become the user's Base Location & hence where the user is shown on the Map": 'Если это настороже, то это станет пользователя Базы расположение& поэтому когда пользователь показано на карте',
"If you don't see the Hospital in the list, you can add a new one by clicking link 'Create Hospital'.": 'Если вы не видите больницы в списке, можно добавить новую, нажав ссылку " добавить Больницу \'.',
"If you don't see the Office in the list, you can add a new one by clicking link 'Create Office'.": "Если вы не видите офиса в списке, можно добавить новый, нажав ссылку 'добавить офис'.",
"If you don't see the Organization in the list, you can add a new one by clicking link 'Create Organization'.": 'Если вы не видите организации в списке, можно добавить новый нажав ссылку " добавить организацией ".',
"Instead of automatically syncing from other peers over the network, you can also sync from files, which is necessary where there's no network. You can use this page to import sync data from files and also export data to sync files. Click the link on the right to go to this page.": 'Вместо автоматически синхронизация из других peers по сети, можно также синхронизировать из файлов, которые нужно где есть сеть отсутствует. Можно на этой странице можно импортировать синхронизовать данные из файлов и экспортировать данные в несинхронизированных файлов. Щелкните ссылку на право на перейти к этой странице.',
"Level is higher than parent's": 'Уровень выше родительского',
"Need a 'url' argument!": "Требуется аргумент ' url '!",
"Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": 'Это необязательный параметр. - имя geometry столбцу. В PostGIS по умолчанию " the_geom\'.',
"Parent level should be higher than this record's level. Parent level is": 'Родительский уровень должен быть выше, чем уровень данной записи. Родительский уровень является',
"Password fields don't match": 'Поля пароля не соответствуют',
"Phone number to donate to this organization's relief efforts.": 'Номер телефона для пожертвования на усилия этой организации по оказанию помощи.',
"Please come back after sometime if that doesn't help.": 'Пожалуйста, вернитесь через некоторое время, если не это помогает.',
"Quantity in %s's Inventory": 'Количество в% s складского',
"Select a Room from the list or click 'Create Room'": 'Выбор Места из списка или нажмите кнопку " добавить Комнаты "',
"Select a person in charge for status 'assigned'": 'Выберите ответственное лицо для статуса " назначен "',
"Select this if all specific locations need a parent at the deepest level of the location hierarchy. For example, if 'district' is the smallest division in the hierarchy, then all specific locations would be required to have a district as a parent.": "Выберите этот если все расположения необходимо родительского в глубочайшие уровня в иерархии положений. Например, если ' окружного ' - это наименьшее деление в иерархии, то все расположения необходимо будет иметь в районном таблицу как родительскую.",
"Select this if all specific locations need a parent location in the location hierarchy. This can assist in setting up a 'region' representing an affected area.": "Выберите этот если все расположения необходимо родительского расположения в иерархии положений. Это может оказать помощь в настройка ' region ', пострадавшем районе.",
"Sorry, things didn't get done on time.": 'Извините, что не get выполняется на время.',
"Sorry, we couldn't find that page.": 'К сожалению, мы не удалось найти этой страницы.',
"System's Twitter account updated": "Системная учётная запись Twitter'а обновлена",
"The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.": '- доноров (ы) для этого проекта. Несколько значений можно выбрать удерживая нажатой " контроль " ключа.',
"The URL of the image file. If you don't upload an image file, then you must specify its location here.": 'URL - адрес файла изображения. Если вы не загрузите изображение файл, то необходимо указать его расположение здесь.',
"To search by person name, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Для поиска по имени пользователя, введите любой из первого, средний или фамилии, разделенных пробелами. Можно использовать% как символ подстановки. Нажмите ' Search ' без ввода в список всех сотрудников.",
"To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": "Для поиска органа, введите код номер тега этого органа. Можно использовать% как символ подстановки. Нажмите ' Search ' без ввода для просмотра списка всех органов.",
"To search for a hospital, enter any of the names or IDs of the hospital, or the organization name or acronym, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "Для поиска больницы, введите любое из имен или ID больницы, или имя организации, или акроним, разделенные пробелами. Можно использовать% в качестве группового символа. Нажмите ' Search ' без ввода для вывода списка всех больниц.",
"To search for a hospital, enter any of the names or IDs of the hospital, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "Для поиска больницы, введите любое из имен или ID этой больницы, разделенное пробелами. Можно использовать% в качестве группового символа. Нажмите ' Search ' без ввода для вывода списка всех больниц.",
"To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": "Для поиска расположения, введите имя. Можно использовать% в качестве группового символа. Нажмите ' Search ' без ввода для вывода списка всех расположений",
"To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Для поиска лица, введите на любой из первого, средний или фамилии и/или о ид пользователя, разделенных пробелами. Можно использовать% как символ подстановки. Нажмите ' Search ' без ввода в список всех сотрудников.",
"To search for an assessment, enter any portion the ticket number of the assessment. You may use % as wildcard. Press 'Search' without input to list all assessments.": "Для поиска оценки, введите любую часть номера мандата этой оценки. Можно использовать% в качестве группового символа. Нажмите ' Search ' без ввода для вывода списка всех оценок.",
"Type the first few characters of one of the Person's names.": 'Введите несколько первых символов одного из имен данной личности.',
"Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": 'Загрузить файл изображения здесь. Если вы не загрузите изображение в файл, то необходимо указать ее расположение в поле URL.',
"When syncing data with others, conflicts happen in cases when two (or more) parties want to sync information which both of them have modified, i.e. conflicting information. Sync module tries to resolve such conflicts automatically but in some cases it can't. In those cases, it is up to you to resolve those conflicts manually, click on the link on the right to go to this page.": 'Если синхронизация данных с другими, конфликты возникают в случаях когда два (или более) стороны получить информации о синхронизации которых оба из них модифицированы, т. противоречивую информацию. Модуль синхронизации пытается разрешить такие конфликты автоматически но в некоторых случаях нельзя. В тех случаях, это сделать для разрешения этих конфликтов вручную, щелкните на ссылке в правой части для перехода к этой странице.',
"You haven't made any calculations": 'Вы не выполнили расчеты',
"couldn't be parsed so NetworkLinks not followed.": 'Не может быть распознана таким NetworkLinks не выполнен.',
"includes a GroundOverlay or ScreenOverlay which aren't supported in OpenLayers yet, so it may not work properly.": 'Включение GroundOverlay или в ScreenOverlay, которые до сих пор не поддерживаются в OpenLayers, может не работать должным образом.',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '\\ " update\\ " - необязательное выражение как \\ " field1=\'newvalue\'\\ ". нельзя изменить или удалить результаты соединения',
'# of International Staff': '# международного персонала',
'# of National Staff': '# национальных сотрудников в',
'# of Vehicles': '# транспортных средств',
'%(msg)s\nIf the request type is "%(type)s", please enter the %(type)s on the next screen.': '%(msg)s\nIf тип тзапроса "%(type)s", введите, пожалуйста %(type)s ы на следующем экране.',
'%(system_name)s - Verify Email': '%(system_name)s - Проверка Электронной Почты',
'%.1f km': '% .1f км',
'%s rows deleted': '% s строк удалено',
'%s rows updated': '% s строк обновлено',
'& then click on the map below to adjust the Lat/Lon fields': '& щелкните на ниже карту для настройки Lat/Lon поля',
'* Required Fields': '* обязательные поля',
'0-15 minutes': '0 - 15 минут',
'1 Assessment': '1 Оценка',
'1 location, shorter time, can contain multiple Tasks': '1 расположение, меньшее время, может содержать несколько задач',
'1-3 days': '1-3 дня',
'15-30 minutes': '15 - 30 минут',
'2 different options are provided here currently:': '2 различных варианта подготовлены здесь в настоящее время:',
'2x4 Car': '2x4 Автомобиль',
'30-60 minutes': '30 - 60 минут',
'4-7 days': '4 - 7 дней',
'4x4 Car': 'Полноприводной автомобиль',
'8-14 days': '8 - 14 дней',
'A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class.': 'Marker, установленный на индивидуальное Location устанавливается тогда, когда существует необходимость переопределить маркер, установленный на Feature Class.',
'A Reference Document such as a file, URL or contact person to verify this data. You can type the 1st few characters of the document name to link to an existing document.': 'Например, файлу справочным документом URL или контактного лица для проверки данных. Можно ввести 1. несколько символов имени документа ссылку на существующий документ.',
'A brief description of the group (optional)': 'Краткое описание группы (необязательно)',
'A file downloaded from a GPS containing a series of geographic points in XML format.': 'Файл, загруженный с GPS один, содержащий ряд географических точек в формате XML.',
'A file in GPX format taken from a GPS whose timestamps can be correlated with the timestamps on the photos to locate them on the map.': 'Файл в GPX формате браться из конкретного GPS которого отметки времени можно быть коррелируются с отметки на снимки, они расположены на карте.',
'A library of digital resources, such as photos, documents and reports': 'Библиотека цифровых ресурсов, таких как фотографии, документы и отчеты',
'A location group can be used to define the extent of an affected area, if it does not fall within one administrative region.': 'Группа на месте может быть использована для определения пределов пострадавшей территории, если она не находится в пределах одного административного региона.',
'A location group is a set of locations (often, a set of administrative regions representing a combined area).': 'Группа территорий - это совокупность территорий (часто, объединение административных районов, представляющих составную территориальную единицу).',
'A location group must have at least one member.': 'Территориальная группа должна иметь по крайней мере одного члена.',
'ABOUT THIS MODULE': 'ОБ ЭТОМ МОДУЛЕ',
'ACCESS DATA': 'Данные доступа',
'ANY': 'любой',
'API is documented here': 'API - это описано здесь',
'ATC-20 Rapid Evaluation modified for New Zealand': 'ATC - 20 Быстрой оценки, модифицированное для Новой Зеландии',
'Abbreviation': 'Аббревиатура',
'Ability to Fill Out Surveys': 'Возможность для заполнения Обследований',
'Ability to customize the list of details tracked at a Shelter': 'Возможность для настройки списка подробной информации отслеживается в Жильем',
'Ability to customize the list of human resource tracked at a Shelter': "Возможность для настройки списка управления людскими ресурсами 'завершенные в Жильем",
'Ability to customize the list of important facilities needed at a Shelter': 'Возможность настраивать в список важных объектов необходимо в Жильем',
'Ability to view Results of Completed and/or partially filled out Surveys': 'Возможность просмотра результатов завершенных и/или частично заполненной Обследований',
'About': 'Сведения о',
'Access denied': 'Доступ запрещен',
'Access to Shelter': 'Доступ к жилью',
'Access to education services': 'Доступ к образовательным услугам',
'Accessibility of Affected Location': 'Возможность доступа к зоне бедствия',
'Account Registered - Please Check Your Email': 'Счет зарегистрирован - Пожалуйста, проверьте свою электронную почту',
'Acronym': 'Акроним',
'Actionable by all targeted recipients': 'Звонках на всех целевых получателей',
'Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>': 'Компенсационных только, упражнение участников; осуществление идентификатор должна появиться в<note>',
'Actioned?': 'Выполнять?',
'Actions taken as a result of this request.': 'Меры в результате этого запроса.',
'Actions': 'С действиями',
'Activate Events from Scenario templates for allocation of appropriate Resources (Human, Assets & Facilities).': 'Активируйте события из шаблонов Сценария для распределения соответствующих ресурсов (людских, активов& средств).',
'Active Problems': 'Текущие проблемы',
'Active': 'текущий',
'Activities matching Assessments:': 'Деятельность, соответствующая оценкам:',
'Activities of boys 13-17yrs before disaster': 'Активность мальчиков 13 - 17 лет до катастрофы',
'Activities of boys 13-17yrs now': 'Деятельность мальчиков 13 - 17лет сейчас',
'Activities of boys <12yrs before disaster': 'Активность мальчиков <12 лет перед катастрофой',
'Activities of boys <12yrs now': 'Активность мальчиков <12 лет сейчас',
'Activities of children': 'Деятельность детей',
'Activities of girls 13-17yrs before disaster': 'Деятельность девочек 13 - 17лет до бедствия',
'Activities of girls 13-17yrs now': 'Деятельность девочек 13 - 17лет сейчас',
'Activities of girls <12yrs before disaster': 'Деятельность девочек <12лет до бедствия',
'Activities of girls <12yrs now': 'Деятельность девочек <12лет в настоящее время',
'Activities': 'Операциях',
'Activity Added': 'Действие добавлено',
'Activity Deleted': 'Действие Удалено',
'Activity Details': 'Сведения о действиях',
'Activity Report': 'Отчет об операциях',
'Activity Reports': 'Отчеты об операциях',
'Activity Type': 'Тип операции',
'Activity Updated': 'Действие Обновлено',
'Activity': 'деятельность',
'Add Activity Type': 'Добавить тип работы',
'Add Address': 'Добавить адрес',
'Add Alternative Item': 'Добавить альтернативной элемент',
'Add Assessment Summary': 'Добавить суммарную оценку',
'Add Assessment': 'Добавить Оценки',
'Add Asset Log Entry - Change Label': 'Добавить актив записи протокола - изменить метки',
'Add Availability': 'Добавить наличие',
'Add Baseline Type': 'Добавить тип базового уровеня',
'Add Baseline': 'Добавление базовый уровень',
'Add Bundle': 'Добавить Комплект',
'Add Camp Service': 'Добавить Лагерную Службу',
'Add Camp Type': 'Добавить тип лагеря',
'Add Camp': 'Добавить Лагерь',
'Add Certificate for Course': 'Добавить курс Сертификата',
'Add Certification': 'Добавить Сертификации',
'Add Competency': 'Добавить компетентность',
'Add Contact': 'Добавить контакт',
'Add Contact Information': 'Добавление контактной информации',
'Add Credential': 'Добавить новое разрешение',
'Add Credentials': 'Добавить идентификационные данные',
'Add Disaster Victims': 'Добавить жертв бедствий',
'Add Distribution.': 'Добавить распределения.',
'Add Document': 'Добавить документ',
'Add Donor': 'Добавить Доноров',
'Add Flood Report': 'Добавить Наводнениями Отчета',
'Add Group Member': 'Добавление члена группы',
'Add Human Resource': 'Добавить людских ресурсов',
'Add Identity': 'Добавить Identity',
'Add Image': 'Добавить изображение',
'Add Impact Type': 'Добавить тип воздействия',
'Add Impact': 'Добавить Влияние',
'Add Item to Catalog': 'Добавить элемент в каталог',
'Add Item to Commitment': 'Добавить элемент к Делу',
'Add Item to Inventory': 'Добавить элемент в Запасов',
'Add Item to Request': 'Добавить элемент в Запрос',
'Add Item to Shipment': 'Добавить элемент к партии товара',
'Add Item': 'Добавить элемент',
'Add Job Role': 'Добавить задание Роль',
'Add Key': 'Добавить ключ',
'Add Kit': 'Добавить Kit',
'Add Level 1 Assessment': 'Добавить уровень 1 Оценки',
'Add Level 2 Assessment': 'Добавить уровень 2 Оценки',
'Add Location': 'Создать размещение',
'Add Log Entry': 'Добавить запись журнала',
'Add Member': 'Добавить элемент',
'Add Membership': 'Добавить членство',
'Add Message': 'Добавить сообщение',
'Add Mission': 'Добавить Миссии',
'Add Need Type': 'Добавить необходимый тип',
'Add Need': 'Добавить Необходимо',
'Add New Assessment Summary': 'Добавить новую сводную оценку',
'Add New Baseline Type': 'Добавить новый Базовый Тип',
'Add New Baseline': 'Добавить новую контрольную версию',
'Add New Budget': 'Добавить новый бюджет',
'Add New Bundle': 'Добавить новый комплект',
'Add New Camp Service': 'Добавить новый Лагере Службы',
'Add New Camp Type': 'Добавить новый Лагере Тип',
'Add New Camp': 'Добавить новый Лагере',
'Add New Cluster Subsector': 'Добавить новый кластер Подсектора',
'Add New Cluster': 'Добавить новый кластер',
'Add New Commitment Item': 'Добавить новые обязательства Элемента',
'Add New Document': 'Добавить новый документ',
'Add New Donor': 'Добавить нового донора',
'Add New Entry': 'Добавить новую запись',
'Add New Event': 'Добавить новое событие',
'Add New Flood Report': 'Добавить новый Наводнениям Отчет',
'Add New Human Resource': 'Добавить новый людских ресурсов',
'Add New Image': 'Добавить новое изображение',
'Add New Impact Type': 'Добавить новый тип воздействия',
'Add New Impact': 'Добавить новое воздействие',
'Add New Item to Kit': 'Добавить новый элемент в комплект',
'Add New Key': 'Добавьте новый ключ',
'Add New Level 1 Assessment': 'Добавить новую оценку уровня 1',
'Add New Level 2 Assessment': 'Добавить новую оценку уровня 2',
'Add New Member': 'Добавить новый элемент',
'Add New Membership': 'Добавить новое членство',
'Add New Need Type': 'Добавить новый необходимый тип',
'Add New Need': 'Добавить новый Необходимо',
'Add New Population Statistic': 'Добавить новый Народонаселению Статистики',
'Add New Problem': 'Добавить новую проблему',
'Add New Rapid Assessment': 'Добавить новый оперативной оценки',
'Add New Received Item': 'Добавить новый полученной номенклатуры',
'Add New Record': 'Добавить новую запись',
'Add New Request Item': 'Добавить новый запрос Элемента',
'Add New Request': 'Добавить новый запрос',
'Add New River': 'Добавить новый Реке',
'Add New Role to User': 'Добавить новую роль для пользователя',
'Add New Scenario': 'Добавить новый Сценарий',
'Add New Sent Item': 'Добавить новый посланный элемент',
'Add New Setting': 'Добавить новую установку',
'Add New Solution': 'Добавить новое решение',
'Add New Staff Type': 'Добавить новый тип персонала',
'Add New Subsector': 'Добавить новый Подсектора',
'Add New Survey Answer': 'Добавить новый Ответ анкеты',
'Add New Survey Question': 'Добавить новое Вопрос анкеты',
'Add New Survey Series': 'Добавить новые серии анкет',
'Add New Survey Template': 'Добавить новый шаблон опроса',
'Add New Team': 'Добавить новую Группу',
'Add New Ticket': 'Добавить новый паспорт',
'Add New Track': 'Добавить новый Отслеживания',
'Add New User to Role': 'Добавить нового пользователя к Роли',
'Add New': 'Добавить новый',
'Add Peer': 'Добавление Однорангового',
'Add Person': 'добавить пользователя',
'Add Photo': 'Добавить Фото',
'Add Population Statistic': 'Добавить Народонаселения Статистики',
'Add Position': 'Добавить положение',
'Add Problem': 'Добавить проблему',
'Add Question': 'Добавить вопрос',
'Add Rapid Assessment': 'Добавить оперативную оценку',
'Add Record': 'Добавить запись',
'Add Reference Document': 'Добавить ссылку Документа',
'Add Report': 'Добавить отчет',
'Add Request': 'требование ADD',
'Add Resource': 'Добавление ресурса',
'Add Section': 'Добавить раздел',
'Add Setting': 'Добавить параметр',
'Add Skill Equivalence': 'Добавить Навык Equivalence',
'Add Skill Provision': 'Добавить Навык Предоставления',
'Add Solution': 'Добавить решение',
'Add Staff Type': 'Добавить тип сотрудников',
'Add Subscription': 'добавить подпись',
'Add Subsector': 'Добавить Подсектора',
'Add Survey Answer': 'Добавить ответ анкеты',
'Add Survey Question': 'Добавить вопрос анкеты',
'Add Survey Series': 'Добавить серию анкеты',
'Add Survey Template': 'Добавить шаблон опроса',
'Add Team Member': 'Добавить элемент',
'Add Team': 'Добавить группу',
'Add Ticket': 'Добавить паспорт',
'Add Training': 'Добавить Обучения',
'Add Unit': 'Добавить подразделение',
'Add Volunteer Availability': 'Добавить Доброволец Доступности',
'Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead.': 'Добавить ссылку документа такого как файл, URL или контактного лица для проверки данных. Если вы не вводите справочный документ, ваш почтовый будет показан вместо.',
'Add a Volunteer': 'Добавить волонтера',
'Add a new certificate to the catalog.': 'Добавить новый сертификат в каталог.',
'Add a new competency rating to the catalog.': 'Добавить новый competency рейтинг в каталоге.',
'Add a new course to the catalog.': 'Добавить новый курс в каталоге.',
'Add a new job role to the catalog.': 'Добавить новое задание роль в каталоге.',
'Add a new skill provision to the catalog.': 'Добавить новый навык предоставления каталога.',
'Add a new skill to the catalog.': 'Добавить новый навык в каталоге.',
'Add a new skill type to the catalog.': 'Добавить новый тип навыка для каталога.',
'Add new Group': 'Добавить новую группу',
'Add new Individual': 'Добавить новый Отдельных',
'Add new project.': 'Добавить новый проект.',
'Add staff members': 'Добавить сотрудников',
'Add to Bundle': 'Добавить к комплекту',
'Add to budget': 'Добавить в бюджет',
'Add volunteers': 'Добавить добровольцы',
'Add': 'На доба- вление',
'Add/Edit/Remove Layers': 'Добавить/изменить/удалить Уровнями',
'Added to Group': 'Членство добавлено',
'Added to Team': 'Членство добавлено',
'Additional Beds / 24hrs': 'Дополнительные Койки/сутки',
'Address Details': 'Сведения об адресе',
'Address Type': 'Тип адреса',
'Address added': 'Адрес добавлен',
'Address deleted': 'Адрес удален',
'Address updated': 'Адрес обновлен',
'Address': 'Улица, дом',
'Addresses': 'адреса',
'Adequate food and water available': 'Достаточно продовольствию и воде доступны',
'Adequate': 'Достаточно',
'Admin Email': 'E-mail системного администратора',
'Admin Name': 'Имя системного администратора',
'Admin Tel': 'Телефон системного администратора',
'Administration': 'управление',
'Admissions/24hrs': 'Прием/круглосуточно',
'Adolescent (12-20)': 'Подросток (12 - 20)',
'Adolescent participating in coping activities': 'Участие подростков в преодолении деятельности',
'Adult (21-50)': 'Взрослых (21 - 50)',
'Adult ICU': 'Взрослых ICU',
'Adult Psychiatric': 'Взрослых Психиатрических',
'Adult female': 'Взрослых женщин',
'Adult male': 'Взрослых мужчин',
'Adults in prisons': 'Взрослых в тюрьмах',
'Advanced:': 'Дополнительно:',
'Advisory': 'Советник',
'After clicking on the button, a set of paired items will be shown one by one. Please select the one solution from each pair that you prefer over the other.': 'После щелкните по кнопке, набора парными элементы будут показаны по одному из них. Пожалуйста, выберите одно решение от каждой пары вы предпочитаете через другие.',
'Age Group': 'Возрастная группа',
'Age group does not match actual age.': 'Возрастной группы не соответствуют фактическим возраста.',
'Age group': 'Возрастная группа',
'Aggravating factors': 'Отягчающие факторы',
'Agriculture': 'Сельское хозяйство',
'Air Transport Service': 'Службы воздушного транспорта',
'Aircraft Crash': 'Самолеты Сбоя',
'Aircraft Hijacking': 'Угона самолета',
'Airport Closure': 'Аэропорту Закрытия',
'Airspace Closure': 'Воздушное Закрытия',
'Alcohol': 'Алкоголь',
'Alert': 'Тревога',
'All Inbound & Outbound Messages are stored here': 'Все входящие и выходящие сообщений запоминаются здесь',
'All Resources': 'Все ресурсы',
'All data provided by the Sahana Software Foundation from this site is licenced under a Creative Commons Attribution licence. However, not all data originates here. Please consult the source field of each entry.': 'Все данные, предоставленные фондом программного обеспечения Sahana с этого сайта, лицензированы по лицензии Creative Commons Attribution. Тем не менее не все данные взяты из этого источника. Пожалуйста, обратитесь к полю источника для каждой записи.',
'All': 'Со всех сторон',
'Allowed to push': 'Разрешено поместить в стек',
'Allows a Budget to be drawn up': 'Позволяет вытащить бюджет',
'Allows authorized users to control which layers are available to the situation map.': 'Позволяет авторизованным пользователям управлять правами доступа к слоям на ситуационной карте.',
'Alternative Item Details': 'Альтернативные сведения о номенклатуре',
'Alternative Item added': 'Альтернативный элемент добавлен',
'Alternative Item deleted': 'Альтернативный элемент удален',
'Alternative Item updated': 'Альтернативной номенклатуры обновляется',
'Alternative Item': 'Альтернативной номенклатуры',
'Alternative Items': 'Альтернативные номенклатуры',
'Alternative places for studying': 'Альтернативные места для изучения',
'Ambulance Service': 'Служба скорой помощи',
'An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities.': 'С VLSI системы, то системы управления складом, сырье отслеживания, управления цепочками поставок, закупок и других активов и возможности управления ресурсами.',
'An item which can be used in place of another item': 'Пункту, который можно использовать вместо другого элемента',
'Analysis of Completed Surveys': 'Анализ законченного анкетирования',
'Animal Die Off': 'Животных Вырубленных Off',
'Animal Feed': 'Feed животных',
'Antibiotics available': 'Антибиотики доступны',
'Antibiotics needed per 24h': 'Антибиотики необходимы на 24 часа',
'Apparent Age': 'Очевидным Возраста',
'Apparent Gender': 'Очевидно Гендерной',
'Application Deadline': 'Приложение Срок',
'Approve': 'утверждение',
'Approved': 'утверждено',
'Approver': 'Утверждающий',
'Arctic Outflow': 'Арктический Оттока',
'Areas inspected': 'Области осмотрены',
'Assessment Details': 'Детали оценки',
'Assessment Reported': 'Оценки доложена',
'Assessment Summaries': 'Сводки оценок',
'Assessment Summary Details': 'Детали суммарной оценки',
'Assessment Summary added': 'Сводная оценка добавлена',
'Assessment Summary deleted': 'Сводная оценка удаленные',
'Assessment Summary updated': 'Сводная оценка обновляется',
'Assessment added': 'Оценки добавлен',
'Assessment admin level': 'Оценки уровня администратора',
'Assessment deleted': 'Оценка удалена',
'Assessment timeline': 'Оценки timeline',
'Assessment updated': 'Оценка обновлена',
'Assessment': 'Оценка',
'Assessments Needs vs. Activities': 'Необходимости оценки сравнивая с показателямим',
'Assessments and Activities': 'Оценки и показатели',
'Assessments': 'Оценок',
'Assessments:': 'Оценки:',
'Asset Details': 'Сведения о ресурсе',
'Asset Log Details': 'Средств подробности журнала',
'Asset Log Empty': 'Актива Журнала Empty',
'Asset Log Entry Added - Change Label': 'Средств записи журнала добавлена - изменить метки',
'Asset Log Entry deleted': 'Основного Журнала запись удалена',
'Asset Log Entry updated': 'Основного Журнала запись обновлена',
'Asset Log': 'Актива Журнала',
'Asset Management': 'Управление ресурсами',
'Asset Number': 'Номер актива',
'Asset added': 'Ресурс добавлен',
'Asset deleted': 'Ресурс удален',
'Asset removed': 'Ресурс удален',
'Asset updated': 'Ресурс обновлен',
'Asset': 'Ресурсы',
'Assets are resources which are not consumable but are expected back, so they need tracking.': 'Имущество - ресурс, который не потебляется, возвращается назад, поэтому его необходимо отслеживать.',
'Assets': 'ресурсы',
'Assign Group': 'Assign Группы',
'Assign Staff': 'Назначения персонала',
'Assign to Org.': 'Assign в Org.',
'Assign to Organization': 'Назначить Организации',
'Assign to Person': 'Назначить для сотрудника',
'Assign to Site': 'Assign на сайт',
'Assign': 'Укажите',
'Assigned By': 'Назначен',
'Assigned To': 'Связано с',
'Assigned to Organization': 'Назначены для Организации',
'Assigned to Person': 'Назначить Лицо',
'Assigned to Site': 'Назначенные на сайт',
'Assigned to': 'Связано с',
'Assigned': 'Присвоено',
'At/Visited Location (not virtual)': 'At/Просмотренную расположение (не виртуальный)',
'Attend to information sources as described in <instruction>': 'Посетить к источникам информации как описано в<instruction>',
'Attribution': 'Возложение',
'Author': 'Автор',
'Availability': 'Доступность',
'Available Alternative Inventories': 'Доступных альтернативных Кадастров',
'Available Beds': 'Доступны Коек',
'Available Inventories': 'Доступны Кадастров',
'Available Messages': 'Доступные сообщения',
'Available Records': 'Доступных записей',
'Available databases and tables': 'Доступные базы данных и таблицы',
'Available for Location': 'Доступны для расположения',
'Available from': 'Доступны из',
'Available in Viewer?': 'Доступные в Viewer?',
'Available until': 'Доступен до',
'Avalanche': 'Лавина',
'Avoid the subject event as per the <instruction>': 'Избегать темы событий как для',
'Background Color for Text blocks': 'Цвета фона для текстовых блоков',
'Background Color': 'Цвет фона',
'Bahai': 'Бехаистов',
'Banana': 'Банан',
'Bank/micro finance': 'Банк/микрофинансирование',
'Barricades are needed': 'Необходимы баррикады',
'Base Layer?': 'Базовый слой?',
'Base Location': 'Базовый участок',
'Base Site Set': 'Базовый сайт Set',
'Baseline Data': 'Baseline Данных',
'Baseline Number of Beds': 'Baseline Число койко мест',
'Baseline Type Details': 'Baseline сведения о типе',
'Baseline Type added': 'Baseline добавлен тип',
'Baseline Type deleted': 'Baseline Тип удален',
'Baseline Type updated': 'Baseline Тип обновления',
'Baseline Type': 'Baseline Тип',
'Baseline Types': 'Baseline Типы',
'Baseline added': 'Baseline добавлен',
'Baseline deleted': 'Baseline удален',
'Baseline number of beds of that type in this unit.': 'Baseline число коек этого типа в этой группы.',
'Baseline updated': 'Baseline обновляется',
'Baselines Details': 'Базовые Сведения',
'Baselines': 'контрольные версии',
'Basic Assessment Reported': 'Основная заявленная оценка',
'Basic Assessment': 'Основная Оценка',
'Basic Details': 'Основные сведения',
'Basic reports on the Shelter and drill-down by region': 'Основные доклады о жилищного строительства и drill - down по регионам',
'Baud rate to use for your modem - The default is safe for most cases': 'Скорость передачи в бодах для модеме по умолчанию используется потокобезопасным в большинстве случаев',
'Baud': 'Бодах',
'Beam': 'Луч',
'Bed Capacity per Unit': 'Кровать Мощности на единицу',
'Bed Capacity': 'Кровать Мощности',
'Bed Type': 'Кровать Тип',
'Bed type already registered': 'Кровать тип уже зарегистрирован',
'Below ground level': 'Ниже уровня земли',
'Beneficiary Type': 'Бенефициарах Тип',
'Biological Hazard': 'Опасности биологического',
'Biscuits': 'Сухое печенье',
'Blizzard': 'снежная буря',
'Blood Type (AB0)': 'Тип крови (ab0)',
'Blowing Snow': 'Снежный вихрь',
'Boat': 'Лодка',
'Bodies found': 'Найденные тела',
'Bodies recovered': 'Восстановленные органы',
'Body Recovery Request': 'Тело требование восстановления',
'Body Recovery Requests': 'Тело требования восстановления',
'Body': 'тело',
'Bomb Explosion': 'Взрыва бомбы',
'Bomb Threat': 'Угрозы взрыва',
'Bomb': 'Бомбы',
'Border Color for Text blocks': 'Границы Кожи для блоков текста',
'Brand Details': 'Марки Сведения',
'Brand added': 'Бренд добавлен',
'Brand deleted': 'Бренд удален',
'Brand updated': 'Бренд обновлен',
'Brand': 'торговым маркам',
'Brands': 'Торговые марки',
'Bricks': 'Кирпичей',
'Bridge Closed': 'Мост Закрыт',
'Bucket': 'Хэш-блок',
'Buddhist': 'Буддист',
'Budget Details': 'Детали бюджета',
'Budget Updated': 'Бюджет Обновлен',
'Budget added': 'Бюджет добавлен',
'Budget deleted': 'Бюджет удален',
'Budget updated': 'Бюджет обновлен',
'Budget': 'Бюджет',
'Budgeting Module': 'Бюджетный модуль',
'Budgets': 'Бюджеты',
'Buffer': 'Буфер',
'Bug': 'Ошибка',
'Building Assessments': 'Оценки строительства',
'Building Collapsed': 'Строение разрушилось',
'Building Name': 'Номер здания',
'Building Safety Assessments': 'Построение Безопасности Оценки',
'Building Short Name/Business Name': 'Построение краткое имя/название предприятия',
'Building or storey leaning': 'Здания или четырехэтажное склоняется',
'Built using the Template agreed by a group of NGOs working together as the': 'Встроенные с помощью этого шаблона к группы нпо совместной работы в',
'Bulk Uploader': "Bulk 'средство",
'Bundle Contents': 'Содержимое комплекта',
'Bundle Details': 'Детали комплекта',
'Bundle Updated': 'Комплект Обновления',
'Bundle added': 'Комплект добавлен',
'Bundle deleted': 'Комплект удален',
'Bundle updated': 'Комплект обновлен',
'Bundle': 'Комплект',
'Bundles': 'комплекты',
'Burn ICU': 'Сжигайте ICU',
'Burn': 'Сжигайте',
'Burned/charred': 'Сожжены/обгоревшее',
'By Facility': 'На Facility',
'By Inventory': 'По реестру',
'CBA Women': 'CBA Женщин',
'CN': 'cn (полное имя)',
'CSS file %s not writable - unable to apply theme!': 'Файл(ы) CSS% не записываемые - невозможно обратиться к теме!',
'Calculate': 'Вычислить',
'Camp Coordination/Management': 'Координирование/ управление лагерем',
'Camp Details': 'Детали лагеря',
'Camp Service Details': 'Лагерь сведения о службе',
'Camp Service added': 'Услуга лагеря добавлена',
'Camp Service deleted': 'Услуга лагеря удалена',
'Camp Service updated': 'Лагерь Службы обновления',
'Camp Service': 'обслуживание лагеря',
'Camp Services': 'Лагерь Services',
'Camp Type Details': 'Лагерь сведения о типе',
'Camp Type added': 'Добавлен тип лагере',
'Camp Type deleted': 'Лагерь Тип удален',
'Camp Type updated': 'Лагерь Тип обновления',
'Camp Type': 'Тип лагере',
'Camp Types and Services': 'Типы и службы лагере',
'Camp Types': 'Типы лагере',
'Camp added': 'Лагерь добавлен',
'Camp deleted': 'Лагерь удален',
'Camp updated': 'Лагерь обновляется',
'Camp': 'Лагерь',
'Camps': 'Лагеря',
'Can only disable 1 record at a time!': 'Можно за один раз дезактивировать 1 запись!',
'Cancel Log Entry': 'Отмена записи журнала',
'Cancel': 'отменить',
'Canceled': 'Аннулировано',
'Candidate Matches for Body %s': 'Кандидат Соответствует для Body% s',
'Canned Fish': 'Готовой Рыбных',
'Cannot be empty': 'Пустое значение недопустимо',
'Cannot disable your own account!': 'Нельзя отключить собственную учетную!',
'Capacity (Max Persons)': 'Емкость (Максимальное лиц)',
'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)': 'Сбора данных о Стихийных Бедствий групп жертв (туристы, пассажиры, семьями, и т. )',
'Capture Information on each disaster victim': 'Capture информации по каждому жертвам стихийных бедствий',
'Capturing the projects each organization is providing and where': 'Сбор проектов каждая организация предусматривает и где',
'Cardiology': 'Кардиология',
'Cassava': 'Маниока',
'Casual Labor': 'временная рабочая сила',
'Casualties': 'Потери',
'Catalog Details': 'Детали каталога',
'Catalog Item added': 'Catalog элемент добавлен',
'Catalog Item deleted': 'Catalog элемент удален',
'Catalog Item updated': 'Элемент каталога обновляется',
'Catalog Items': 'Элементы каталога',
'Catalog added': 'Каталог добавлен',
'Catalog deleted': 'Каталог удалён',
'Catalog updated': 'Каталог обновлен',
'Catalog': 'Каталог',
'Catalogs': 'Каталоги',
'Categories': 'Категории',
'Category': 'Категория',
'Ceilings, light fixtures': 'Потолки, легких арматуры',
'Central point to record details on People': 'Центральной точки для записи информации на сотрудников',
'Certificate Catalog': 'Сертификат Каталога',
'Certificate Details': 'Сведения о сертификате',
'Certificate Status': 'Состояние сертификата',
'Certificate added': 'Добавление сертификата',
'Certificate deleted': 'Сертификат удален',
'Certificate updated': 'Сертификат обновляется',
'Certificate': 'Сертификат',
'Certificates': 'сертификаты',
'Certification Details': 'Детали сертификата',
'Certification added': 'Сертификат добавлен',
'Certification deleted': 'Сертификат удален',
'Certification updated': 'Сертификат обновлен',
'Certification': 'Сертификация',
'Certifications': 'Сертификаты',
'Certifying Organization': 'Сертифицирующая организация',
'Change Password': 'Смена пароля',
'Check Request': 'Проверьте запрос',
'Check for errors in the URL, maybe the address was mistyped.': 'Исправьте URL, возможно, в адресе есть опечатка.',
'Check if the URL is pointing to a directory instead of a webpage.': 'Проверьте, не указывает ли URL на каталог вместо веб-страницы',
'Check outbox for the message status': 'Проверьте статус сообщения в исходящих',
'Check to delete': 'Проверьте, чтобы удалить',
'Check': 'Проверить',
'Checked': 'проверено',
'Checklist created': 'Контрольная таблица создана',
'Checklist deleted': 'Контрольная таблица удалена',
'Checklist of Operations': 'Перечень операций',
'Checklist updated': 'Обновленный перечень',
'Checklist': 'контрольная таблица',
'Chemical Hazard': 'Химическая опасности',
'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack': 'Угроза применения или атака с применением химического, биологического, радиологического, ядерного оружия или мощного взрывчатого вещества',
'Chicken': 'Цыпленок',
'Child (2-11)': 'Ребенок (2 - 11)',
'Child (< 18 yrs)': 'Ребенок (< 18 лет)',
'Child Abduction Emergency': 'Критическое положение с похищением ребенка',
'Child headed households (<18 yrs)': 'Домашних хозяйства, возглавляемые детьми (<18 лет)',
'Child': 'Ребенок',
'Children (2-5 years)': 'Детей (2 - 5 лет)',
'Children (5-15 years)': 'Детей (5 - 15 лет)',
'Children (< 2 years)': 'Детей (< 2 лет)',
'Children in adult prisons': 'Детей в тюрьмах для взрослых',
'Children in boarding schools': 'Детей в школах интернатах',
'Children in homes for disabled children': 'Детей в дома для детей инвалидов',
'Children in juvenile detention': 'Детей содержания несовершеннолетних в',
'Children in orphanages': 'Дети в приютах',
'Children living on their own (without adults)': 'Дети, живущие самостоятельно (без взрослых)',
'Children not enrolled in new school': 'Дети не зарегистрированы в новой школе',
'Children orphaned by the disaster': 'Дети осиротели из-за бедствия',
'Children separated from their parents/caregivers': 'Дети разлучены со своими родителями/опекунами',
'Children that have been sent to safe places': 'Дети, которые были отправлены в безопасные места',
'Children who have disappeared since the disaster': 'Дети, которые исчезли с момента аварии',
'Chinese (Taiwan)': 'Китаец (Тайвань)',
'Cholera Treatment Capability': 'Возможность лечения холеры',
'Cholera Treatment Center': 'Центр лечения холеры',
'Cholera Treatment': 'Лечение холеры',
'Cholera-Treatment-Center': 'Центр - лечения - холеры',
'Choose a new posting based on the new evaluation and team judgement. Severe conditions affecting the whole building are grounds for an UNSAFE posting. Localised Severe and overall Moderate conditions may require a RESTRICTED USE. Place INSPECTED placard at main entrance. Post all other placards at every significant entrance.': 'Выберите новый разноски на основе новой оценки и группа решения. Серьезная условия, влияющие на всей миростроительства являются служат основанием для НЕБЕЗОПАСНЫЙ разноски. Локализованную Серьезная и в целом средняя условий может потребоваться ОГРАНИЧЕННОГО ИСПОЛЬЗОВАНИЯ. Место ОСМОТРЕЛА легковоспламеняющаяся из главного входа. После всех остальных табло на всех существенных входа.',
'Christian': 'христианин',
'Church': 'Церковь',
'City': 'Город',
'Civil Emergency': 'Гражданских Чрезвычайных',
'Cladding, glazing': 'Герметизируется, стекловые',
'Click on the link %(url)s to reset your password': 'Щелкните по ссылке %(url)s Для сброса пароля',
'Click on the link %(url)s to verify your email': 'Щелкните по ссылке %(url)s чтобы проверить вашу электронную почту',
'Clinical Laboratory': 'Клинической Лаборатории',
'Clinical Operations': 'Отдел клинического применения',
'Clinical Status': 'Клинической Состояние',
'Closed': 'закрытыми',
'Clothing': 'одежда',
'Cluster Details': 'Детали кластера',
'Cluster Distance': 'Расстояние кластера',
'Cluster Subsector Details': 'Подсектора кластера Сведения',
'Cluster Subsector added': 'Подсектор кластера добавлен',
'Cluster Subsector deleted': 'Подсектор кластера удален',
'Cluster Subsector updated': 'Подсектора кластера обновляется',
'Cluster Subsector': 'Подсектор кластера',
'Cluster Subsectors': 'Кластера Подсекторов',
'Cluster Threshold': 'Порог кластера',
'Cluster added': 'Добавлен кластера',
'Cluster deleted': 'Кластер удален',
'Cluster updated': 'Обновление кластера',
'Cluster': 'кластер',
'Cluster(s)': 'Кластер (ы)',
'Clusters': 'Кластеры',
'Code': 'Код',
'Cold Wave': 'Холодной Волну',
'Collapse, partial collapse, off foundation': 'Свернуть, частично свернуть, выкл foundation',
'Collective center': 'Коллективные center',
'Color for Underline of Subheadings': 'Цвета для Underline - Подзаголовках',
'Color of Buttons when hovering': 'Кожи кнопок при наведении',
'Color of bottom of Buttons when not pressed': 'Кожи снизу кнопок при не нажата',
'Color of bottom of Buttons when pressed': 'Кожи в нижней Кнопки при нажатии',
'Color of dropdown menus': 'Кожи - выпадающем меню',
'Color of selected Input fields': 'Кожи выбранных полей ввода',
'Color of selected menu items': 'Кожи выбранных пунктов меню',
'Columns, pilasters, corbels': 'Столбцы, pilasters, corbels',
'Combined Method': 'Сочетании Метод',
'Come back later. Everyone visiting this site is probably experiencing the same problem as you.': 'Вернуться позже. Все посещения этого сайта возможно испытывает одинаковые проблемы и вы.',
'Come back later.': 'Вернуться позже.',
'Comments': 'Примечания',
'Commercial/Offices': 'Коммерческих/Отделений',
'Commit Date': 'Дата передачи на выполнение',
'Commit from %s': 'Передавать на выполнение от %s',
'Commit': 'Фиксировать',
'Commit Status': 'статус запроса на выполнение',
'Commiting a changed spreadsheet to the database': 'Фиксацией изменения таблицы для базы',
'Commitment Added': 'Делу Добавлен',
'Commitment Canceled': 'Делу Отменена',
'Commitment Details': 'Делу Сведения',
'Commitment Item Details': 'Делу сведения о номенклатуре',
'Commitment Item added': 'Делу элемент добавлен',
'Commitment Item deleted': 'Делу элемент удален',
'Commitment Item updated': 'Делу Элемент обновляется',
'Commitment Items': 'Делу Элементов',
'Commitment Status': 'статус запроса на выполнение',
'Commitment Updated': 'Делу Обновляется',
'Commitment': 'Обязательство',
'Commitments': 'Обязательства',
'Committed By': 'Принятые На',
'Committed': 'Сохранен',
'Committing Inventory': 'Фиксация Запасов',
'Communication problems': 'Неполадки связи',
'Community Centre': 'Центр общины',
'Community Health Center': 'Центр медицинского обслуживания в населенном пункте',
'Community Member': 'Член сообщества',
'Competencies': 'Качеств',
'Competency Details': 'Competency Сведения',
'Competency Rating Catalog': 'Competency Рейтинг Каталога',
'Competency Rating Details': 'Competency Рейтинг Сведения',
'Competency Rating added': 'Competency Рейтинг добавлен',
'Competency Rating deleted': 'Competency Рейтинг удален',
'Competency Rating updated': 'Competency Рейтинг обновляется',
'Competency Ratings': 'Competency Рейтинги',
'Competency added': 'Competency добавлен',
'Competency deleted': 'Competency удален',
'Competency updated': 'Competency обновляется',
'Complete': 'Полная',
'Completed': 'завершено',
'Complexion': 'цвет лица',
'Compose': 'Составить',
'Compromised': 'Скомпрометировано',
'Concrete frame': 'Конкретные фрейма',
'Concrete shear wall': 'Конкретные срезать wall',
'Condition': 'факторы',
'Configurations': 'Конфигурации',
'Configure Run-time Settings': 'Configure Запустите - параметры времени',
'Confirm Shipment Received': 'Подтвердите Поставка Полученных',
'Confirmed': 'Подтверждено',
'Confirming Organization': 'Подтверждение Организации',
'Conflict Details': 'Конфликт Сведения',
'Conflict Resolution': 'Устранение конфликтов',
'Consignment Note': 'Груза Примечание',
'Constraints Only': 'Ограничения Только',
'Consumable': 'Расходуемый',
'Contact Data': 'Данных контактов',
'Contact Details': 'Данные для связи',
'Contact Info': 'Контактная информация',
'Contact Information Added': 'Контактная информация Добавлена',
'Contact Information Deleted': 'Контактная информация Удалена',
'Contact Information Updated': 'Контактная информация обновлена',
'Contact Information': 'Контакты',
'Contact Method': 'Метод контакта',
'Contact Name': 'Имя для контакта',
'Contact Person': 'Контактное лицо',
'Contact Phone': 'Контактный телефон',
'Contact details': 'Данные для связи',
'Contact information added': 'Контактная информация добавлена',
'Contact information deleted': 'Контактная информация удалена',
'Contact information updated': 'Контактная информация обновлена',
'Contact us': 'Свяжитесь с нами',
'Contact': 'Контактное лицо',
'Contacts': 'Контакты',
'Contents': 'Текст',
'Contributor': 'Участник',
'Conversion Tool': 'Утилита преобразования',
'Cooking NFIs': 'Кулинарией NFIs',
'Cooking Oil': 'Кулинарией Нефти',
'Coordinate Conversion': 'Координата Преобразования',
'Coping Activities': 'Преодоления Деятельности',
'Copy': 'скопировать',
'Corn': 'Кукурузный',
'Cost Type': 'Тип стоимости',
'Cost per Megabyte': 'Стоимость за Мегабайт',
'Cost per Minute': 'Стоимость за минуту',
'Country of Residence': 'Страна проживания',
'Country': 'Страна',
'County': 'Округ',
'Course Catalog': 'Каталог курсов',
'Course Certificate Details': 'Курс Сертификата Сведения',
'Course Certificate added': 'Курс Клиент добавлен',
'Course Certificate deleted': 'Курс Клиент удален',
'Course Certificate updated': 'Курса Сертификат обновляется',
'Course Certificates': 'Сертификатов курса',
'Course Details': 'Сведения о курсе',
'Course added': 'Курс добавлен',
'Course deleted': 'Курс удален',
'Course updated': 'Курс обновляется',
'Course': 'Курса',
'Courses': 'Курсы',
'Create & manage Distribution groups to receive Alerts': '&создать управлять группами рассылки получать оповещения',
'Create Activity Report': 'Создать действие Отчета',
'Create Activity Type': 'Создать тип работы',
'Create Activity': 'Создать действие',
'Create Assessment': 'Создать оценку',
'Create Asset': 'Создать ресурс',
'Create Bed Type': 'Создать тип кровати',
'Create Brand': 'Создать Марки',
'Create Budget': 'Создать Бюджета',
'Create Catalog Item': 'Создать элемент каталога',
'Create Catalog': 'Создать каталог',
'Create Certificate': 'Создать сертификат',
'Create Checklist': 'Создавать Контрольный',
'Create Cholera Treatment Capability Information': 'Создать лечению холеры Возможность Информации',
'Create Cluster Subsector': 'Создать кластер Подсектора',
'Create Cluster': 'Создать кластер',
'Create Competency Rating': 'Добавление Competency Рейтинг',
'Create Contact': 'Создать контакт',
'Create Course': 'Создать курс',
'Create Dead Body Report': 'Добавление Недоставленных Сообщений Тело Отчета',
'Create Event': 'Создать новое событие',
'Create Facility': 'Создать средство',
'Create Feature Layer': 'Создать компонент Слой',
'Create Group Entry': 'Создание записи группы',
'Create Group': 'Создать группу',
'Create Hospital': 'Создать Больницу',
'Create Identification Report': 'Создать Код Отчета',
'Create Impact Assessment': 'Создавать оценки воздействия',
'Create Incident Report': 'Создать отчет о происшествии',
'Create Incident': 'Создать Инцидента',
'Create Item Category': 'Создать элемент Категории',
'Create Item Pack': 'Создать элемент Pack',
'Create Item': 'Создатьэлемент',
'Create Kit': 'Создать комплект',
'Create Layer': 'Создать слой',
'Create Location': 'Создать размещение',
'Create Map Profile': 'Создать конфигурации отображения',
'Create Marker': 'Создать маркер',
'Create Member': 'Создать элемент',
'Create Mobile Impact Assessment': 'Создание Мобильных оценки воздействия',
'Create Office': 'Создать офис',
'Create Organization': 'Создать организацию',
'Create Personal Effects': 'Добавлять личные Эффекты',
'Create Project': 'Создать проект',
'Create Projection': 'Добавление проекцию',
'Create Rapid Assessment': 'Создавать оперативной оценки',
'Create Report': 'Создать отчет',
'Create Request': 'Создать запрос',
'Create Resource': 'Добавление ресурса',
'Create River': 'Добавить реку',
'Create Role': 'Создать роль',
'Create Room': 'Создать Комнаты',
'Create Scenario': 'Создать новый сценарий',
'Create Sector': 'Создать сектор',
'Create Service Profile': 'Создать профиль службы',
'Create Shelter Service': 'Создать Жильем Службы',
'Create Shelter Type': 'Создать Жильем Типа',
'Create Shelter': 'Создать Жильем',
'Create Skill Type': 'Создать тип навыка',
'Create Skill': 'Добавить навык',
'Create Staff Member': 'Создать сотрудника',
'Create Status': 'Создать статус',
'Create Task': 'Создать задание',
'Create Theme': 'Добавьте тему',
'Create User': 'Добавление пользователя',
'Create Volunteer': 'Создать Доброволец',
'Create Warehouse': 'Создать Хранилища',
'Create a Person': 'Создать пользователя',
'Create a group entry in the registry.': 'Создать запись группы в реестре.',
'Create, enter, and manage surveys.': 'Создавать, enter, и управлять обследований.',
'Creation of Surveys': 'Создания Обследований',
'Credential Details': 'Данные Мандата',
'Credential added': 'Credential добавлен',
'Credential deleted': 'Credential удален',
'Credential updated': 'Мандат обновлен',
'Credentialling Organization': 'Рекомендующая организация',
'Credentials': 'Мандат',
'Credit Card': 'Кредитная карточка',
'Crime': 'Преступление',
'Criteria': 'Критерии',
'Currency': 'валюта',
'Current Entries': 'Текущие записи',
'Current Group Members': 'Текущих членов группы',
'Current Identities': 'Текущей Идентификаторами',
'Current Location': 'Текущее расположение',
'Current Log Entries': 'Текущей записи журнала',
'Current Memberships': 'Члены в настоящее время',
'Current Records': 'Текущие записи',
'Current Registrations': 'Текущий Регистраций',
'Current Status': 'Текущий статус',
'Current Team Members': 'Текущие члены группы',
'Current Twitter account': 'Текущего Twitter учетной',
'Current community priorities': 'Текущие приоритеты сообщества',
'Current general needs': 'Текущие общие потребности',
'Current greatest needs of vulnerable groups': 'Текущий наибольший потребностей уязвимых групп населения',
'Current health problems': 'Текущее состояние проблемы',
'Current number of patients': 'Количество пациентов в настоящее время',
'Current problems, categories': 'Текущие проблемы, категории',
'Current problems, details': 'Текущие проблемы, детали',
'Current request': 'Текущий запрос',
'Current response': 'Текущий ответ',
'Current session': 'текущая сессия',
'Currently no Certifications registered': 'В настоящее время не Сертификаты зарегистрированными',
'Currently no Competencies registered': 'В настоящее время не Качеств зарегистрированными',
'Currently no Course Certificates registered': 'В настоящее время нет Курса Сертификатов зарегистрированными',
'Currently no Credentials registered': 'В настоящее время нет Учетных зарегистрированными',
'Currently no Missions registered': 'В настоящее время не Миссий зарегистрированными',
'Currently no Skill Equivalences registered': 'В настоящее время нет навыков Эквиваленты зарегистрированными',
'Currently no Trainings registered': 'В настоящее время не &обучения зарегистрированными',
'Currently no entries in the catalog': 'В настоящее время нет записей в каталоге',
'DNA Profile': 'ДНК Профиль',
'DNA Profiling': 'Определение профиля ДНК',
'DVI Navigator': 'DVI Навигатор',
'Dam Overflow': 'Подъем воды над плотиной',
'Damage': 'Ущерб',
'Dangerous Person': 'Опасная личность',
'Dashboard': 'сводные панели',
'Data uploaded': 'Данных закачан',
'Data': 'Данные',
'Database': 'Сервер баз данных',
'Date & Time': 'Дата и время',
'Date Available': 'Дата доступна',
'Date Received': 'Дата получения',
'Date Requested': 'Дата запроса',
'Date Required': 'Дата, когда требуется',
'Date Sent': 'Дата отправления',
'Date Until': 'Дата до',
'Date and Time': 'Дату и время',
'Date and time this report relates to.': 'Дата и время, к которому относится этот отчет.',
'Date of Birth': 'Дата рождения',
'Date of Latest Information on Beneficiaries Reached': 'Даты о последнем Информации на бенефициарах Достижения',
'Date of Report': 'Дата отчета',
'Date': 'дату',
'Date/Time of Find': 'Даты/времени нахождения',
'Date/Time when found': 'Дата/время найти',
'Date/Time when last seen': 'Дата/время последний видимый',
'Date/Time': 'Дата и время',
'De-duplicator': 'Удалитель дубликатов',
'Dead Body Details': 'Описание трупа',
'Dead Body Reports': 'Отчеты об умерших',
'Dead Body': 'Труп',
'Dead body report added': 'Отчет об умершем добавлен',
'Dead body report deleted': 'Отчет об умершем удален',
'Dead body report updated': 'Отчет об умершем обновлен',
'Deaths in the past 24h': 'Погибшие в последние 24 часа',
'Deaths/24hrs': 'Число смертей за сутки',
'Decimal Degrees': 'Десятичные степени',
'Decision': 'РЕШЕНИЕ',
'Decomposed': 'Разобран на составные части',
'Default Height of the map window.': 'Высота по умолчанию окна карты.',
'Default Map': 'По умолчанию Карты',
'Default Marker': 'Маркер по умолчанию',
'Default Width of the map window.': 'Ширина по умолчанию окна карты.',
'Default synchronization policy': 'Политика синхронизации по умолчанию',
'Defecation area for animals': 'Область дефекации для животных',
'Define Scenarios for allocation of appropriate Resources (Human, Assets & Facilities).': 'Определить сценарии для распределения соответствующих ресурсов (людских, активы& Средства).',
'Defines the icon used for display of features on handheld GPS.': 'Определяет значок, применяемый для отображения возможностей на портативных GPS.',
'Defines the icon used for display of features on interactive map & KML exports.': 'Определяет значок, применяемый для отображения возможностей в интерактивном режиме map& KML экспорта.',
'Defines the marker used for display & the attributes visible in the popup.': 'Определяет маркер, используется для отображения и атрибутов отображается в popup.',
'Degrees must be a number between -180 and 180': 'Градусов должен иметь значение между - 180 и 180',
'Delete Alternative Item': 'Удалить альтернативной номенклатуры',
'Delete Assessment Summary': 'Удалить сводную оценку',
'Delete Assessment': 'Удалить оценку',
'Delete Asset Log Entry': 'Удалить актив запись протокола',
'Delete Asset': 'Удалить ресурс',
'Delete Baseline Type': 'Удалить Базовый Тип',
'Delete Baseline': 'Удалить Baseline',
'Delete Brand': 'Delete Марки',
'Delete Budget': 'Удалить бюджет',
'Delete Bundle': 'Удалить Комплект',
'Delete Catalog Item': 'Удалить элемент каталога',
'Delete Catalog': 'Удалить каталог',
'Delete Certificate': 'Удалить сертификат',
'Delete Certification': 'Удалить сертификацию',
'Delete Cluster Subsector': 'Удалить кластер Подсектора',
'Delete Cluster': 'Удалить кластер',
'Delete Commitment Item': 'Delete Делу Элемента',
'Delete Commitment': 'Delete Делу',
'Delete Competency Rating': 'Удалить рейтинг компетентности',
'Delete Competency': 'Удалить компетентность',
'Delete Contact Information': 'Удалить контактную информацию',
'Delete Course Certificate': 'Удалить сертификат курса',
'Delete Course': 'Удалить курс',
'Delete Credential': 'Удалить разрешение',
'Delete Document': 'Удалить документ',
'Delete Donor': 'Удалить донора',
'Delete Entry': 'Удалить запись',
'Delete Event': 'Удалить событие',
'Delete Feature Layer': 'Удалить компонент Слой',
'Delete Group': 'Удалить группу',
'Delete Hospital': 'Удалить Больницу',
'Delete Image': 'Удалить образ',
'Delete Impact Type': 'Удалить тип воздействия',
'Delete Impact': 'Удалить воздействие',
'Delete Incident Report': 'Удалить инцидент Отчета',
'Delete Item Category': 'Удалить элемент Категории',
'Delete Item Pack': 'Удалить элемент Pack',
'Delete Item': 'Удалить элемент',
'Delete Job Role': 'Задание удаления Роли',
'Delete Key': 'удаление ключа',
'Delete Kit': 'Удалить Kit',
'Delete Layer': 'Удалить слой',
'Delete Level 1 Assessment': 'Удалить уровень 1 Оценки',
'Delete Level 2 Assessment': 'Удалить уровень 2 Оценки',
'Delete Location': 'Удалить расположение',
'Delete Map Profile': 'Удалить конфигурацию отображения',
'Delete Marker': 'Удалить маркер',
'Delete Membership': 'Удалить Членство',
'Delete Message': 'Удалить сообщение',
'Delete Mission': 'Delete Миссии',
'Delete Need Type': 'Необходимо удалить Тип',
'Delete Need': 'Необходимо удалить',
'Delete Office': 'Удалить Office',
'Delete Organization': 'Удалить организацию',
'Delete Peer': 'Удалить Однорангового',
'Delete Person': 'удалить пользователя',
'Delete Photo': 'Удалить Фото',
'Delete Population Statistic': 'Удалить Народонаселения Статистики',
'Delete Position': 'Удалить Позицию',
'Delete Project': 'Удалить проект',
'Delete Projection': 'Удалить Проекционная',
'Delete Rapid Assessment': 'Удалить оперативной оценки',
'Delete Received Item': 'Удалить полученной номенклатуры',
'Delete Received Shipment': 'Удалить Полученных Поставка',
'Delete Record': 'Удалить запись',
'Delete Report': 'Удалить отчет',
'Delete Request Item': 'Требование удаления Элемента',
'Delete Request': 'Удалить требование',
'Delete Resource': 'удалить ресурс',
'Delete Room': 'Удалить помещение',
'Delete Scenario': 'Удалить сценарий',
'Delete Section': 'Удалить раздел',
'Delete Sector': 'Удалить Сектора',
'Delete Sent Item': 'Удалить отправленные Элемента',
'Delete Sent Shipment': 'Удалить отправленные Поставка',
'Delete Service Profile': 'Удалить профиль службы',
'Delete Setting': 'Удалить Параметр',
'Delete Skill Equivalence': 'Удалить навык Equivalence',
'Delete Skill Provision': 'Удалить навык Предоставления',
'Delete Skill Type': 'Удалить навык Тип',
'Delete Skill': 'Удалить навык',
'Delete Staff Type': 'Удалить Сотрудников Тип',
'Delete Status': 'Удалить состояние',
'Delete Subscription': 'Удалить подписку',
'Delete Subsector': 'Удалить Подсектора',
'Delete Survey Answer': 'Удалить Опрос Ответ',
'Delete Survey Question': 'Удалить Опроса Вопрос',
'Delete Survey Series': 'Удалить Опрос Ряда',
'Delete Survey Template': 'Удалить шаблон опроса',
'Delete Training': 'Удалить учебный',
'Delete Unit': 'Удалить блок',
'Delete User': 'Удалить пользователя',
'Delete Volunteer': 'удалить волонтера',
'Delete Warehouse': 'Удалить хранилище',
'Delete from Server?': 'Удалить с сервера?',
'Delete': 'Удалить',
'Delphi Decision Maker': 'ЛПР Дельфи',
'Demographic': 'Демографический',
'Demonstrations': 'Демонстрации',
'Dental Examination': 'Стоматологическое обследование',
'Dental Profile': 'Стоматологический Профиль',
'Describe the condition of the roads to your hospital.': 'Опишите состояние дорог к вашей больнице.',
"Describe the procedure which this record relates to (e.g. 'medical examination')": 'Опишите процедуру, к которой относится эта запись(например, \\ " медицинское обследование\\ ")',
'Description of Contacts': 'Описание контактов',
'Description of defecation area': 'Описание в defecation области',
'Description of drinking water source': 'Описание снабжения питьевой водой источника',
'Description of sanitary water source': 'Описание о санитарных вода источник',
'Description of water source before the disaster': 'Описание водных ресурсов источника до аварии',
'Description': 'Описание временного исправления',
'Desire to remain with family': 'Желание остаться с семейными',
'Destination': 'Цель',
'Destroyed': 'Уничтожено',
'Details field is required!': 'Сведения поле является обязательным!',
'Details': 'Таблица',
'Dialysis': 'Почечной',
'Diaphragms, horizontal bracing': 'Диафрагмы, горизонтальных готовится',
'Dignitary Visit': 'Посетите видного',
'Direction': 'Направленность',
'Disable': 'отключить',
'Disabled participating in coping activities': 'Отключен участвующих в преодолении деятельности',
'Disabled': 'отключен (отключены)',
'Disabled?': 'Отключен?',
'Disaster Victim Identification': 'Идентификация жертв бедствия',
'Disaster Victim Registry': 'Регистрация жертв бедствия',
'Disaster clean-up/repairs': 'Очистка / восстановление после бедствия',
'Discharge (cusecs)': 'Вытекание (кубический фут в секунду)',
'Discharges/24hrs': 'Сброс/сутки',
'Discussion Forum on item': 'дискуссионный форум по вопросу',
'Discussion Forum': 'дискуссионный форум',
'Disease vectors': 'Болезни векторов',
'Dispensary': 'Новорожденными',
'Displaced Populations': 'Перемещенных Населения',
'Displaced': 'Перемещенных',
'Display Polygons?': 'Отобразить многоугольники?',
'Display Routes?': 'Отображаемое маршрутов?',
'Display Tracks?': 'Отобразить дорожки?',
'Display Waypoints?': 'Отобразить точки маршрута?',
'Distance between defecation area and water source': 'Расстояние между областью очитсных сооружений и источником воды',
'Distance from %s:': 'Расстояние от% s:',
'Distance(Kms)': 'Расстояние (км)',
'Distribution groups': 'Группы распространения',
'Distribution': 'Распределение',
'District': 'Округ',
'Do you really want to delete these records?': 'Вы действительно хотите удалить эти записи?',
'Do you want to cancel this received shipment? The items will be removed from the Inventory. This action CANNOT be undone!': 'Вы хотите отменить это получено поставка? , элементы будут удалены из реестра. Это действие нельзя отменить!',
'Do you want to cancel this sent shipment? The items will be returned to the Inventory. This action CANNOT be undone!': 'Вы хотите отменить это отправлено поставка? Элементы, будут возвращены на склад. Это действие нельзя отменить!',
'Do you want to receive this shipment?': 'Вы хотите получить эту партию товара?',
'Do you want to send these Committed items?': 'Вы хотите послать эти зафиксированные элементы?',
'Do you want to send this shipment?': 'Вы хотите послать эту партию товара?',
'Document Details': 'Сведения о документе',
'Document Scan': 'Сканирование Документа',
'Document added': 'Документ добавлен',
'Document deleted': 'Документ удален',
'Document updated': 'Документ обновлен',
'Documents and Photos': 'Документы и Фотографии',
'Documents': 'Документы',
'Does this facility provide a cholera treatment center?': 'Предоставляется ли это возможность центром лечения холеры?',
'Doing nothing (no structured activity)': 'Ничего не делание (нет структурированной активности)',
'Dollars': 'Доллары',
'Domain': 'Домен',
'Domestic chores': 'Внутри освободило',
'Donated': 'Пожертвованный',
'Donation Certificate': 'Пожертвования Сертификат',
'Donation Phone #': 'Пожертвования Телефон #',
'Donor Details': 'Данные донора',
'Donor added': 'Донор добавлен',
'Donor deleted': 'Донор удален',
'Donor updated': 'Донор обновлен',
'Donor': 'Донор',
'Donors Report': 'Отчет доноров',
'Donors': 'Доноры',
'Door frame': 'Дверной проем',
'Download PDF': 'Скачать PDF',
'Draft': 'Черновые',
'Drainage': 'дренаж',
'Drawing up a Budget for Staff & Equipment across various Locations.': 'составление бюджета на штаты и оборудование для различных мест',
'Drill Down by Group': 'Drill Вниз по группе',
'Drill Down by Incident': 'Drill Вниз по Инцидента',
'Drill Down by Shelter': 'Drill Вниз по Жильем',
'Driving License': 'Водительского',
'Drought': 'Засухе',
'Drugs': 'Наркотиков',
'Dug Well': 'А Также прорыт',
'Duplicate?': 'Дублирующиеся?',
'Duration': 'продолжительность',
'Dust Storm': 'Пыльная буря',
'Dwelling': 'проживание',
'EMS Reason': 'EMS Причины',
'EMS Status': 'EMS статус',
'ER Status Reason': 'ER причина состояния',
'ER Status': 'ER статус',
'Early Recovery': 'Раннее восстановление',
'Earthquake': 'Землетрясение',
'Edit Activity': 'Изменить операцию',
'Edit Address': 'Изменить адрес',
'Edit Alternative Item': 'Редактировать альтернативную позицию',
'Edit Application': 'Редактировать приложение',
'Edit Assessment Summary': 'Редактировать суммарную оценку',
'Edit Assessment': 'Редактировать оценку',
'Edit Asset Log Entry': 'Редактировать Запись журнала ресурсов',
'Edit Asset': 'Редактировать ассет',
'Edit Baseline Type': 'Редактировать тип базового уровня',
'Edit Baseline': 'Редактировать базовый уровень',
'Edit Brand': 'Редактировать бренд',
'Edit Budget': 'Редактировать Бюджет',
'Edit Bundle': 'Редактировать Комплект',
'Edit Camp Service': 'Редактировать Предоставление проживания в лагере',
'Edit Camp Type': 'Редактировать тип лагеря',
'Edit Camp': 'Редактировать Лагерь',
'Edit Catalog Item': 'Редактировать элемент каталога',
'Edit Catalog': 'Редактировать каталог',
'Edit Certificate': 'Редактировать сертификат',
'Edit Certification': 'Редактировать сертификацию',
'Edit Cluster Subsector': 'Редактировать Подсектор Кластера',
'Edit Cluster': 'Редактировать Кластер',
'Edit Commitment Item': 'Редактировать Делу Элемента',
'Edit Commitment': 'Редактировать обязательство',
'Edit Competency Rating': 'Редактировать рейтинг компетентности',
'Edit Competency': 'Редактировать компетентность',
'Edit Contact Information': 'Изменить данные контакта',
'Edit Contact': 'Изменить контакт',
'Edit Contents': 'Изменить содержимое',
'Edit Course Certificate': 'редактировать сертификат курса',
'Edit Course': 'Редактировать курс',
'Edit Credential': 'Изменить разрешение',
'Edit Dead Body Details': 'Редактировать Недоставленных Тело Сведения',
'Edit Description': 'описание формата',
'Edit Details': 'Редактировать детали',
'Edit Disaster Victims': 'Редактировать жертв катастрофы',
'Edit Document': 'Редактировать документ',
'Edit Donor': 'Редактировать донора',
'Edit Email Settings': 'Редактировать установки электронной почты',
'Edit Entry': 'Редактировать запись',
'Edit Event': 'Редактировать событие',
'Edit Facility': 'Редактировать средство',
'Edit Feature Layer': 'Функции редактирования Уровень',
'Edit Flood Report': 'Редактировать Наводнениями Отчета',
'Edit Gateway Settings': 'Редактировать параметры шлюза',
'Edit Group': 'Изменить группу',
'Edit Hospital': 'Редактировать Больницу',
'Edit Human Resource': 'Редактировать трудовые ресурсы',
'Edit Identification Report': 'Редактировать Код Отчета',
'Edit Identity': 'Редактировать Личность',
'Edit Image Details': 'Редактировать детали изображения',
'Edit Impact Type': 'Редактировать тип последствий',
'Edit Impact': 'Редактировать Последствия',
'Edit Incident Report': 'Редактировать отчет о происшествии',
'Edit Inventory Item': 'Редактировать складской номенклатуры',
'Edit Item Category': 'Редактировать категорию элементов',
'Edit Item Pack': 'Изменить элемент Pack',
'Edit Item': 'Изменить элемент',
'Edit Job Role': 'Редактировать роль работы',
'Edit Key': 'изменить ключ',
'Edit Kit': 'Редактировать Kit',
'Edit Layer': 'Изменить слой',
'Edit Level %d Locations?': 'Редактировать Уровень% d расположениях?',
'Edit Level 1 Assessment': 'Редактировать уровня 1 Оценки',
'Edit Level 2 Assessment': 'Редактировать оценку уровня 2',
'Edit Location': 'Редактировать расположение',
'Edit Log Entry': 'Редактировать запись журнала',
'Edit Map Profile': 'Редактировать конфигурации отображения',
'Edit Map Services': 'Редактировать Службы карты',
'Edit Marker': 'Редактировать Маркер',
'Edit Membership': 'Редактировать членство',
'Edit Message': 'Редактировать сообщение',
'Edit Messaging Settings': 'Редактировать параметры обмена сообщениями',
'Edit Mission': 'Редактировать Миссии',
'Edit Modem Settings': 'Редактировать параметры модема',
'Edit Need Type': 'Редактировать тип потребности',
'Edit Need': 'Редактировать потребность',
'Edit Office': 'Редактировать офис',
'Edit Options': 'Редактировать опции',
'Edit Organization': 'Редактировать организацию',
'Edit Parameters': 'Редактировать параметры',
'Edit Peer Details': 'Редактировать сведения о равноправном участнике сети',
'Edit Person Details': 'Редактировать личные сведения',
'Edit Personal Effects Details': 'Редактировать Сведения о личных вещах',
'Edit Photo': 'Редактировать Фото',
'Edit Population Statistic': 'Редактировать Населения Статистики',
'Edit Position': 'Редактировать должность',
'Edit Problem': 'Редактировать проблему',
'Edit Project': 'Редактировать проект',
'Edit Projection': 'Редактировать проекцию',
'Edit Rapid Assessment': 'Редактировать экспресс-оценку',
'Edit Received Item': 'Редактировать полученный элемент',
'Edit Received Shipment': 'Редактировать полученную поставку',
'Edit Record': 'Изменить запись',
'Edit Registration Details': 'Редактировать сведения о регистрации',
'Edit Registration': 'Редактировать Регистрации',
'Edit Request Item': 'Изменить запрос Элемента',
'Edit Request': 'Редактировать запрос',
'Edit Resource': 'Изменить ресурс',
'Edit River': 'Редактировать Реки',
'Edit Role': 'Редактировать роль',
'Edit Room': 'Редактировать Комнаты',
'Edit Scenario': 'Правка сценария',
'Edit Sector': 'Редактировать сектор',
'Edit Sent Item': 'Редактировать посланный элемент',
'Edit Setting': 'Редактировать установку',
'Edit Settings': 'Параметры редактирования',
'Edit Shelter Service': 'Редактировать Предоставление убежища',
'Edit Shelter Type': 'Редактировать Тип Убежища',
'Edit Shelter': 'Редактировать Приют',
'Edit Skill Equivalence': 'Изменить навык Equivalence',
'Edit Skill Provision': 'Изменить навык Предоставления',
'Edit Skill Type': 'Изменить навык Тип',
'Edit Skill': 'Изменить навык',
'Edit Solution': 'Редактировать Решения',
'Edit Staff Type': 'Редактировать Сотрудников Тип',
'Edit Subscription': 'Изменить подписку',
'Edit Subsector': 'Редактировать Подсектора',
'Edit Survey Answer': 'Редактировать Опрос Ответ',
'Edit Survey Question': 'Редактировать Опрос Вопрос',
'Edit Survey Series': 'Редактировать Опрос Ряда',
'Edit Survey Template': 'Редактировать шаблон опроса',
'Edit Task': 'Изменить задачу',
'Edit Team': 'Редактировать Группу',
'Edit Theme': 'Редактировать тему',
'Edit Themes': 'Редактировать Темы',
'Edit Ticket': 'Редактировать Паспорт',
'Edit Track': 'Редактировать курс',
'Edit Training': 'Редактировать обучение',
'Edit Tropo Settings': 'Редактировать Параметры Tropo',
'Edit User': 'Редактировать пользователя',
'Edit Volunteer Availability': 'Редактировать доступность волонтера',
'Edit Volunteer Details': 'Редактировать детали волонтера',
'Edit Warehouse': 'Редактировать Хранилище',
'Edit current record': 'Редактировать текущую запись',
'Edit message': 'Редактировать сообщение',
'Edit': 'редактировать',
'Editable?': 'Редактируемых?',
'Education materials received': 'Учебные материалы получены',
'Education materials, source': 'Учебные материалов, источник',
'Education': 'обучение',
'Effects Inventory': 'Эффекты Запасов',
'Eggs': 'Яйца',
'Either a shelter or a location must be specified': 'Либо в убежище или необходимо указать расположение',
'Either file upload or document URL required.': 'Либо файл загрузки или URL документа требуется.',
'Either file upload or image URL required.': 'Либо файл загрузки или URL изображения требуется.',
'Elderly person headed households (>60 yrs)': 'Домашних хозяйств возглавляемых пожилым лицом (>60 лет)',
'Electrical': 'Электрический',
'Electrical, gas, sewerage, water, hazmats': 'Электрический, газ, канализация вода, взрывчатые материалы',
'Elevated': 'Повышенный',
'Elevators': 'Лифты',
'Email Address': 'Электронный адрес',
'Email Settings': 'Параметры электронной почты',
'Email settings updated': 'Электронной почты параметры обновления',
'Email': 'электронный адрес',
'Embassy': 'Посольство',
'Emergency Capacity Building project': 'Чрезвычайных проекта по созданию потенциала',
'Emergency Department': 'Департамент чрезвычайной',
'Emergency Shelter': 'Чрезвычайной Жильем',
'Emergency Support Facility': 'Чрезвычайного фонда поддержки',
'Emergency Support Service': 'Чрезвычайной службы поддержки',
'Emergency Telecommunications': 'Чрезвычайных телекоммуникационных',
'Enable/Disable Layers': 'Разрешить/запретить слои',
'Enabled': 'разрешенный',
'End Date': 'конечная дата',
'End date should be after start date': 'Дата окончания должна быть после даты начала',
'End date': 'конечная дата',
'End of Period': 'Конец периода',
'English': 'Британские единицы',
'Enter Coordinates:': 'Введите координаты:',
'Enter a GPS Coord': 'Введите, GPS Coord',
'Enter a name for the spreadsheet you are uploading (mandatory).': 'Введите имя электронной таблицы, которую вы выгружаете (обязательно).',
'Enter a new support request.': 'Введите новый запрос о поддержке.',
'Enter a unique label!': 'Введите уникальную метку!',
'Enter a valid date before': 'Прежде введите действительную дату',
'Enter a valid email': 'Введите действительный электронный адрес',
'Enter a valid future date': 'Введите правильную будущую дату',
'Enter some characters to bring up a list of possible matches': 'Введите несколько символов чтобы извлечь список возможных совпадений',
'Enter some characters to bring up a list of possible matches.': 'Введите несколько символов чтобы поднять список возможных совпадений.',
'Enter tags separated by commas.': 'Введите теги, разделенных запятыми.',
'Enter the same password as above': 'Введите тот же пароль выше',
'Entered': 'ВВЕДЕНО',
'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': 'Ввод телефонного номера необязателен, но это позволяет подписаться на получение SMS сообщений.',
'Entry deleted': 'Запись удалена',
'Environment': 'условия',
'Equipment': 'Оборудование',
'Error encountered while applying the theme.': 'Ошибка при применении темы.',
'Error in message': 'Ошибка в сообщении',
"Error logs for '%(app)s'": 'Журналы ошибок для "%(app)s"',
'Errors': 'ошибок',
'Est. Delivery Date': 'Эбт. Дата поставки',
'Estimated # of households who are affected by the emergency': 'Ожидаемое количество семей, которые пострадают в чрезвычайной ситуации',
'Estimated # of people who are affected by the emergency': 'Расчетное количество людей, которые пострадают в чрезвычайной ситуации',
'Estimated Overall Building Damage': 'Расчетный общий ущерб в строительстве',
'Estimated total number of people in institutions': 'Расчетное общее количество людей в организациях',
'Euros': 'Евро',
'Evacuating': 'Опорожнение',
'Evaluate the information in this message. (This value SHOULD NOT be used in public warning applications.)': 'Оцените информацию этого сообщения. (это значение не следует использовать в государственных предупреждение приложений. )',
'Event Details': 'Детали события',
'Event added': 'Событие добавлено',
'Event deleted': 'Событие удалено',
'Event updated': 'Событие обновлено',
'Event': 'событие',
'Events': 'С событиями',
'Example': 'Пример',
'Exceeded': 'Превышено',
'Excellent': 'Отличный',
'Exclude contents': 'Исключить содержимое',
'Excreta disposal': 'Размещение экскрементов',
'Execute a pre-planned activity identified in <instruction>': 'Выполнить ранее запланированную деятельность, определенную в',
'Exercise': 'Упражнение',
'Exercise?': 'Упражнение?',
'Exercises mean all screens have a watermark & all notifications have a prefix.': 'Применения означают, что все экраны имеют водяной знак и все уведомления имеют префикс.',
'Existing Placard Type': 'Существующие Табло Тип',
'Existing food stocks': 'Существующие запасы продовольствия',
'Existing location cannot be converted into a group.': 'Существующее расположение не может быть преобразовано в группу.',
'Exits': 'Точки выхода',
'Experience': 'Опыт',
'Expiry Date': 'Срок действия',
'Explosive Hazard': 'Взрывоопасным Опасности',
'Export Data': 'Экспортировать данные',
'Export Database as CSV': 'Экспорт базы данных в формате CSV',
'Export in GPX format': 'Экспорт в GPX формате',
'Export in KML format': 'Экспорт в KML формате',
'Export in OSM format': 'Экспорт в OSM формате',
'Export in PDF format': 'Экспорт в формате PDF',
'Export in RSS format': 'Экспорт в формате RSS',
'Export in XLS format': 'Экспорт в формате XLS',
'Export': 'Экспорт',
'Exterior Only': 'Только наружный',
'Exterior and Interior': 'Наружный и внутренний',
'Eye Color': 'Цвет глаз',
'Facial hair, color': 'волосы на лице, цвет',
'Facial hair, type': 'Волосы на лице, тип',
'Facial hear, length': 'Волосы на лице, длина',
'Facilities': 'Средства',
'Facility Details': 'Детали средства',
'Facility Operations': 'Возможные операции',
'Facility Status': 'Статус возможности',
'Facility Type': 'Тип возможностей',
'Facility added': 'средство добавлено',
'Facility or Location': 'Средство или расположение',
'Facility removed': 'Facility удален',
'Facility updated': 'Утилита обновляется',
'Facility': 'средство',
'Fail': 'сбой',
'Failed!': 'Сбой',
'Fair': 'Удовлетворительно',
'Falling Object Hazard': 'Падение Объект Опасности',
'Families/HH': 'Семей/чч',
'Family tarpaulins received': 'Семьи tarpaulins полученных',
'Family tarpaulins, source': 'Семьи tarpaulins, источник',
'Family': 'Семья',
'Family/friends': 'Семьи/друзей',
'Farmland/fishing material assistance, Rank': 'Материальную помощь фермерская земля/рыбный промысел, классифицировать',
'Fatalities': 'Смерти',
'Fax': 'Факс',
'Feature Layer Details': 'Функция Слой Сведения',
'Feature Layer added': 'Функция Layer добавил',
'Feature Layer deleted': 'Функция Layer удалена',
'Feature Layer updated': 'Функция Уровня обновления',
'Feature Layers': 'Функция Уровнями',
'Feature Namespace': 'Функция Имен',
'Feature Request': 'Запрос компонентов',
'Feature Type': 'Тип комплекта',
'Features Include': 'Возможности включают',
'Female headed households': 'Домашних хозяйств возглавляемых женщинами',
'Female': 'Женский',
'Few': 'Несколько',
'Field Hospital': 'Полевой госпиталь',
'Field': 'Поле',
'File': 'Файл',
'Fill in Latitude': 'Заполните широту',
'Fill in Longitude': 'Заполните долготу',
'Filter Field': 'Поле фильтра',
'Filter Value': 'Значение фильтра',
'Filter': 'Фильтр',
'Find Dead Body Report': 'Найти отчет об умерших',
'Find Hospital': 'Найти Больницу',
'Find Person Record': 'Найти пользователя Записи',
'Find Volunteers': 'Найти Добровольцев',
'Find a Person Record': 'Найти запись субьекта',
'Find': 'поиск',
'Finder': 'определитель',
'Fingerprint': 'Отпечаток',
'Fingerprinting': 'Дактилоскопия',
'Fingerprints': 'Отпечатки пальцев',
'Finished Jobs': 'Завершенные задания',
'Fire suppression and rescue': 'Пожаротушение и спасение',
'Fire': 'Пожар',
'First Name': 'Имя',
'First name': 'Имя',
'Fishing': 'рыбная ловля',
'Flash Flood': 'Flash Наводнениями',
'Flash Freeze': 'Flash Заморозить',
'Flexible Impact Assessments': 'Гибкие оценки воздействия',
'Flood Alerts show water levels in various parts of the country': 'В предупреждениях о наводнении указаны уровни воды в разных районах страны',
'Flood Alerts': 'Наводнениям Оповещения',
'Flood Report Details': 'Детали Отчета о Наводнении',
'Flood Report added': 'Отчет о Наводнении добавлен',
'Flood Report deleted': 'Отчет о Наводнении удален',
'Flood Report updated': 'Отчет о Наводнении обновлен',
'Flood Report': 'Отчет о Наводнении',
'Flood Reports': 'Отчеты о Наводнении',
'Flood': 'Наводнениям',
'Flow Status': 'Состояния потока',
'Fog': 'Туман',
'Food Supply': 'Поставок продовольствия',
'Food assistance': 'Продовольственная помощь',
'Food': 'Пища',
'Footer file %s missing!': 'Колонтитула файл% s отсутствует!',
'Footer': 'нижний колонтитул',
'For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.': 'Для страны это был бы код ISO2, для города это может быть код положения аэропорта.',
'For each sync partner, there is a default sync job that runs after a specified interval of time. You can also set up more sync jobs which could be customized on your needs. Click the link on the right to get started.': 'Для каждого синхронизации партнера, существует стандартное задание синхронизации, за указанный интервал времени. Можно также настроить более синхр задания может быть настраиваемого по своему усмотрению. Щелкните ссылку на право получать запущен.',
'For enhanced security, you are recommended to enter a username and password, and notify administrators of other machines in your organization to add this username and password against your UUID in Synchronization -> Sync Partners': 'В целях повышения безопасности рекомендуется ввести имя пользователя и пароль, и уведомить администраторов других компьютеров в вашей организации о добавлении этого имени пользователя и пароля при помощи UUID в Synchronization -> Sync Partners',
'For live help from the Sahana community on using this application, go to': 'Для реальной помощи со стороны сообщества Sahana по использованию данного приложения, перейти к',
'For messages that support alert network internal functions': 'Для сообщений, которые поддерживают предупреждающие сетевые внутренние функции',
'For more details on the Sahana Eden system, see the': 'Более подробную информацию о Sahana Эдема системы приведены в',
'For more information, see': 'Дополнительная информация содержится в',
'For': 'Цикл For',
'Forest Fire': 'Лесной пожар',
'Formal camp': 'Официальный лагерь',
'Format': 'Формат',
'Forms': 'формы',
'Found': 'найдено',
'Freezing Drizzle': 'Замораживание Drizzle',
'Freezing Rain': 'дождь с образованием гололёда',
'Freezing Spray': 'Охлаждающий спрэй',
'French': 'французский',
'Friday': 'Пятница',
'From Inventory': 'Из запасов',
'From Location': 'Из участка',
'From Organization': 'Из организации',
'From': 'с',
'Fulfil. Status': 'Выполнять. статус',
'Fulfillment Status': 'Состояние выполнения',
'Full beard': 'Полный бороду',
'Full': 'полный',
'Fullscreen Map': 'Fullscreen Карты',
'Functions available': 'Доступные функции',
'Funding Organization': 'Финансирование Организации',
'Funeral': 'Похороны',
'Further Action Recommended': 'Дальнейших действий Рекомендуется',
'GIS Reports of Shelter': 'Отчеты шельтера по GIS',
'GIS integration to view location details of the Shelter': 'Интеграция GIS для обзора деталей расположения шельтера.',
'GPS Marker': 'GPS Маркер',
'GPS Track File': 'GPS Файл курса',
'GPS Track': 'курс GPS',
'GPX Track': 'Курс GPX',
'GRN Status': 'GRN Состояние',
'Gale Wind': 'Предметный указатель Ветровой',
'Gap Analysis Map': 'Gap Анализ Карты',
'Gap Analysis': 'анализ узких мест',
'Gap Map': 'Gap Карты',
'Gap Report': 'Gap Отчета',
'Gateway Settings': 'Параметры шлюза',
'Gateway settings updated': 'Параметры шлюза обновляется',
'Gateway': 'шлюз',
'Gender': 'Пол',
'General Comment': 'Замечание общего',
'General Medical/Surgical': 'Общий Медицинский/Хирургический',
'General emergency and public safety': 'Общие чрезвычайных и общественной безопасности',
'General information on demographics': 'Общие сведения о demographics',
'General': 'обычный',
'Generator': 'Генератор',
'Geocode': 'geocode',
'Geocoder Selection': 'Выбор геокодера',
'Geometry Name': 'Название геометрии',
'Geophysical (inc. landslide)': 'Геофизическом (inc. Оползень)',
'Geotechnical Hazards': 'Геотехнические опасности',
'Geotechnical': 'геотехнический',
'Geraldo module not available within the running Python - this needs installing for PDF output!': 'Модуль Geraldo не доступен в запущенном Python - необходима установка для вывода PDF!',
'Get incoming recovery requests as RSS feed': 'Получить входящие запросы на восстановление как ленту RSS',
'Give a brief description of the image, e.g. what can be seen where on the picture (optional).': 'Дайте краткое описание изображения, например, что и где можно рассмотреть на рисунке (необязательно).',
'Give information about where and when you have seen them': 'Предоставьте информацию о том, где и когда вы видели их',
'Global Messaging Settings': 'Параметры Глобального обмена сообщениями',
'Go to Request': 'Перейдите на Запрос',
'Go': 'идти',
'Goatee': 'эспаньолка',
'Good Condition': 'Хорошие Условия',
'Good': 'Хорошо',
'Goods Received Note': 'Извещение о том, что товары получены',
'Government UID': 'Правительственный UID',
'Government building': 'Правительственное здание',
'Government': 'Правительство',
'Grade': 'градус',
'Greek': 'греческий',
'Green': 'зеленый',
'Ground movement, fissures': 'Местах передвижения, раскол',
'Ground movement, settlement, slips': 'Местах передвижения, урегулирования, отборочных накладных',
'Group Description': 'Описание группы',
'Group Details': 'Сведения о группе',
'Group Member added': 'Член группы добавлены',
'Group Members': 'Элементы группы',
'Group Memberships': 'Членство в группе',
'Group Name': 'Имя группы',
'Group Title': 'Заголовок группы',
'Group Type': 'Тип группы',
'Group added': 'Группа добавлена',
'Group deleted': 'Группа удалена',
'Group description': 'Описание группы',
'Group updated': 'Группа обновлена',
'Group': 'Сгруппировать',
'Groups removed': 'Удалены группы',
'Groups': 'Группы',
'Guest': 'Гость',
'HR Manager': 'Менеджер по персоналу',
'Hail': 'Град',
'Hair Color': 'Цвет волос',
'Hair Length': 'Длина волос',
'Hair Style': 'Тип волос',
'Has data from this Reference Document been entered into Sahana?': 'Данные из этого справочного документа были введены Sahana?',
'Has the Certificate for receipt of the shipment been given to the sender?': 'Имеет сертификат для получения этого поставка был предоставлен для отправителя?',
'Has the GRN (Goods Received Note) been completed?': 'Содержит GRN (полученных товаров примечание) были завершены?',
'Hazard Pay': 'Доплата за риск',
'Hazardous Material': 'Опасные материалы',
'Hazardous Road Conditions': 'Опасные дорожные условия',
'Header Background': 'Фон заголовка',
'Header background file %s missing!': 'отсутствует файл фона заголовков!',
'Headquarters': 'Штаб-квартира',
'Health care assistance, Rank': 'Оказание медицинской помощи, должность',
'Health center with beds': 'Центр работоспособности с коек',
'Health center without beds': 'Центр работоспособности не коек',
'Health center': 'Центр состояния базы данных',
'Health services status': 'Состояние служб здравоохранения',
'Health': 'Здоровье',
'Healthcare Worker': 'Здравоохранения Обработчика',
'Heat Wave': 'Тепловая Волна',
'Heat and Humidity': 'Теплота и сырость',
'Height (cm)': 'Высота (см)',
'Height (m)': 'Высота (m)',
'Height': 'высота',
'Help': 'Справка',
'Helps to monitor status of hospitals': 'Помогает следить за состоянием больницы',
'Helps to report and search for missing persons': 'Помогает сообщить и выявить без вести пропавших',
'Here are the solution items related to the problem.': 'Вот элементы решения, относящиеся к проблеме.',
'Heritage Listed': 'Наследия Перечисленных',
'Hierarchy Level 0 Name (i.e. Country)': 'Уровень иерархии имени 0 (то есть страна)',
'Hierarchy Level 1 Name (e.g. State or Province)': 'Уровень иерархии имени 1 (например, государство или провинция)',
'Hierarchy Level 2 Name (e.g. District or County)': 'Уровень иерархии имени 2 (например, район или графство)',
'Hierarchy Level 3 Name (e.g. City / Town / Village)': 'Уровень иерархии имени 3 (например, город/поселок/деревня)',
'Hierarchy Level 4 Name (e.g. Neighbourhood)': 'Уровень иерархии имени 4 (например, район)',
'Hierarchy Level 5 Name': 'Уровень иерархии 5 имя',
'High Water': 'Высокой Воды',
'High': 'Максимум',
'Hindu': 'Индуистской',
'History': 'Хронология',
'Hit the back button on your browser to try again.': 'Нажмите кнопку возврата в браузере чтобы попытаться снова.',
'Holiday Address': 'Праздничный адрес',
'Home Address': 'Домашний адрес',
'Home Country': 'Страна или регион (дом. адрес)',
'Home Crime': 'Домашняя Преступление',
'Home': 'Начальная страница',
'Hospital Details': 'Сведения о больнице',
'Hospital Status Report': 'Отчет о состоянии больницы',
'Hospital information added': 'Информация о больнице добавлена',
'Hospital information deleted': 'Информация о больнице удалена',
'Hospital information updated': 'Информация о больнице обновлена',
'Hospital status assessment.': 'Оценки состояния больницы .',
'Hospital': 'Больница',
'Hospitals': 'Больницы',
'Hot Spot': 'активная точка',
'Hour': 'Час',
'Hours': 'часы',
'Household kits received': 'наборы хозяйственных инструментов получены',
'Household kits, source': 'Домашнее оборудование, источник',
'How does it work?': 'Как работает продукт?',
'How is this person affected by the disaster? (Select all that apply)': 'Каковы последствия этого бедствия для этого человека? (выберите все подходящие варианты)',
'How long will the food last?': 'Как долго будет в продовольствии фамилия?',
'How many Boys (0-17 yrs) are Dead due to the crisis': 'Сколько мальчиков (0 - 17 лет), Недоставленных Сообщений из - за кризиса',
'How many Boys (0-17 yrs) are Injured due to the crisis': 'Сколько мальчиков (0 - 17 лет), Потерпевшее из - за кризиса',
'How many Boys (0-17 yrs) are Missing due to the crisis': 'Сколько мальчиков (0 - 17 подшаблоне) отсутствуют из - за кризиса',
'How many Girls (0-17 yrs) are Dead due to the crisis': 'Сколько девочек (0 - 17 лет) умерли во время кризиса',
'How many Girls (0-17 yrs) are Injured due to the crisis': 'Сколько девочек (0 - 17 лет) ранены во время кризиса',
'How many Girls (0-17 yrs) are Missing due to the crisis': 'Сколько девочек (0 - 17 лет) пропали во время кризиса',
'How many Men (18 yrs+) are Dead due to the crisis': 'Сколько мужчин (18 лет+) скончалось во время кризиса',
'How many Men (18 yrs+) are Injured due to the crisis': 'Сколько мужчин (18 yrs+), Потерпевшее из - за кризиса',
'How many Men (18 yrs+) are Missing due to the crisis': 'Сколько мужчин (18 yrs+) отсутствуют из - за кризиса',
'How many Women (18 yrs+) are Dead due to the crisis': 'Сколько женщин (18 лет и старше) погибло из - за кризиса',
'How many Women (18 yrs+) are Injured due to the crisis': 'Сколько женщин (18 лет и старше) ранены во время кризиса',
'How many Women (18 yrs+) are Missing due to the crisis': 'Сколько женщин (от 18 лет и старше) считаются пропавшими из-за кризиса?',
'How many days will the supplies last?': 'Сколько дней будут поставки фамилия?',
'How many new cases have been admitted to this facility in the past 24h?': 'Сколько новых случаев были допущены на это средство в прошлом 24ч?',
'How many of the patients with the disease died in the past 24h at this facility?': 'Как многих пациентов с этой болезнью погибли в прошлом 24ч на этом утилитой?',
'How many patients with the disease are currently hospitalized at this facility?': 'Сколько пациентов с этой болезнью в настоящее время госпитализированы в этом здании?',
'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': 'Насколько детализировано изображение. Высокой уровень увеличения масштаба означает большую детальность, но не широкую область. Масштаб низкого уровня означает возможность видеть участок большой площади, но с низким уровнем детализации.',
'Human Resource Details': 'Сведения людских ресурсов',
'Human Resource Management': 'Управление трудовыми ресурсами',
'Human Resource added': 'Добавлен людских ресурсов',
'Human Resource removed': 'Трудовые ресурсы удалены',
'Human Resource updated': 'Трудовые ресурсы обновлены',
'Human Resource': 'Людские ресурсы',
'Human Resources Management': 'Управление трудовыми ресурсами',
'Human Resources': 'Отдел кадров',
'Humanitarian NGO': 'Гуманитарной нпо',
'Hurricane Force Wind': 'Ураган Force Ветра',
'Hurricane': 'Ураган',
'Hygiene NFIs': 'Гигиены NFIs',
'Hygiene kits received': 'Гигиены комплектов полученных',
'Hygiene kits, source': 'Гигиены kits, источник',
'Hygiene practice': 'Гигиены практике',
'Hygiene problems': 'Гигиены проблем',
'Hygiene': 'Гигиены',
'I am available in the following area(s)': 'Я доступен в следующие области (s)',
'ID Tag Number': 'ID номер тега',
'ID Tag': 'Тег ID',
'ID Type': 'Тип ИД',
'Ice Pressure': 'Ice Давление',
'Iceberg': 'Айсберга',
'Identification Report': 'Код Отчета',
'Identification Reports': 'Код Отчетов',
'Identification Status': 'Идентификация состояние',
'Identification': 'идентификация',
'Identified as': 'Определяется как',
'Identified by': 'Определяется по',
'Identity Details': 'Identity Сведения',
'Identity added': 'Identity добавлено',
'Identity deleted': 'Identity удален',
'Identity updated': 'Identity обновляется',
'Identity': 'Субъект',
'If a ticket was issued then please provide the Ticket ID.': 'Если паспорт выполнялось укажите ID паспорта.',
'If a user verifies that they own an Email Address with this domain, the Approver field is used to determine whether & by whom further approval is required.': 'Если пользователь проверяется их с адрес электронной почты с этим доменом утверждающему, это поле используется для определения& кем дополнительно требуется утверждение.',
'If it is a URL leading to HTML, then this will downloaded.': 'Если это URL ведущий к HTML, то этот последний будет загружен.',
'If neither are defined, then the Default Marker is used.': 'Если ни определены, то По Умолчанию Маркер используется.',
'If no marker defined then the system default marker is used': 'Если маркер не, то система по умолчанию используется маркер',
'If no, specify why': 'Если нет, укажите почему',
'If none are selected, then all are searched.': 'Если ничего не выбрано, просматривается все.',
'If the location is a geographic area, then state at what level here.': 'Если расположение, географического района, то тогда государство на каком уровне здесь.',
'If the request type is "Other", please enter request details here.': 'Если тип запроса - "Other", введите здесь детали запроса.',
'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organization': 'Если это поле заполняется то пользователь в домене, будет автоматически предоставлена в Сотрудников этой организации',
'If this is set to True then mails will be deleted from the server after downloading.': 'Если это установлено в True затем сообщений будут удалены с сервера после загрузки.',
'If this record should be restricted then select which role is required to access the record here.': 'Если эта запись должна быть ограничена затем укажите какой требуется роль обратиться к записи здесь.',
'If this record should be restricted then select which role(s) are permitted to access the record here.': 'Если эта запись должна быть ограничена затем укажите какой роли (s) могут обратиться к записи здесь.',
'If yes, specify what and by whom': 'Если да, укажите что и кем',
'If yes, which and how': 'Если да, какие и как',
'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.': 'Если вы не вводите ссылочный документ, тогда будет показан ваш электронный почтовый ящик, что позволит проверить эти данные.',
'If you know what the Geonames ID of this location is then you can enter it here.': 'Если вы знаете, что Geonames идентификатор данного находится затем можно введите его здесь.',
'If you know what the OSM ID of this location is then you can enter it here.': 'Если вы знаете, что OSM идентификатор данного находится затем можно введите его здесь.',
'If you need to add a new document then you can click here to attach one.': 'Если вам необходимо добавить новый документ затем можно щелкнуть здесь чтобы присоединить один.',
'If you want several values, then separate with': 'Если вы хотите несколько значений, то разделяйте',
'If you would like to help, then please': 'Если вы хотите помочь, то пожалуйста',
'Illegal Immigrant': 'Нелегальный иммигрант',
'Image Details': 'Подробности об образе',
'Image Tags': 'Image Теги',
'Image Type': 'Тип образа',
'Image added': 'Изображение добавлено',
'Image deleted': 'Image удален',
'Image updated': 'Image обновляется',
'Image': 'рисунок',
'Images': 'образы',
'Impact Assessments': 'Оценки воздействия',
'Impact Details': 'Влияние Сведения',
'Impact Type Details': 'Влияние сведения о типе',
'Impact Type added': 'Влияние добавлен тип',
'Impact Type deleted': 'Тип воздействия удален',
'Impact Type updated': 'Тип воздействия обновляется',
'Impact Type': 'Тип влияния',
'Impact Types': 'Влияние Типы',
'Impact added': 'Влияние добавлен',
'Impact deleted': 'Влияние удален',
'Impact updated': 'Влияние обновления',
'Impacts': 'Воздействия',
'Import & Export Data': 'Импорт и экспорт данных',
'Import Data': 'Импортировать данные',
'Import Jobs': 'Импорт заданий',
'Import and Export': 'Импорт и экспорт',
'Import from Ushahidi Instance': 'Импорт из Ushahidi Экземпляр',
'Import if Master': 'Импортируйте если Master',
'Import multiple tables as CSV': 'Импорт нескольких таблиц в формате CSV',
'Import': 'импортировать',
'Import/Export': 'Импорт/экспорт',
'Important': 'Важно',
'Importantly where there are no aid services being provided': 'Важно там, где не предоставляются услуги поддержки',
'Importing data from spreadsheets': 'Импорт данных из электронных таблиц',
'Improper decontamination': 'Неправильное обеззараживанию',
'Improper handling of dead bodies': 'Неправильного обращения тел погибших',
'In Catalogs': 'В каталогах',
'In Inventories': 'В Кадастров',
'In Process': 'В процессе',
'In Progress': 'исполняемый в текущий момент',
'In Window layout the map maximises to fill the window, so no need to set a large value here.': 'В окне layout карты максимизирующую для заполнения окна, поэтому нет необходимости устанавливать значение велико здесь.',
'Inbound Mail Settings': 'Параметры входящей почты',
'Incident Categories': 'Категории Инцидентов',
'Incident Report Details': 'Подробности отчета о происшествии',
'Incident Report added': 'Отчет об Инциденте добавлен',
'Incident Report deleted': 'Отчет об Инциденте удален',
'Incident Report updated': 'Отчет о происшествии обновлен',
'Incident Report': 'ОТЧЕТ ОБ ИНЦИДЕНТЕ',
'Incident Reporting System': 'Система составления отчетов о происшествиях',
'Incident Reporting': 'Отчеты о происшествии',
'Incident Reports': 'Отчеты о происшествиях',
'Incident': 'Инцидент',
'Incidents': 'Происшествия',
'Incoming Shipment canceled': 'Входящая отгрузка отменена',
'Incoming Shipment updated': 'Входящая отгрузка обновлена',
'Incoming': 'Входящий',
'Incomplete': 'Незавершенный',
'Individuals': 'индивидуумы',
'Industrial Crime': 'Промышленное преступление',
'Industrial': 'Индустриальный',
'Industry Fire': 'Индустрия пожара',
'Infant (0-1)': 'Младенческая (0 - 1)',
'Infectious Disease (Hazardous Material)': 'Инфекционное заболевание (опасные материалы)',
'Infectious Disease': 'Инфекционных заболеваний',
'Infectious Diseases': 'Инфекционных заболеваний',
'Infestation': 'Необезвреженными',
'Informal Leader': 'Неофициальные Лидер',
'Informal camp': 'Неформальный лагерь',
'Information gaps': 'Информационные пробелы',
'Infusion catheters available': 'Catheters доступных вливания',
'Infusion catheters need per 24h': 'Вливания catheters необходимо на 24h',
'Infusion catheters needed per 24h': 'Вливания catheters требуется на 24h',
'Infusions available': 'Раза вводили доступны',
'Infusions needed per 24h': 'Раза вводили требуется на 24h',
'Inspected': 'Инспектировано',
'Inspection Date': 'Инспекционных Дата',
'Inspection date and time': 'Инспекциям дата и время',
'Inspection time': 'Инспекционных время',
'Inspector ID': 'Инспекторе ID',
'Instant Porridge': 'Instant Стульях',
'Institution': 'Учреждение',
'Insufficient vars: Need module, resource, jresource, instance': 'Недостаточно vars: требуется модуль, ресурс, jresource, экземпляр',
'Insufficient': 'Недостаточно',
'Intake Items': 'Всасывающей Элементы',
'Intergovernmental Organization': 'Межправительственная организация',
'Interior walls, partitions': 'Внутренних стены, перегородки',
'Internal State': 'Внутреннее состояние',
'International NGO': 'Международная неправительственная организация',
'International Organization': 'Международная организация',
'Interview taking place at': 'Интервью место в',
'Invalid Query': 'Неверный запрос',
'Invalid request!': 'Неверный запрос!',
'Invalid ticket': 'Неверный паспорт',
'Invalid': 'Ошибка',
'Inventories': 'Кадастрах',
'Inventory Item Details': 'Складской номенклатуры Сведения',
'Inventory Item updated': 'Складской номенклатуры обновляется',
'Inventory Item': 'Складской номенклатуры',
'Inventory Items include both consumable supplies & those which will get turned into Assets at their destination.': 'Складских номенклатур включают как расходные& которые будет превращен активов в их назначения.',
'Inventory Items': 'Складских номенклатур',
'Inventory Management': 'Управление складом',
'Inventory of Effects': 'Запасов последствий',
'Inventory': 'Товарные запасы',
'Is editing level L%d locations allowed?': '- редактирования уровень L% d расположениях разрешено?',
'Is it safe to collect water?': 'Это безопасно для сбора воды?',
'Is this a strict hierarchy?': 'Это строгое иерархии?',
'Issuing Authority': 'орган власти, издающий приказы, выдающий ордера, разрешения и пр.',
'It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'Он записывает не только места, где они активны, но и собирает сведения в отношении круга проектов обеспечивающие в каждой области.',
'Item Added to Shipment': 'Элемент добавлен к отгрузке',
'Item Catalog Details': 'Элемент сведения о каталоге',
'Item Categories': 'категории элемента',
'Item Category Details': 'Детали категории элемента',
'Item Category added': 'Категория элемента добавлена',
'Item Category deleted': 'Категория элемента удалена',
'Item Category updated': 'Категория элемента обновлена',
'Item Category': 'Категория элемента',
'Item Details': 'Детали элемента',
'Item Pack Details': 'Детали элемента пакет',
'Item Pack added': 'Элемент Пакет добавлен',
'Item Pack deleted': 'Элемент Пакет удален',
'Item Pack updated': 'Элемент Пакета обновления',
'Item Packs': 'Элемент Packs',
'Item added to Inventory': 'Элемент добавлен в запасы',
'Item added to shipment': 'Элемент добавлен к отгрузке',
'Item added': 'ЭЛЕМЕНТ ДОБАВЛЕН',
'Item already in Bundle!': 'Элемент уже в комплекте!',
'Item already in Kit!': 'Элемент уже в Комплект!',
'Item already in budget!': 'Элемент уже в бюджет!',
'Item deleted': 'Элемент удалён',
'Item removed from Inventory': 'Элемент удалить из запасов',
'Item updated': 'Элемент обновлен',
'Item': 'элемент',
'Items in Category can be Assets': 'Элементы в категории могут быть Активы',
'Items': 'элементы',
'Japanese': 'японский',
'Jerry can': 'Джерри можно',
'Jew': 'Чужака',
'Job Role Catalog': 'Задание Роль Каталога',
'Job Role Details': 'Задание сведений о роли',
'Job Role added': 'Задание добавлена роль',
'Job Role deleted': 'Задание роль удалена',
'Job Role updated': 'Роль задания обновления',
'Job Role': 'Роль задания',
'Job Roles': 'Задания Ролей',
'Job Title': 'Должность',
'Jobs': 'задания',
'Journal Entry Details': 'Сведения о записи журнала',
'Journal entry added': 'Добавлена запись журнала',
'Journal entry deleted': 'Журнал запись удалена',
'Journal entry updated': 'Запись в журнале обновлена',
'Journal': 'Журналы',
'Key Details': 'Детали ключа',
'Key added': 'Ключ добавлен',
'Key deleted': 'Ключ удален',
'Key updated': 'Ключ обновлен',
'Key': 'ключ',
'Keys': 'ключи',
'Kit Contents': 'Содержимого комплекта',
'Kit Details': 'Сведения о комплекте',
'Kit Updated': 'Комплект Обновления',
'Kit added': 'Комплект добавлен',
'Kit deleted': 'Комплект удален',
'Kit updated': 'Комплект обновления',
'Kit': 'Набор',
'Kits': 'наборы',
'Known Identities': 'Известное Идентификаторами',
'Known incidents of violence against women/girls': 'Известное случаи насилия в отношении женщин/девочек',
'Known incidents of violence since disaster': 'Известное случаи насилия после бедствия',
'LICENCE': 'ЛИЦЕНЗИИ',
'LICENSE': 'лицензия',
'Lack of material': 'Отсутствие материала',
'Lack of school uniform': 'Отсутствие школьной формы',
'Lack of supplies at school': 'Отсутствие материалов в школе',
'Lack of transport to school': 'Отсутствие доставки в школу',
'Lactating women': 'Кормящие женщины',
'Landslide': 'Оползень',
'Language': 'Язык',
'Last Name': 'Фамилия',
'Last known location': 'Последнее известное местоположение',
'Last synchronization time': 'Время последней синхронизации',
'Last updated by': 'Кем изменено',
'Last updated on': 'Последнее обновление в',
'Last updated': 'Последнее обновление',
'Latitude & Longitude': 'Широты & Долгота',
'Latitude is North-South (Up-Down).': 'Широты - север - юг (вверх - вниз).',
'Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': 'Широта экватора равна нулю, и она положительна в северном полушарии и отрицательна в южном.',
'Latitude of Map Center': 'Широта центральной точки карты',
'Latitude of far northern end of the region of interest.': 'Широта дальней северной оконечности района действий.',
'Latitude of far southern end of the region of interest.': 'Широта дальней южной оконечности района действий.',
'Latitude %(lat)s is invalid, should be between %(lat_min)s & %(lat_max)s': 'Широта %(lat)s является недействительным, должно быть между %(lat_min)s & %(lat_max)s',
'Latitude': 'Широта',
'Latrines': 'уборные',
'Law enforcement, military, homeland and local/private security': 'Правоохранительные органы, вооруженные силы, отечественные и местные/частные силы безопасности',
'Layer Details': 'Layer Сведения',
'Layer added': 'Layer добавил',
'Layer deleted': 'Layer удалена',
'Layer updated': 'Layer обновляется',
'Layer': 'Уровень',
'Layers updated': 'Уровнями обновляется',
'Layers': 'Слои',
'Leader': 'Руководитель',
'Legend Format': 'Формат условных обозначений',
'Length (m)': 'Длина (m)',
'Level 1 Assessment Details': 'Уровень 1 оценка Сведения',
'Level 1 Assessment added': 'Уровень 1 оценка добавлен',
'Level 1 Assessment deleted': 'Уровень 1 оценка удаленные',
'Level 1 Assessment updated': 'Уровень 1 оценка обновляется',
'Level 1 Assessments': 'Уровень 1 оценок',
'Level 1': 'Уровень 1',
'Level 2 Assessment Details': 'Детали оценки уровня 2',
'Level 2 Assessment added': 'Добавлена оценка уровня 2',
'Level 2 Assessment deleted': 'Удалена оценка уровня 2',
'Level 2 Assessment updated': 'Оценка уровня 2 обновлена',
'Level 2 Assessments': 'Оценки уровня 2',
'Level 2 or detailed engineering evaluation recommended': 'Уровень 2 или рекомендована подробная инженерная оценка',
'Level 2': 'Уровень 2',
'Level': 'уровень',
'Library support not available for OpenID': 'Поддержка библиотеки недоступена для OpenID',
'List / Add Baseline Types': 'Список/Add Baseline Типы',
'List / Add Impact Types': 'Список/Add Влияние Типы',
'List / Add Services': 'Список/добавить службы',
'List / Add Types': 'Список/добавить типы',
'List Activities': 'Список Деятельности',
'List All Assets': 'Перечислить все активы',
'List All Catalog Items': 'Список всех некаталожные элементы',
'List All Commitments': 'Список всех обязательств',
'List All Entries': 'Список всех записей',
'List All Item Categories': 'Список всех элементов Категории',
'List All Memberships': 'Список всех Членство',
'List All Received Shipments': 'Список все полученные Поставок',
'List All Records': 'Список всех записей',
'List All Requested Items': 'Список всех запрошенных номенклатур',
'List All Requests': 'Список всех запросов',
'List All Sent Shipments': 'Список всех отправленных партий товаров',
'List All': 'Показать все',
'List Alternative Items': 'Список альтернативных изделий',
'List Assessment Summaries': 'Список Оценки Резюме',
'List Assessments': 'Список Взносов',
'List Assets': 'Список активов',
'List Availability': 'Список пригодности',
'List Baseline Types': 'Список Baseline Типы',
'List Baselines': 'Список Исходных',
'List Brands': 'Список Товарных Знаков',
'List Budgets': 'Список Бюджетов',
'List Bundles': 'Список Комплектов',
'List Camp Services': 'Список Лагерных Служб',
'List Camp Types': 'Список Типов лагерей',
'List Camps': 'Список Лагерей',
'List Catalog Items': 'Список Элементов Каталога',
'List Catalogs': 'Список Каталогов',
'List Certificates': 'Список Сертификатов',
'List Certifications': 'Список Сертификаты',
'List Checklists': 'Список Справочных',
'List Cluster Subsectors': 'Список CRG Подсекторов',
'List Clusters': 'Список Кластеров',
'List Commitment Items': 'Список элементов обязательства',
'List Commitments': 'Список обязательств',
'List Competencies': 'Список Качеств',
'List Competency Ratings': 'Список Competency Рейтинговое',
'List Conflicts': 'Список конфликтных ситуаций',
'List Contact Information': 'Список контактная информация',
'List Contacts': 'Список Контактов',
'List Course Certificates': 'Список Курса Сертификатов',
'List Courses': 'Список Курсов',
'List Credentials': 'Список Учетные',
'List Current': 'Список текущих',
'List Documents': 'Список Документов',
'List Donors': 'Список Доноров',
'List Events': 'Показать события',
'List Facilities': 'Список Объектов',
'List Feature Layers': 'Список слоев свойств',
'List Flood Reports': 'Список отчетов о наводнении',
'List Groups': 'Список групп',
'List Groups/View Members': 'Список групп/просматривать членов',
'List Hospitals': 'Список Больниц',
'List Human Resources': 'Список людские ресурсы',
'List Identities': 'Список Идентификаторами',
'List Images': 'Список Изображений',
'List Impact Assessments': 'Список оценок воздействия',
'List Impact Types': 'Список Влияние Типы',
'List Impacts': 'Список Воздействия',
'List Incident Reports': 'Список Инцидента Отчеты',
'List Item Categories': 'Категории списка элементов',
'List Item Packs': 'Элемент списка Пакеты',
'List Items in Inventory': 'Список номенклатур в запасах',
'List Items': 'Элементы списков',
'List Job Roles': 'Список ролей заданий',
'List Keys': 'Список ключей',
'List Kits': 'Список Kits',
'List Layers': 'Список Уровнями',
'List Level 1 Assessments': 'Список оценок уровня 1',
'List Level 1 assessments': 'Список оценок уровня 1',
'List Level 2 Assessments': 'Список оценок уровня 2',
'List Level 2 assessments': 'Список оценок уровня 2',
'List Locations': 'Список расположений',
'List Log Entries': 'Список записей журнала',
'List Map Profiles': 'Список конфигураций отображения',
'List Markers': 'Список Маркеров',
'List Members': 'Список членов',
'List Memberships': 'Список Членство',
'List Messages': 'Показать список сообщений',
'List Missing Persons': 'Список пропавших без вести лиц',
'List Missions': 'Список Миссий',
'List Need Types': 'Список Необходимо Типы',
'List Needs': 'Список необходимо',
'List Offices': 'Список Отделений',
'List Organizations': 'Список Организаций',
'List Peers': 'Список Peers',
'List Personal Effects': 'Список Личных Эффекты',
'List Persons': 'Список Лиц',
'List Photos': 'Список Фотографий',
'List Population Statistics': 'Список статистики населения',
'List Positions': 'Список Позиций',
'List Problems': 'Список Проблем',
'List Projections': 'Список Прогнозы',
'List Projects': 'Список Проектов',
'List Rapid Assessments': 'Список Быстрых Оценок',
'List Received Items': 'Список полученных элементов',
'List Received Shipments': 'Список Полученных партий товара',
'List Records': 'Список Записей',
'List Registrations': 'Список Регистраций',
'List Reports': 'Список Отчетов',
'List Request Items': 'Запрос списка Элементов',
'List Requests': 'Список Запросы',
'List Resources': 'Список ресурсов',
'List Rivers': 'Список Рек',
'List Roles': 'Показать роли',
'List Rooms': 'Список Комнат',
'List Scenarios': 'Список Сценариев',
'List Sections': 'Список разделов',
'List Sectors': 'Список секторов',
'List Sent Items': 'Список отправленных элементов',
'List Sent Shipments': 'Список Отправленных Поставок',
'List Service Profiles': 'список профайлов службы',
'List Settings': 'Параметры списка',
'List Shelter Services': 'Список Жильем Services',
'List Shelter Types': 'Список Жильем Типы',
'List Shelters': 'Список Приюты',
'List Skill Equivalences': 'Список Навыков Эквиваленты',
'List Skill Provisions': 'Список Навыков Положения',
'List Skill Types': 'Список Skill Types',
'List Skills': 'Список Навыков',
'List Solutions': 'Список решений',
'List Staff Types': 'Список сотрудников типы',
'List Status': 'Статус списка',
'List Subscriptions': 'Список Подписок',
'List Subsectors': 'Список Подсекторов',
'List Support Requests': 'Перечислите Запросы на поддержку',
'List Survey Answers': 'Составьте список Ответов на вопросы исследования',
'List Survey Questions': 'Перечислите вопросы исследования',
'List Survey Series': 'Перечислите группы исследования',
'List Survey Templates': 'Перечислите шаблоны исследования',
'List Tasks': 'Задачи списка',
'List Teams': 'Список Групп',
'List Themes': 'Список Темы',
'List Tickets': 'Список Паспорта',
'List Tracks': 'Список Дорожек',
'List Trainings': 'Список &обучения',
'List Units': 'Список Единиц',
'List Users': 'Список пользователей',
'List Warehouses': 'Список хранилищ',
'List all': 'Показать все',
'List available Scenarios': 'Список доступных Сценариев',
'List of Items': 'Список элементов',
'List of Missing Persons': 'Из списка пропавших без вести лиц',
'List of Peers': 'Список из Peers',
'List of Reports': 'Из списка докладов',
'List of Requests': 'Из списка Запросы',
'List of Spreadsheets uploaded': 'Список из Таблицы закачан',
'List of Spreadsheets': 'Из списка Электронных',
'List of Volunteers for this skill set': 'Из списка Добровольцев для этого набора навыков',
'List of Volunteers': 'Из списка Добровольцев',
'List of addresses': 'Из списка адресов',
'List unidentified': 'Список неидентифицированные',
'List': 'список',
'List/Add': 'Список/Add',
'Lists "who is doing what & where". Allows relief agencies to coordinate their activities': 'Список \\ " кто чем занимается и где\\ ". Это позволяет агентствам по оказанию помощи пострадавшим координировать свою деятельность',
'Live Help': 'Live Поможет',
'Livelihood': 'Существованию',
'Load Cleaned Data into Database': 'Нагрузочного Очистки в базе данных',
'Load Raw File into Grid': 'Нагрузочного Raw в Сетке',
'Loading': 'загрузка',
'Local Name': 'Локальное имя',
'Local Names': 'Локальные имена',
'Location 1': 'Участок 1',
'Location 2': 'Участок 2',
'Location Details': 'Подробная информация о положении',
'Location Hierarchy Level 0 Name': 'Расположение уровень иерархии 0 имя',
'Location Hierarchy Level 1 Name': 'Расположение уровень иерархии 1 имя',
'Location Hierarchy Level 2 Name': 'Расположение уровень иерархии 2 имя',
'Location Hierarchy Level 3 Name': 'Расположение уровень иерархии 3 имя',
'Location Hierarchy Level 4 Name': 'Расположение уровень иерархии 4 имя',
'Location Hierarchy Level 5 Name': 'Расположение уровень иерархии 5 имя',
'Location added': 'Участок добавлен',
'Location deleted': 'Участок удален',
'Location group cannot be a parent.': 'Расположение группы не может быть родительским.',
'Location group cannot have a parent.': 'Расположение группы не может иметь родительский объект.',
'Location groups can be used in the Regions menu.': 'Расположение группы могут использоваться в регионах меню.',
'Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group.': 'Расположение группы могут использоваться для отбора показано на карте и включения в результаты поиска только те сущности охватываемым расположениях из группы.',
'Location updated': 'Расположение обновляется',
'Location': 'расположение',
'Location:': 'Расположение:',
'Locations of this level need to have a parent of level': 'Расположениях этого уровня должны иметь родительский уровень',
'Locations': 'расположения',
'Log Entry Details': 'Сведения о записи протокола',
'Log entry added': 'Добавлена запись журнала',
'Log entry deleted': 'Запись удалена журнала',
'Log entry updated': 'Запись обновлена журнала',
'Log': 'протокол',
'Login': 'идентификатор входа',
'Logistics Management System': 'Системы управления материально -',
'Logistics': 'Материально',
'Logo file %s missing!': 'Файл логотипа% s отсутствует!',
'Logo': 'Эмблема',
'Logout': 'Закрыть сеанс',
'Long Text': 'Длинный текст',
'Longitude is West - East (sideways).': 'Долгота - запад - восток (сползает).',
'Longitude is West-East (sideways).': 'Долгота - запад - восток (сползает).',
'Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Долгота - ноль на меридиане (среднее время по гринвичу) и является положительным на востоке, в европе и азии. Longitude является отрицательным, на запад в атлантике и в южной америке.',
'Longitude is zero on the prime meridian (through Greenwich, United Kingdom) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Долгота - ноль на меридиане (по Гринвичского), соединенное королевство и благоприятно влияет на востоке, в европе и азии. Longitude является отрицательным, на запад в атлантике и в южной америке.',
'Longitude of Map Center': 'Долгота центра карты',
'Longitude of far eastern end of the region of interest.': 'Долгота и дальнего востока в конце области интересов.',
'Longitude of far western end of the region of interest.': 'Долгота в крайнем западе в конце региона интерес.',
'Longitude %(lon)s is invalid, should be between %(lon_min)s & %(lon_max)s': 'Долгота %(lon)s является недействительным, должна быть между %(lon_min)s & %(lon_max)s',
'Longitude': 'Долгота',
'Looting': 'Разграбление',
'Lost Password': 'Потерян пароль',
'Lost': 'Утерян',
'Low': 'Низкое',
'Magnetic Storm': 'Магнитного Грозы',
'Major Damage': 'Основной ущерб',
'Major expenses': 'Основные расходы',
'Major outward damage': 'Основное внешнее повреждение',
'Make Commitment': 'Выполнять обязательство',
'Make New Commitment': 'Сделать новое обязательство',
'Make Request': 'Сделать Запрос',
'Make preparations per the <instruction>': 'Сделать подготовки на<instruction>',
'Male': 'Мужской',
'Manage Relief Item Catalogue': 'Управление Чрезвычайной Элемента Каталога',
'Manage Users & Roles': 'Управление пользователями и ролями',
'Manage Warehouses/Sites': 'Управление хранилищами/сайтами',
'Manage Your Facilities': 'Управлять своими возможностями',
'Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.': 'Управлять запросами на расходные материалы, активы, сотрудников или других ресурсов. Соответствует против Кадастров где предоставляет будут затребованы.',
'Manage requests of hospitals for assistance.': 'Управление запросами из больниц о помощи.',
'Manage volunteers by capturing their skills, availability and allocation': 'Управление волонтерами с учетом их навыков, наличия и расположения',
'Managing Office': 'Управление Office',
'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'Обязательный. В GeoServer, то это имя слоя. В WFS getCapabilities, то это FeatureType часть имени после двоеточия (:).',
'Mandatory. The URL to access the service.': 'Обязательный. URL, доступа к службе.',
'Manual Synchronization': 'Синхронизация вручную',
'Manual': 'руководство',
'Many': 'Множество',
'Map Center Latitude': 'Широта центра карты',
'Map Center Longitude': 'Долгота центра карты',
'Map Profile Details': 'Детали конфигурации отображения',
'Map Profile added': 'Конфигурация отображения Добавлена',
'Map Profile deleted': 'Конфигурация отображения удалена',
'Map Profile removed': 'Конфигурация отображения перемещена',
'Map Profile updated': 'Обновление конфигурации отображения',
'Map Profile': 'Конфигурация отображения',
'Map Profiles': 'Конфигурации отображения',
'Map Height': 'Высота карты',
'Map Service Catalog': 'Map Службы Каталога',
'Map Settings': 'Параметры карты',
'Map Viewing Client': 'Карты клиента просмотра',
'Map Width': 'Ширина карты',
'Map Zoom': 'Карты Масштаба',
'Map of Hospitals': 'Карты из Больницы',
'Map': 'Схема',
'Marine Security': 'Морская безопасность',
'Marital Status': 'Семейное положение',
'Marker Details': 'Детали маркера',
'Marker added': 'Добавлен маркер',
'Marker deleted': 'Маркер удален',
'Marker updated': 'Маркер обновляется',
'Marker': 'Маркер',
'Markers': 'Маркеры',
'Master Message Log to process incoming reports & requests': 'Главный журнал сообщений для обработки входящих отчеты& запросы',
'Master Message Log': 'Главный журнал сообщений',
'Match Percentage': 'Процент соответствия',
'Match Requests': 'Сопоставить запросы',
'Match percentage indicates the % match between these two records': 'Процент соответствия указывает % соответствия между этими двумя записями',
'Match?': 'Соответствует?',
'Matching Catalog Items': 'Соответствующие элементы каталога',
'Matching Items': 'Совпадающие элементы',
'Matching Records': 'Соответствующие записи',
'Maximum Location Latitude': 'Максимальное положение Широты',
'Maximum Location Longitude': 'Максимальное положение Longitude',
'Medical and public health': 'Медицинское и общественного здравоохранения',
'Medium': 'средняя',
'Megabytes per Month': 'Мегабайты в месяц',
'Member removed from Group': 'Членство удалено',
'Members': 'Члены',
'Membership Details': 'Детали членства',
'Membership updated': 'Членство обновлено',
'Membership': 'членство',
'Memberships': 'Членства',
'Message Details': 'Детали сообщения',
'Message Variable': 'Переменная Сообщения',
'Message added': 'Сообщение добавлено',
'Message deleted': 'Сообщение удалено',
'Message updated': 'Сообщение обновлено',
'Message variable': 'Переменная сообщения',
'Message': 'Сообщение',
'Messages': 'сообщения',
'Messaging settings updated': 'Параметры передачи сообщений обновлены',
'Messaging': 'Сообщения',
'Meteorite': 'метеорит',
'Meteorological (inc. flood)': 'Метеорологические (inc. Наводнениям)',
'Method used': 'Использованный метод',
'Middle Name': 'второе имя',
'Migrants or ethnic minorities': 'Мигранты или этнические меньшинства',
'Military': 'Военные',
'Minimum Location Latitude': 'Минимальное положение Широты',
'Minimum Location Longitude': 'Минимальное положение Долготы',
'Minimum shift time is 6 hours': 'Минимальный сдвиг по времени - 6 часов',
'Minor Damage': 'Незначительные повреждения',
'Minorities participating in coping activities': 'Меньшинства, участвующие в правозащитной деятельности',
'Minutes must be a number between 0 and 60': 'Минут должен иметь значение между 0 и 60',
'Minutes per Month': 'Минут за месяц',
'Minutes should be a number greater than 0 and less than 60': 'Минут должно быть числом больше 0 и меньше 60',
'Miscellaneous': 'Разное',
'Missing Person Details': 'Отсутствует сведения о сотруднике',
'Missing Person Registry': 'Отсутствует Лицо Реестра',
'Missing Person': 'Отсутствует Лицо',
'Missing Persons Registry': 'Пропавших Реестра',
'Missing Persons Report': 'Пропавших Отчета',
'Missing Persons': 'Пропавших',
'Missing Report': 'Отсутствует Отчета',
'Missing Senior Citizen': 'Отсутствует Высокопоставленных Гражданину',
'Missing Vulnerable Person': 'Отсутствует Уязвимом Лицо',
'Missing': 'Отсутствующий',
'Mission Details': 'Детали миссии',
'Mission Record': 'Запись миссии',
'Mission added': 'Миссии добавлен',
'Mission deleted': 'Миссия удалена',
'Mission updated': 'Миссия обновлена',
'Missions': 'Миссии',
'Mobile Basic Assessment': 'Mobile Basic Оценки',
'Mobile Phone': 'Мобильный телефон',
'Mobile': 'Поддержка мобильных устройств',
'Mode': 'режим',
'Model/Type': 'Модель/тип',
'Modem Settings': 'установки модема',
'Modem settings updated': 'Установки модема обновлены',
'Modem': 'Модем',
'Moderate': 'Средняя',
'Moderator': 'модератор',
'Modify Information on groups and individuals': 'Изменить информацию на группы и лица',
'Modifying data in spreadsheet before importing it to the database': 'Изменение данных в электронной таблице перед импортом ее в базу данных',
'Module provides access to information on current Flood Levels.': 'Модуль предоставляет доступ к информации о текущих уровнях наводнений.',
'Module': 'Модуль',
'Monday': 'Понедельник',
'Monthly Cost': 'Месячные расходы',
'Monthly Salary': 'Ежемесячной заработной',
'Months': 'месяцы',
'Morgue Status': 'Состояние морг',
'Morgue Units Available': 'Доступных единиц морг',
'Mosque': 'Мечети',
'Motorcycle': 'Мотоциклов',
'MultiPolygon': 'мультимногоугольник',
'Multiple Matches': 'Несколько соответствий',
'Multiple': 'множественный',
'Muslim': 'Мусульманское',
'Must a location have a parent location?': 'Должно расположение иметь родительское расположение?',
'My Current function': 'Моя текущей функции',
'My Tasks': 'Список задач',
'N/A': 'Отсутствует',
'NO': 'нет',
'NZSEE Level 1': 'NZSEE уровень 1',
'NZSEE Level 2': 'NZSEE уровень 2',
'Name and/or ID': 'Имя и/или ид',
'Name of the file (& optional sub-path) located in static which should be used for the background of the header.': 'Имя этого файла (& необязательно sub - path), расположенных в статическими которые следует использовать для фона заголовка.',
'Name of the file (& optional sub-path) located in static which should be used for the top-left image.': 'Имя этого файла (& необязательно sub - path), расположенных в статическими которые следует использовать для верхнего левого изображения.',
'Name of the file (& optional sub-path) located in views which should be used for footer.': 'Имя этого файла (& необязательно sub - path), расположенные в панелях которые следует использовать для колонтитула.',
'Name of the person in local language and script (optional).': 'Имя лица в местных языком и письменностью (необязательно).',
'Name': 'Имя',
'Name, Org and/or ID': 'Имя, Org и/или ид',
'Names can be added in multiple languages': 'Имена могут быть добавлены на нескольких языках',
'National ID Card': 'Национальные ID Карты',
'National NGO': 'Национальное нпо',
'National': 'Национальный',
'Nationality of the person.': 'Гражданство этого лица.',
'Nationality': 'Гражданство',
'Nautical Accident': 'Несчастный случай на море',
'Nautical Hijacking': 'Морское пиратство',
'Need Type Details': 'требуются детали типа',
'Need Type added': 'Требуемый тип добавлен',
'Need Type deleted': 'Требуемый тип удален',
'Need Type updated': 'Требуется обновление типа',
'Need Type': 'Требуется тип',
'Need Types': 'Требуются типы',
'Need added': 'Необходимо добавить',
'Need deleted': 'Необходимо удалить',
'Need to be logged-in to be able to submit assessments': 'Должны зарегистрироваться на сможет представить оценки',
'Need to configure Twitter Authentication': 'Необходимо настроить Twitter Подлинности',
'Need to specify a Budget!': 'Необходимо указать Бюджета!',
'Need to specify a Kit!': 'Необходимо указать Комплект!',
'Need to specify a Resource!': 'Необходимо указать ресурс!',
'Need to specify a bundle!': 'Необходимо указать комплект!',
'Need to specify a group!': 'Необходимо указать группу!',
'Need to specify a location to search for.': 'Необходимо указать расположение для поиска.',
'Need to specify a role!': 'Требуется определить роль!',
'Need to specify a table!': 'Требуется указать таблицу!',
'Need to specify a user!': 'Требуется указать пользователя!',
'Need updated': 'Требуется обновление',
'Needs Details': 'Потребности Сведения',
'Needs Maintenance': 'нуждается в обслуживании',
'Needs to reduce vulnerability to violence': 'Необходимо для уменьшения уязвимости к насилию',
'Needs': 'Потребности',
'Negative Flow Isolation': 'Изоляция отрицательного потока',
'Neighborhood': 'соседство',
'Neighbouring building hazard': 'Опастность прилегающей застройки',
'Neonatal ICU': 'Неонатальной ICU',
'Neonatology': 'Бойлерного',
'Network': 'сеть',
'Neurology': 'Неврологии',
'New Assessment reported from': 'Новая оценка сообщило из',
'New Certificate': 'Новый сертификат',
'New Checklist': 'Новая контрольная таблица',
'New Entry': 'Создать запись',
'New Event': 'Новое событие',
'New Item Category': 'Новой категории номенклатур',
'New Job Role': 'Новое задание Роль',
'New Location Group': 'Новое расположение Группы',
'New Location': 'Новое расположение',
'New Peer': 'Новый равноправный пользователь сети',
'New Record': 'Новая запись',
'New Request': 'Новый запрос',
'New Scenario': 'Новый Сценарий',
'New Skill': 'Создать навык',
'New Solution Choice': 'Новый выбора решения',
'New Staff Member': 'Новый сотрудник',
'New Support Request': 'Новый запрос о поддержке',
'New Synchronization Peer': 'Новая синхронизация Однорангового',
'New Team': 'Новая Группа',
'New Training Course': 'Новый курс подготовки',
'New Volunteer': 'Новый волонтер',
'New cases in the past 24h': 'Новые случаи в прошлом 24h',
'New': 'создать',
'News': 'Новости',
'No Activities Found': 'Действия не найдены',
'No Alternative Items currently registered': 'Нет зарегистрированных к настоящему моменту альтернативных элементов',
'No Assessment Summaries currently registered': 'В данный момент нет зарегистрированных суммарных оценок',
'No Assessments currently registered': 'В данный момент нет зарегистрированных оценок',
'No Assets currently registered in this event': 'Нет зарегистрированных к настоящему моменту оценок этому событию',
'No Assets currently registered in this scenario': 'Нет зарегистрированных к настоящему моменту оценок этому сценарию',
'No Assets currently registered': 'Не Активы в данный момент зарегистрированы',
'No Baseline Types currently registered': 'Не Baseline Типы зарегистрировано в',
'No Baselines currently registered': 'Не Исходных зарегистрировано в',
'No Brands currently registered': 'Не Марок в настоящее время зарегистрированы',
'No Budgets currently registered': 'Бюджетах не зарегистрировано в',
'No Bundles currently registered': 'Нет комплектов зарегистрировано в',
'No Camp Services currently registered': 'Не Лагере Услуг зарегистрировано в',
'No Camp Types currently registered': 'Не Лагере Типы зарегистрировано в',
'No Camps currently registered': 'Не Лагерях в данный момент зарегистрированы',
'No Catalog Items currently registered': 'Элементы каталога не зарегистрировано в',
'No Catalogs currently registered': 'Каталоги не зарегистрировано в',
'No Checklist available': 'Не Checklist доступны',
'No Cluster Subsectors currently registered': 'Подсекторов кластера не зарегистрировано в',
'No Clusters currently registered': 'Не Кластерам в данный момент зарегистрированы',
'No Commitment Items currently registered': 'Элементы фиксации не зарегистрировано в',
'No Commitments': 'Никаких обязательств',
'No Credentials currently set': 'Не Учетные текущее значение',
'No Details currently registered': 'В данный момент сообщений о подробностях отсутствуют',
'No Documents found': 'Документы не найдены',
'No Donors currently registered': 'В настоящее время нет зарегистрированных доноров',
'No Events currently registered': 'События не зарегистрировано в',
'No Facilities currently registered in this event': 'Не Средства зарегистрированных на настоящий момент это событие',
'No Facilities currently registered in this scenario': 'Не Средства зарегистрированных на настоящий момент этот сценарий',
'No Feature Layers currently defined': 'Компонентов не Уровнями определенных',
'No Flood Reports currently registered': 'Не Наводнениям Отчеты зарегистрировано в',
'No Groups currently defined': 'Группы не определенные в данный момент',
'No Groups currently registered': 'Группы не зарегистрировано в',
'No Hospitals currently registered': 'В настоящее время нет зарегистрированных больниц',
'No Human Resources currently registered in this event': 'Не людские ресурсы зарегистрированных на настоящий момент это событие',
'No Human Resources currently registered in this scenario': 'Не людские ресурсы зарегистрированных на настоящий момент этот сценарий',
'No Identification Report Available': 'Нет доступных идентификационных отчетов',
'No Identities currently registered': 'В данный момент нет зарегистрированных идентификаторов',
'No Image': 'Нет изображения',
'No Images currently registered': 'В данный момент нет зарегистрированного изображения',
'No Impact Types currently registered': 'Не Влияние Типы зарегистрировано в',
'No Impacts currently registered': 'В данный момент нет зарегистрированных влияний',
'No Incident Reports currently registered': 'Не Инцидент Отчеты зарегистрировано в',
'No Incoming Shipments': 'Не поступающих грузов',
'No Item Categories currently registered': 'Не категорий элементов в данный момент зарегистрированы',
'No Item Packs currently registered': 'Элемент не Packs зарегистрировано в',
'No Items currently registered in this Inventory': 'Нет элементов зарегистрированных на настоящий момент этот перечень',
'No Items currently registered': 'Нет зарегистрированных элементов',
'No Keys currently defined': 'Нет ключей, определенных в данный момент',
'No Kits currently registered': 'В данный момент нет зарегистрированных наборов',
'No Level 1 Assessments currently registered': 'В данный момент нет зарегистрированных оценок уровеня 1',
'No Level 2 Assessments currently registered': 'В данных момент нет зарегистрированных оценок Уровня 2',
'No Locations currently available': 'В настоящий момент нет доступных помещений',
'No Locations currently registered': 'В настоящий момент нет зарегистрированных помещений',
'No Map Profiles currently defined': 'Не конфигураций отображения определенных',
'No Map Profiles currently registered in this event': 'Не конфигураций отображения зарегистрированных на настоящий момент это событие',
'No Map Profiles currently registered in this scenario': 'Не конфигураций отображения зарегистрированных на настоящий момент этот сценарий',
'No Markers currently available': 'Маркеры в данный момент недоступны',
'No Match': 'Нет совпадения',
'No Matching Catalog Items': 'Нет соответствующих элементов каталога',
'No Matching Items': 'Нет подходящих элементов',
'No Matching Records': 'Не соответствующих записей',
'No Members currently registered': 'В настоящее время нет зарегистрированных членов',
'No Memberships currently defined': 'В данный момент принадлежность не определена',
'No Messages currently in Outbox': 'В настоящее время нет исходящих сообщений',
'No Need Types currently registered': 'Нет необходимости Типы зарегистрировано в',
'No Needs currently registered': 'Не Нужно в настоящее время зарегистрированы',
'No Offices currently registered': 'В настоящее время нет зарегистрированных министерств',
'No Offices found!': 'Не найдено министерств!',
'No Organizations currently registered': 'В настоящее время нет зарегистрированных организаций',
'No People currently registered in this camp': 'Нет людей, зарегистрированных на настоящий момент в этом лагере',
'No People currently registered in this shelter': 'На настоящий момент нет людей, зарегистрированных в этом убежище',
'No Persons currently registered': 'На настоящий момент нет зарегистрированных лиц',
'No Persons currently reported missing': 'На настоящий момент нет лиц, о которых сообщено, что они пропали',
'No Persons found': 'Никто не найден',
'No Photos found': 'Нет найденных Фотографий',
'No Picture': 'Нет рисунка',
'No Population Statistics currently registered': 'Нет статистики населения зарегистрированной к настоящему моменту',
'No Presence Log Entries currently registered': 'Не Присутствия записи журнала зарегистрировано в',
'No Problems currently defined': 'Никаких проблем в настоящее время,',
'No Projections currently defined': 'Никаких прогнозов момент определены',
'No Projects currently registered': 'Проекты не зарегистрировано в',
'No Rapid Assessments currently registered': 'Не Быстрой Оценки зарегистрировано в',
'No Received Items currently registered': 'Не полученных номенклатур зарегистрировано в',
'No Received Shipments': 'Не Получено Поставок',
'No Records currently available': 'Нет записей момент недоступна',
'No Request Items currently registered': 'В настоящий момент нет зарегистрированных элементов запросов',
'No Requests': 'Нет запросов',
'No Rivers currently registered': 'Не Реках зарегистрировано в',
'No Roles currently defined': 'Нет определенных в данный момент ролей',
'No Rooms currently registered': 'Нет помещений, зарегистрированных в настоящий момент',
'No Scenarios currently registered': 'Нет сценариев,зарегистрированных в настоящий момент',
'No Sections currently registered': 'Нет в данный момент зарегистрированных секций',
'No Sectors currently registered': 'Не Секторов в настоящее время зарегистрированы',
'No Sent Items currently registered': 'Не отправленные в данный момент зарегистрированы',
'No Sent Shipments': 'Не Отправляется Поставок',
'No Settings currently defined': 'Никакие параметры определенные в данный момент',
'No Shelter Services currently registered': 'Не Жильем Услуг зарегистрировано в',
'No Shelter Types currently registered': 'Не Жильем Типы зарегистрировано в',
'No Shelters currently registered': 'Не Приютов в данный момент зарегистрированы',
'No Solutions currently defined': 'Не Решения определенных в данный момент',
'No Staff Types currently registered': 'Нет сотрудников Типы зарегистрировано в',
'No Subscription available': 'Нет подписки доступны',
'No Subsectors currently registered': 'Не Подсекторов в данный момент зарегистрированы',
'No Support Requests currently registered': 'Не поддерживает Запросы зарегистрировано в',
'No Survey Answers currently entered.': 'Не Опроса Ответы введен в настоящий момент.',
'No Survey Questions currently registered': 'Не Обследований текущие вопросы зарегистрированными',
'No Survey Series currently registered': 'Не Обследования Ряда зарегистрировано в',
'No Survey Template currently registered': 'Не шаблон опроса зарегистрировано в',
'No Tasks with Location Data': 'Нет задиний с Данными о расположении',
'No Teams currently registered': 'Нет групп зарегистрировано в',
'No Themes currently defined': 'Не Тем момент определены',
'No Tickets currently registered': 'Не Паспорта в настоящее время зарегистрированы',
'No Tracks currently available': 'Не Отслеживает момент недоступна',
'No Users currently registered': 'Пользователи не зарегистрировано в',
'No Volunteers currently registered': 'Добровольцы не зарегистрировано в',
'No Warehouses currently registered': 'Не Склады в настоящее время зарегистрированы',
'No access at all': 'Нет доступа на всех',
'No access to this record!': 'Нет доступа к этой записи!',
'No action recommended': 'Нет рекомендованных действий',
'No conflicts logged': 'Нет конфликтов регистрируется',
'No contact information available': 'Отсутствует контактная информация доступна',
'No contacts currently registered': 'Контакты не зарегистрировано в',
'No data in this table - cannot create PDF!': 'Отсутствие данных в этой таблице - невозможно создать PDF!',
'No databases in this application': 'Нет баз данных в этом приложении',
'No dead body reports available': 'Сообщений о погибших нет',
'No entries found': 'Записи не найдены',
'No entries matching the query': 'Нет записей, query',
'No entry available': 'Запись не доступны',
'No location known for this person': 'Не известно место, куда можно разместить это лицо',
'No locations found for members of this team': 'Не найдены помещения для членов этой группы',
'No log entries matching the query': 'Нет записей в журналe соответствующих запросу',
'No messages in the system': 'Нет сообщений в системе',
'No peers currently registered': 'Не peers в данный момент зарегистрированы',
'No pending registrations found': 'Не ожидает найдены регистрации',
'No pending registrations matching the query': 'Нет ожидающих регистрации, query',
'No person record found for current user.': 'Не найдена личная запись для данного пользователя',
'No problem group defined yet': 'Не проблема группу, еще',
'No records matching the query': 'Нет записей, query',
'No reports available.': 'Нет доступных отчетов.',
'No reports currently available': 'В настоящее время нет доступных отчетов',
'No requests found': 'Запросов не найдено',
'No resources currently reported': 'Не в настоящее время ресурсы о',
'No service profile available': 'Службы не профайла',
'No skills currently set': 'Навыки не текущее значение',
'No staff or volunteers currently registered': 'Нет сотрудников или добровольцев зарегистрировано в',
'No status information available': 'Не доступные сведения о состоянии',
'No synchronization': 'Не синхронизации',
'No tasks currently registered': 'В настоящий момент нет зарегистрированных заданий',
'No template found!': 'Шаблон не найден!',
'No units currently registered': 'Единиц не зарегистрировано в',
'No volunteer availability registered': 'Не доброволец доступности зарегистрированными',
'No': 'нет',
'Non-structural Hazards': 'Не - структурных опасностей',
'None (no such record)': 'Нет (нет такой записи)',
'None': 'Отсутствует',
'Normal': 'Стандартный',
'Not Applicable': 'нет',
'Not Authorised!': 'Не авторизован!',
'Not Possible': 'Невозможно',
'Not Set': 'Не установлен',
'Not Authorized': 'Не авторизован',
'Not installed or incorrectly configured.': 'Не установлен или неверно сконфигурирована.',
'Not yet a Member of any Group': 'В настоящее время принадлежность не зарегистрирована',
'Note that this list only shows active volunteers. To see all people registered in the system, search from this screen instead': 'Обратите внимание, что этот список только показаны активные добровольцев. Для просмотра всех людей зарегистрирована в системе поиска, на этом экране вместо',
'Notice to Airmen': 'Обратите внимание на Вертолетчика',
'Number of Columns': 'Число столбцов',
'Number of Patients': 'Количество пациентов',
'Number of Rows': 'Число строк',
'Number of additional beds of that type expected to become available in this unit within the next 24 hours.': 'Ожидаемое количество дополнительных коек такого типа в течение ближайших суток в этом подразделении.',
'Number of alternative places for studying': 'Количество альтернативных мест для получения образования',
'Number of available/vacant beds of that type in this unit at the time of reporting.': 'Количество имующихся в наличии/вакантных коек этого типа в данном подразделении на отчетное время.',
'Number of deaths during the past 24 hours.': 'Число погибших за последние 24 часа.',
'Number of discharged patients during the past 24 hours.': 'Количество пациентов, выписанных из больницы за последние сутки.',
'Number of doctors': 'Количество врачей',
'Number of in-patients at the time of reporting.': 'Количество стационарных больных на момент отчетности.',
'Number of newly admitted patients during the past 24 hours.': 'Количество вновь принятых за последние 24 часа пациентов .',
'Number of non-medical staff': 'Количество немедицинского персонала',
'Number of nurses': 'Количество медицинских сестер',
'Number of private schools': 'количество частных школ',
'Number of public schools': 'Количество государственных школ',
'Number of religious schools': 'Количество религиозных школ',
'Number of residential units not habitable': 'Число необитаемых жилых единиц',
'Number of residential units': 'Количество жилых единиц',
'Number of vacant/available beds in this hospital. Automatically updated from daily reports.': 'Количество свободных/доступных коек в этой больнице. Автоматически обновляется из ежедневных отчетов.',
'Number of vacant/available units to which victims can be transported immediately.': 'Количество свободных/доступных комплексов, куда можно немедленно транспортировать пострадавших',
'Number or Label on the identification tag this person is wearing (if any).': 'Номер или идентификационная ярлык на метке, который носит данное лицо (если она есть).',
'Number or code used to mark the place of find, e.g. flag code, grid coordinates, site reference number or similar (if available)': 'Номер или код используется для маркировки, на поиск, например флага, таблицу coordinates, сайта или порядковый номер аналогичного (если доступно)',
'Number': 'Число',
'Number/Percentage of affected population that is Female & Aged 0-5': 'Число/Процент пострадавшего населения, то есть женский пол и возраст 0 - 5',
'Number/Percentage of affected population that is Female & Aged 13-17': 'Число/Процент пострадавшего населения, то есть женский пол и возраст 13 - 17',
'Number/Percentage of affected population that is Female & Aged 18-25': 'Число/Процент пострадавших людей, - женщины& В Возрасте 18 - 25',
'Number/Percentage of affected population that is Female & Aged 26-60': 'Число/Процент пострадавших людей, - женщины& В Возрасте 26 - 60',
'Number/Percentage of affected population that is Female & Aged 6-12': 'Число/Процент пострадавших людей, - женщины& В Возрасте 6 - 12',
'Number/Percentage of affected population that is Female & Aged 61+': 'Число/Процент пострадавших людей, - женщины& Возрасте 61+',
'Number/Percentage of affected population that is Male & Aged 0-5': 'Число/Процент пострадавших людей, - мужчины& В Возрасте 0 - 5',
'Number/Percentage of affected population that is Male & Aged 13-17': 'Число/Процент пострадавших людей, - мужчины& В Возрасте 13 - 17',
'Number/Percentage of affected population that is Male & Aged 18-25': 'Число/Процент пострадавших людей, - мужчины& В Возрасте 18 - 25',
'Number/Percentage of affected population that is Male & Aged 26-60': 'Количество/Процент пострадавшего населения, то есть мужчин В Возрасте 26 - 60',
'Number/Percentage of affected population that is Male & Aged 6-12': 'Количество/Процент пострадавшего населения, то есть мужчины в возрасте 6 - 12',
'Number/Percentage of affected population that is Male & Aged 61+': 'Количество/Процент пострадавшего населения, то есть мужчин в возрасте 61+',
'Nursery Beds': 'Койки в детсаду',
'Nutrition problems': 'Проблемы питания',
'Nutrition': 'Питание',
'OK': 'ОК',
'OR Reason': 'ИЛИ Причина',
'OR Status Reason': 'ИЛИ причина состояния',
'OR Status': 'ИЛИ Состояние',
'Observer': 'наблюдатель',
'Obsolete': 'Устарел',
'Obstetrics/Gynecology': 'акушерство/гинекология',
'Office Address': 'Адрес офиса',
'Office Details': 'Сведения об офисе',
'Office Phone': 'Рабочий телефон',
'Office added': 'Офис добавлен',
'Office deleted': 'Офис удален',
'Office updated': 'Office обновлен',
'Office': 'Офис',
'Offices & Warehouses': 'Отделения& Склады',
'Offices': 'Отделения',
'Offline Sync (from USB/File Backup)': 'Offline Sync (с USB/файл резервной копии)',
'Offline Sync': 'Автономная синхронизация',
'Older people as primary caregivers of children': 'Пожилых людей как первичный уход детей',
'Older people in care homes': 'Пожилых людей в детских',
'Older people participating in coping activities': 'Пожилых людей участвующих в преодолении activities',
'Older person (>60 yrs)': 'Лицо старше (>60 подшаблоне)',
'On by default? (only applicable to Overlays)': 'По умолчанию? (применимо только к оверлеи)',
'On by default?': 'По умолчанию?',
'One Time Cost': 'Единовременная стоимость',
'One time cost': 'Единовременная стоимость',
'One-time costs': 'Один - время затрат',
'One-time': 'Одноразовый',
'Oops! Something went wrong...': 'Оп! Примерно причину..',
'Oops! something went wrong on our side.': 'Оп! Причину - на нашей стороне.',
'Opacity (1 for opaque, 0 for fully-transparent)': 'Opacity (1 для opaque, 0 для полное прозрачный)',
'Open area': 'Открыть область',
'Open recent': 'Открыть последние',
'Open': 'открыть',
'Operating Rooms': 'Операционных Комнат',
'Optional link to an Incident which this Assessment was triggered by.': 'Необязательно ссылка на инцидента, который эта оценка была триггером.',
'Optional': 'необязательный',
'Optional. If you wish to style the features based on values of an attribute, select the attribute to use here.': 'Это необязательный параметр. Если вы хотите стиля к функции на основе значений атрибута, выберите атрибут для здесь.',
'Optional. In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'Это необязательный параметр. В GeoServer, это рабочей области URI пространства имен (не имя!). в WFS getCapabilities, то это FeatureType часть имени, прежде чем двоеточием (:).',
'Optional. The name of an element whose contents should be a URL of an Image file put into Popups.': 'Это необязательный параметр. Собой имя элемента содержимое которого должно быть URL файла изображения помещаются в всплывающими окнами.',
'Optional. The name of an element whose contents should be put into Popups.': 'Это необязательный параметр. - имя элемента, содержимое которых необходимо поставить в всплывающими окнами.',
'Optional. The name of the schema. In Geoserver this has the form http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.': 'Это необязательный параметр. Имя схемы. В Geoserver это имеет форме http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.',
'Options': 'параметры',
'Organization Details': 'Сведения об организации',
'Organization Registry': 'Организации Реестра',
'Organization added': 'Организации добавлен',
'Organization deleted': 'Организации удален',
'Organization updated': 'Организации обновляется',
'Organization': 'структура',
'Organizations': 'Организации',
'Origin of the separated children': 'Происхождения разлученных детей',
'Origin': 'Способ создания',
'Other (describe)': 'Другие (описать)',
'Other (specify)': 'Другие (указать)',
'Other Evidence': 'Другие доказательства',
'Other Faucet/Piped Water': 'Другая вода из крана/трубопровода',
'Other Isolation': 'Другая изоляция',
'Other Name': 'Другое имя',
'Other activities of boys 13-17yrs before disaster': 'Другие виды деятельности мальчиков 13 - 17лет до бедствия',
'Other activities of boys 13-17yrs': 'Другие виды деятельности мальчиков 13 - 17лет',
'Other activities of boys <12yrs before disaster': 'Другие виды деятельности мальчиков <12yrs до бедствия',
'Other activities of boys <12yrs': 'Другие виды деятельности мальчиков <12 лет',
'Other activities of girls 13-17yrs before disaster': 'Другие виды деятельности девочек 13 - 17 лет до бедствия',
'Other activities of girls 13-17yrs': 'Другие виды деятельности девочек 13 - 17лет',
'Other activities of girls<12yrs before disaster': 'Другие виды деятельности девочек <12 лет до бедствия',
'Other activities of girls<12yrs': 'Другие виды деятельности девочек <12 лет',
'Other alternative infant nutrition in use': 'Другие альтернативные младенческой питания используется в',
'Other alternative places for study': 'Другие альтернативные места для исследования',
'Other assistance needed': 'Другие необходимую помощь',
'Other assistance, Rank': 'Другой помощи, Ранг',
'Other current health problems, adults': 'Другие текущие проблемы охраны здоровья, взрослыми',
'Other current health problems, children': 'Другие текущие проблемы здравоохранения, детей',
'Other events': 'другие события',
'Other factors affecting school attendance': 'Другие факторы, влияющие на школьную посещаемость',
'Other major expenses': 'Другие основные расходы',
'Other non-food items': 'Других - непродовольственных товаров',
'Other recommendations': 'Другие рекомендации',
'Other residential': 'Других жилых',
'Other school assistance received': 'Других школьных помощи полученных',
'Other school assistance, details': 'Других школьных помощи, сведения',
'Other school assistance, source': 'Других школьных помощи, источник',
'Other settings can only be set by editing a file on the server': 'Другие параметры можно задать только отредактировав файл на сервере',
'Other side dishes in stock': 'Другой стороне блюда на складе',
'Other types of water storage containers': 'Других типов воды хранения контейнеров',
'Other ways to obtain food': 'Другие способы получения продовольственной',
'Other': 'Другой',
'Outbound Mail settings are configured in models/000_config.py.': 'Исходящей почты заданы параметры в моделях/000_config. py.',
'Outbox': 'Исходящие',
'Outgoing SMS Handler': 'Исходящие SMS Обработчик',
'Outgoing SMS handler': 'Исходящие SMS обработчик',
'Overall Hazards': 'Общей Опасности',
'Overhead falling hazard': 'Накладные снижения опасности',
'Overland Flow Flood': 'Оверленд Потока Наводнениями',
'Owned Resources': 'Принадлежащие ресурсы',
'PAHO UID': 'ПАОЗ UID',
'PIN number': 'Личный номер',
'PIN': 'Личный номер, код',
'PL Women': 'PL Женщин',
'Pack': 'Упаковать',
'Parameters': 'параметры',
'Parapets, ornamentation': 'Парапеты, украшения',
'Parent Office': 'Родительский Office',
'Parent needs to be of the correct level': 'Родителю необходимо быть на правильном уровне',
'Parent needs to be set for locations of level': 'Родительский необходимо установить для расположениях на уровне',
'Parent needs to be set': 'Родительский необходимо установить',
'Parent': 'Родитель',
'Parents/Caregivers missing children': 'Родителей/Уход пропавших детей',
'Partial': 'Частный',
'Participant': 'Участник',
'Pashto': 'Пушту',
'Pass': 'Успех',
'Passport': 'Паспорт',
'Password': 'Пароль',
'Path': 'путь',
'Pathology': 'Патология',
'Patients': 'Пациенты',
'Pediatric ICU': 'Педиатрических ICU',
'Pediatric Psychiatric': 'Педиатрический Психиатрический',
'Pediatrics': 'Педиатрия',
'Peer Details': 'Однорангового Сведения',
'Peer Registration Details': 'Однорангового сведения о регистрации',
'Peer Registration Request': 'Однорангового запрос о регистрации',
'Peer Registration': 'Однорангового Регистрации',
'Peer Type': 'Однорангового Тип',
'Peer UID': 'Однорангового UID',
'Peer added': 'Однорангового добавлен',
'Peer deleted': 'Однорангового удален',
'Peer not allowed to push': 'Однорангового не разрешено push',
'Peer registration request added': 'Однорангового запрос о регистрации добавлена',
'Peer registration request deleted': 'Однорангового регистрации запрос удален',
'Peer registration request updated': 'Однорангового запрос о регистрации обновления',
'Peer updated': 'Однорангового обновляется',
'Peer': 'смежная',
'Pending Requests': 'Ожидающие запросы',
'Pending': 'Отложен',
'People Needing Food': 'Люди, нуждающиеся в еде',
'People Needing Shelter': 'Люди, нуждающиеся в приюте',
'People Needing Water': 'Люди, нуждающиеся в воде',
'People Trapped': 'Люди, загнанные в ловушку',
'People': 'Участники',
'Performance Rating': 'Рейтинг производительности',
'Person 1': 'Лицо 1',
'Person 1, Person 2 are the potentially duplicate records': 'Лицо 1, лицо 2 являются потенциально дублирующими записями',
'Person 2': 'Лицо 2',
'Person De-duplicator': 'Person De - множительных',
'Person Details': 'Сведения о сотруднике',
'Person Registry': 'Регистрация субъекта',
'Person added to Group': 'Член группы добавлены',
'Person added to Team': 'Член группы добавлены',
'Person added': 'Лицо добавлено',
'Person deleted': 'Лицо удален',
'Person details updated': 'Обновленные сведения о сотруднике',
'Person interviewed': 'Субъект опрошен',
'Person who has actually seen the person/group.': 'Лицо, которое действительно видело, данного человека/группу.',
'Person': 'Человек',
'Person/Group': 'Субъект/Группа',
'Personal Data': 'Личные данные',
'Personal Effects Details': 'Сведения о личных вещах',
'Personal Effects': 'Личные вещи',
'Personal Map': 'Личные Карты',
'Personal Profile': 'Личный профиль',
'Personal impact of disaster': 'субъективное влияние бедствия',
'Persons in institutions': 'Лиц в учреждениях',
'Persons with disability (mental)': 'Лиц с инвалидностью (психической)',
'Persons with disability (physical)': 'Лиц с инвалидностью (физической)',
'Persons': 'Субъекты',
'Phone 1': 'Телефон 1',
'Phone 2': 'Телефон 2',
'Phone': 'Телефон',
'Phone/Business': 'Телефон/бизнес',
'Phone/Emergency': 'Телефон/чрезвычайное происшествие',
'Phone/Exchange (Switchboard)': 'Телефон/Exchange (Телефонист)',
'Photo Details': 'Фото Сведения',
'Photo Taken?': 'Фото приняты?',
'Photo added': 'Фото добавлено',
'Photo deleted': 'Фото удалено',
'Photo updated': 'Фото обновляется',
'Photo': 'фотография',
'Photograph': 'Фотография',
'Photos': 'Фотографии',
'Physical Description': 'Физическое описание',
'Physical Safety': 'Физической безопасности',
'Picture upload and finger print upload facility': 'Рисунок upload и finger распечатайте upload утилитой',
'Picture': 'изображение',
'Place of Recovery': 'Вместо Восстановления',
'Place': 'Группа страниц',
'Places for defecation': 'Места для опорожнения кишечника',
'Places the children have been sent to': 'Места, куда были посланы дети',
'Playing': 'Воспроизведение',
'Please correct all errors.': 'Пожалуйста, исправьте все ошибки.',
'Please enter a first name': 'Пожалуйста, введите имя',
'Please enter a site OR a location': 'Введите сайта или место',
'Please enter the first few letters of the Person/Group for the autocomplete.': 'Введите первые несколько букв или группы в автозаполнение.',
'Please enter the recipient': 'Введите получателей',
'Please fill this!': 'Введите этот!',
'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened.': 'Укажите URL - адрес страницы в, описание что вы ожидали произойдет и что на самом деле произошло.',
'Please report here where you are:': 'Просьба сообщить здесь где вы находитесь:',
'Please select another level': 'Выберите другой уровень',
'Please select': 'Пожалуйста, выберите',
'Please sign-up with your Cell Phone as this allows us to send you Text messages. Please include full Area code.': 'Пожалуйста входа при мобильный телефон как это позволяет нам отправлять текстовые сообщения. Просьба включить полный код региона.',
'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved.': 'Укажите любой проблемы и препятствия с соответствующими обработка болезнями, за detail (в цифрах, в соответствующих случаях). Можно также добавить предложений можно было бы улучшить положение.',
'Please use this field to record any additional information, including a history of the record if it is updated.': 'Укажите в этом поле запишите любую дополнительную информацию, в том числе за всю историю запись его обновления.',
'Please use this field to record any additional information, including any Special Needs.': 'Укажите в этом поле запишите любую дополнительную информацию, включая любое особые потребности.',
'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.': 'Используйте эту поле для записи всех дополнительную информацию, такую как Ushahidi ид экземпляра. Включить журнала записи, если его обновления.',
'Pledge Support': 'Обещание Поддержки',
'Point': 'Указать',
'Poisoning': 'Отравления',
'Poisonous Gas': 'Отравляющее Газа',
'Police': 'Полиции',
'Pollution and other environmental': 'Загрязнения и другого экологического',
'Polygon reference of the rating unit': 'Многоугольник ссылочного рейтинга блока',
'Polygon': 'Многоугольник',
'Poor': 'бедный',
'Population Statistic Details': 'Детали демографической статистики',
'Population Statistic added': 'демографическая статистика добавленв',
'Population Statistic deleted': 'демографическая статистика удалена',
'Population Statistic updated': 'демографическая статистика обновлена',
'Population Statistics': 'демографическая статистика',
'Population and number of households': 'Численность населения и количество семей',
'Population': 'Численность населения',
'Popup Fields': 'всплывающие поля',
'Popup Label': 'Всплывающая метка',
'Porridge': 'Овсяная каша',
'Port Closure': 'Порт Закрытия',
'Port': 'Порт',
'Position Catalog': 'Положение каталога',
'Position Details': 'Положение Сведения',
'Position added': 'Позиция добавлена',
'Position deleted': 'Позиция удалена',
'Position updated': 'Позиция обновлена',
'Position': 'позиция',
'Positions': 'Позиции',
'Postcode': 'Почтовый индекс',
'Poultry restocking, Rank': 'возобновление запасов домашней птицы, категория',
'Poultry': 'домашняя птица',
'Pounds': 'фунты',
'Power Failure': 'отключение электричества',
'Powered by Sahana': 'Поддерживается Sahana',
'Pre-cast connections': 'Pre - cast соединений',
'Preferred Name': 'Предпочтительное имя',
'Pregnant women': 'Беременные женщины',
'Preliminary': 'Предварительный',
'Presence Condition': 'Условие присутствия',
'Presence Log': 'Журнал Присутствия',
'Presence': 'Присутствие',
'Primary Occupancy': 'Основным Occupancy',
'Priority from 1 to 9. 1 is most preferred.': 'Приоритет от 1 до 9. 1 наиболее предпочтительно.',
'Priority': 'приоритет',
'Private': 'персональный',
'Problem Administration': 'Администрирование Проблемы',
'Problem Details': 'Сведения о неполадке',
'Problem Group': 'Проблема Группы',
'Problem Title': 'Проблема Title',
'Problem added': 'Проблема добавлена',
'Problem connecting to twitter.com - please refresh': 'Проблема подключения к twitter.com - регенерируйте, пожалуйста',
'Problem deleted': 'Неполадка удалена',
'Problem updated': 'Неполадка обновлена',
'Problem': 'Проблема',
'Problems': 'проблемы',
'Procedure': 'процедура',
'Process Received Shipment': 'Обработать полученную поставку',
'Process Shipment to Send': 'Обработать поставку для отправки',
'Profile': 'Профилирование',
'Project Details': 'Детали проекта',
'Project Status': 'Статус проекта',
'Project added': 'Проект добавлен',
'Project deleted': 'Проект удален',
'Project has no Lat/Lon': 'У проекта нет Широты/Долготы',
'Project updated': 'Проект обновляется',
'Project': 'Проект',
'Projection Details': 'Детали отображения',
'Projection added': 'Отображение добавлено',
'Projection deleted': 'Отображение удалено',
'Projection updated': 'Проекционная обновляется',
'Projection': 'отображение',
'Projections': 'Прогнозы',
'Projects': 'Проекты',
'Property reference in the council system': 'Ссылку на свойство в совете system',
'Protected resource': 'Защищенному ресурсу',
'Protection': 'защита',
'Provide Metadata for your media files': 'Предоставляют метаданные для файлов мультимедиа',
'Provide an optional sketch of the entire building or damage points. Indicate damage points.': 'Для обеспечения необязательно эскизом в построение или повреждение пункта. Указать ущерба точек.',
'Proxy-server': 'Прокси - сервер',
'Psychiatrics/Adult': 'Психиатрия/Взрослая',
'Psychiatrics/Pediatric': 'Pсихиатрия/Детская',
'Public Event': 'общественное мероприятие',
'Public and private transportation': 'Общественный и частный транспорт',
'Public assembly': 'Общее собрание',
'Public': 'общественный',
'Pull tickets from external feed': 'Потяните паспорта из внешних новостей',
'Punjabi': 'Панджаби',
'Purchase Date': 'Дата приобретения',
'Push tickets to external system': 'Push паспорта на внешние системы',
'Pyroclastic Flow': 'Pyroclastic Потока',
'Pyroclastic Surge': 'Pyroclastic Резкий',
'Python Serial module not available within the running Python - this needs installing to activate the Modem': 'Python Серийный модуль не в запуск Python - это нужно установка на активируйте Модеме',
'Python needs the ReportLab module installed for PDF export': 'Модуль ReportLab недоступен внутри запущенного Python - тут необходима установка для вывода PDF!',
'Quantity Committed': 'Количество Зафиксированы',
'Quantity Fulfilled': 'Количество Выполнен',
'Quantity in Transit': 'Транспортируемое количество',
'Quantity': 'количество',
'Quarantine': 'карантин',
'Queries': 'запросы',
'Query': 'запрос',
'Queryable?': 'Может запрашиваться?',
'RC frame with masonry infill': 'Железобетонная несущая конструкция с заполнением кладкой',
'RECORD A': 'ЗАПИСИ,',
'RECORD B': 'ЗАПИСЬ B',
'Race': 'РАСЫ',
'Radio Callsign': 'Радиокнопку Callsign',
'Radiological Hazard': 'Радиационной Опасности',
'Radiology': 'Радиологии',
'Railway Accident': 'Железнодорожная авария',
'Railway Hijacking': 'Железнодорожной Угон',
'Rain Fall': 'Дождях Попадают',
'Rapid Assessment Details': 'Данные Оперативной оценки',
'Rapid Assessment added': 'Оперативная оценка добавлена',
'Rapid Assessment deleted': 'Оперативная оценка удалена',
'Rapid Assessment updated': 'Оперативная оценка обновлена',
'Rapid Assessment': 'Rapid Оценки',
'Rapid Assessments & Flexible Impact Assessments': 'Оперативная Оценка и Гибкая оценка последствий',
'Rapid Assessments': 'Оперативная Оценка',
'Rapid Close Lead': 'Директива быстрого закрытия',
'Rapid Data Entry': 'Быстрого ввода данных',
'Raw Database access': 'Необработанный доступ к базе данных',
'Receive New Shipment': 'Получения новой отгрузки',
'Receive Shipment': 'Получать партию товара',
'Receive this shipment?': 'Получать это поставка?',
'Receive': 'получить',
'Received By Person': 'Полученных по лицу',
'Received By': 'получено',
'Received Item Details': 'Детали полученного элемента',
'Received Item deleted': 'Полученный элемент удален',
'Received Item updated': 'Полученный элемент обновлен',
'Received Shipment Details': 'Детали полученной партии товара',
'Received Shipment canceled and items removed from Inventory': 'Полученная партия товара аннулирована и элементы удалены из описи',
'Received Shipment canceled': 'Полученная партия товара аннулирована',
'Received Shipment updated': 'Полученная партия товара обновляется',
'Received Shipments': 'Полученная партия товара',
'Received': 'получено',
'Receiving and Sending Items': 'Получения и отправки Элементов',
'Recipient': 'Получатель',
'Recipients': 'Получатели',
'Recommendations for Repair and Reconstruction or Demolition': 'Рекомендации для ремонта и восстановления или Сноса',
'Record Details': 'Подробности записей',
'Record Saved': 'Запись сохранена',
'Record added': 'Добавлена запись',
'Record any restriction on use or entry': 'Запись никаких ограничений на использование или записи',
'Record deleted': 'Запись удалена',
'Record last updated': 'Последнее обновление записи',
'Record not found!': 'Запись не найдена!',
'Record not found': 'Запись не найдена',
'Record updated': 'Записи обновлен',
'Record': 'Зарегистрировать',
'Recording and Assigning Assets': 'Записывание и назначение ресурсов',
'Records': 'записи',
'Recovery Request added': 'Требование восстановления добавляется',
'Recovery Request deleted': 'Восстановления запрос удален',
'Recovery Request updated': 'Требование восстановления обновляется',
'Recovery Request': 'Требование восстановления',
'Recovery Requests': 'Восстановления Запросы',
'Recovery': 'Устранение',
'Recurring Cost': 'Повторяющиеся Стоимость',
'Recurring cost': 'Повторяющиеся стоимость',
'Recurring costs': 'Периодических затрат',
'Recurring': 'Периодический',
'Red Cross / Red Crescent': 'Красного креста/красного полумесяца',
'Red': 'красный',
'Reference Document': 'Ссылочного Документа',
'Refresh Rate (seconds)': 'Скорость обновления (в секундах)',
'Region Location': 'Расположения регион',
'Regional': 'Региональные',
'Regions': 'Области',
'Register Person into this Camp': 'Зарегистриривать человека в этом лагере',
'Register Person into this Shelter': 'Лицо, зарегистрированное в этом убежище',
'Register Person': 'Зарегистрированное лицо',
'Register them as a volunteer': 'Зарегистрируйте их в качестве волонтеров',
'Register': 'Зарегистрируйте',
'Registered People': 'Зарегистрированные люди',
'Registered users can': 'Зарегистрированные пользователи могут',
'Registration Details': 'Сведения о регистрации',
'Registration added': 'Регистрации добавлен',
'Registration entry deleted': 'Регистрация запись удалена',
'Registration is still pending approval from Approver (%s) - please wait until confirmation received.': 'Регистрация ждёт одобрения из вышестоящей инстанции (%s) - пожалуйста подождите.',
'Registration updated': 'Регистрации обновлены',
'Registration': 'Регистрация',
'Rehabilitation/Long Term Care': 'Реабилитацией/долгосрочного ухода',
'Reinforced masonry': 'Укрепило каменщик',
'Rejected': 'Отклонено',
'Relief Team': 'Группа помощи',
'Relief': 'помощь',
'Religion': 'Религия',
'Religious Leader': 'Религиозный лидер',
'Religious': 'Религиозный',
'Relocate as instructed in the <instruction>': 'Перевозите как указание в<instruction>',
'Remove Asset from this event': 'Удалить участника из этого события',
'Remove Asset from this scenario': 'Удалить вклад из этого результата',
'Remove Facility from this event': 'Удалить участника из этого события',
'Remove Facility from this scenario': 'Удалить участника из этого сценария',
'Remove Human Resource from this event': 'Удалить людских ресурсов с этим событием',
'Remove Human Resource from this scenario': 'Удалить людских ресурсов из этого сценария',
'Remove Item from Inventory': 'Удалить элемент из Запасов',
'Remove Map Profile from this event': 'Удалить конфигурацию отображения с этим событием',
'Remove Map Profile from this scenario': 'Удалить конфигурацию отображения из этого сценария',
'Remove Person from Group': 'Удалить Членство',
'Remove Person from Team': 'Удалить Членство',
'Remove this asset from this event': 'Удалить этот актив из этого события',
'Remove this asset from this scenario': 'Удалить этот актив из этого сценария',
'Remove': 'удалить',
'Removed from Group': 'Членство удалено',
'Removed from Team': 'Членство удалено',
'Repair': 'Ремонта',
'Repaired': 'Ремонт',
'Repeat your password': 'Повторите ваш пароль',
'Replace if Master': 'Заменить если Master',
'Replace if Newer': 'Заменять, если новее',
'Replace': 'заменить',
'Report Another Assessment...': 'Отчет Другой Оценки.',
'Report Details': 'Сведения отчета',
'Report Resource': 'Отчет Ресурсов',
'Report Types Include': 'Типы отчетов Включают',
'Report added': 'Отчет добавлен',
'Report deleted': 'Отчет удален',
'Report my location': 'Сообщите мои координаты',
'Report the contributing factors for the current EMS status.': 'Доклад факторы для текущего EMS состояние.',
'Report the contributing factors for the current OR status.': 'Доклад факторы для текущего или состояние.',
'Report them as found': 'Отчет их как найти',
'Report them missing': 'О них отсутствует',
'Report updated': 'Отчет обновлен',
'Report': 'Сообщить',
'Reporter Name': 'Имя репортера',
'Reporter': 'репортер',
'Reporting on the projects in the region': 'Составление отчетности по проектам в регионе',
'Reports': 'отчеты',
'Request Added': 'Запрос Добавлен',
'Request Canceled': 'Требование отменено',
'Request Details': 'Сведения о запросе',
'Request From': 'Запуск',
'Request Item Details': 'Данные об объекте запроса',
'Request Item added': 'Объект Запроса добавлен',
'Request Item deleted': 'Объект Запроса удален',
'Request Item from Available Inventory': 'Запрос элемент из доступных запасов',
'Request Item updated': 'Запрос Элемент обновляется',
'Request Item': 'Объект Запроса',
'Request Items': 'Объекты Запроса',
'Request Status': 'состояние запроса',
'Request Type': 'Тип запроса',
'Request Updated': 'Запрос обновлен',
'Request added': 'Запрос добавлен',
'Request deleted': 'Запрос удален',
'Request for Role Upgrade': 'Запрос на повышение роли',
'Request updated': 'Запрос обновлен',
'Request': 'запрос',
'Request, Response & Session': 'Request, Response& Сеансу',
'Requested By Facility': 'С просьбой Facility',
'Requested By': 'Запрошен',
'Requested From': 'С просьбой',
'Requested Items': 'Запрошенные элементы',
'Requested by': 'Запрошен',
'Requested on': 'Запрошенный на',
'Requested': 'Запрошено',
'Requester': 'запрашивающая сторона',
'Requests Management': 'Запросы Управления',
'Requests': 'запросы',
'Requires Login!': 'Требует Входа!',
'Reset Password': 'переустановить пароль',
'Reset': 'Удалить',
'Resolve Conflict': 'Разрешить конфликт',
'Resolve link brings up a new screen which helps to resolve these duplicate records and update the database.': 'Разрешение ссылки извлекает новый экран, который помогает принять решение по этим дублирующимся записям и обновить базу данных.',
'Resolve': 'Разрешить',
'Resource Details': 'Детали ресурса',
'Resource added': 'Ресурс добавлен',
'Resource deleted': 'Ресурс удален',
'Resource updated': 'Ресурс обновлен',
'Resource': 'Ресурс',
'Resources': 'ресурсы',
'Respiratory Infections': 'Респираторные инфекции',
'Response': 'ответ',
'Restricted Access': 'Ограниченный доступ',
'Restricted Use': 'Ограниченного использования',
'Results': 'результаты',
'Retail Crime': 'Розничной Преступности',
'Retrieve Password': 'Получить пароль',
'Return to Request': 'Вернуться Запрос',
'Return': 'Назад',
'Returned From': 'Возвращаемый из',
'Returned': 'Возвращено',
'Review Incoming Shipment to Receive': 'Просмотрите Входящих Поставка получения',
'Rice': 'Рис',
'Riot': 'Беспорядки',
'River Details': 'Сведения о реке',
'River added': 'Река добавлена',
'River deleted': 'Река удалена',
'River updated': 'Река обновлена',
'River': 'Река',
'Rivers': 'Реки',
'Road Accident': 'автодорожное происшествие',
'Road Closed': 'Дорога Закрыта',
'Road Conditions': 'Дорожные условия',
'Road Delay': 'Задержка в пути',
'Road Hijacking': 'Угон на дороге',
'Road Usage Condition': 'Условие использования дороги',
'Role Details': 'Сведения о роли',
'Role Required': 'Роль Требуется',
'Role Updated': 'Роль Обновляется',
'Role added': 'Роль добавлена',
'Role deleted': 'Удалена роль',
'Role updated': 'Роль обновляется',
'Role': 'Роль',
'Role-based': 'Ролевой',
'Roles Permitted': 'Роли Разрешены',
'Roles': 'Роли',
'Roof tile': 'Крыша замостить',
'Roofs, floors (vertical load)': 'Крыш, фальшполом (вертикальная нагрузка)',
'Room Details': 'Сведения о помещении',
'Room added': 'Комната добавлена',
'Room deleted': 'Комната удалена',
'Room updated': 'Комната обновляется',
'Room': 'Комната',
'Rooms': 'Комнаты',
'Rows in table': 'Строк в таблице',
'Rows selected': 'Число выбранных строк',
'Run Interval': 'Интервал запуска',
'Running Cost': 'эксплуатационные расходы',
'Russian': 'русский',
'Safe environment for vulnerable groups': 'Безопасная среда для уязвимых групп',
'Safety Assessment Form': 'Форма оценки безопасности',
'Safety of children and women affected by disaster?': 'Безопасность детей и женщин, пострадавших в бедствии?',
'Sahana Administrator': 'Администратор Sahana',
'Sahana Blue': 'Голубая Sahana',
'Sahana Community Chat': 'Чат сообщества Sahana',
'Sahana Eden <=> Other': 'Sahana Эдема <=> Других',
'Sahana Eden <=> Sahana Eden': 'Sahana <=> Sahana Эдема Эдема',
'Sahana Eden Humanitarian Management Platform': 'Гуманитарная платформа управления "Эдем вассалов"',
'Sahana Eden Website': 'Sahana Эдема Вебсайте',
'Sahana Eden': 'Sahana Эдема',
'Sahana Green': 'Sahana Зеленый',
'Sahana Steel': 'Sahana Сталелитейных',
'Sahana access granted': 'Доступ к Sahana предоставлен',
'Salted Fish': 'Salted Рыбных',
'Sanitation problems': 'Санитарией проблем',
'Satellite': 'Спутник',
'Saturday': 'Суббота',
'Save': 'сохранить',
'Saved.': 'Сохранено.',
'Saving...': 'Сохранение...',
'Scale of Results': 'Шкала результатов',
'Scenario Details': 'Детали сценария',
'Scenario added': 'Сценарий добавлен',
'Scenario deleted': 'Сценарий удален',
'Scenario updated': 'Сценарий обновления',
'Scenario': 'Сценарий',
'Scenarios': 'сценарии',
'Schedule': 'расписание',
'Schema': 'СХЕМА',
'School Closure': 'Школы Закрытия',
'School Lockdown': 'Школа в строгой изоляции',
'School Teacher': 'Школьных Учителей',
'School activities': 'Школьной деятельности',
'School assistance': 'Помощь школе',
'School attendance': 'Посещаемости школы',
'School destroyed': 'Школы уничтожен',
'School heavily damaged': 'Школа сильно повреждена',
'School tents received': 'Школы палатках полученных',
'School tents, source': 'Школы палатки, источник',
'School used for other purpose': 'Школы используется для других целей',
'School': 'Школа',
'School/studying': 'Школа/учеба',
'Schools': 'Школы',
'Search Activities': 'Поиск в операциях',
'Search Activity Report': 'Поиск отчета об активности',
'Search Addresses': 'Найти адреса',
'Search Alternative Items': 'Поиск Альтернативных Элементов',
'Search Assessment Summaries': 'Найти суммарные оценки',
'Search Assessments': 'Найти оценки',
'Search Asset Log': 'Активов поиска Журнала',
'Search Assets': 'Найти активы',
'Search Baseline Type': 'Поиск типа базового уровня',
'Search Baselines': 'Поиск базовых уровней',
'Search Brands': 'Поиск брендов',
'Search Budgets': 'Поиск Бюджетов',
'Search Bundles': 'Поиск Комплектов',
'Search Camp Services': 'Поиск служб лагеря',
'Search Camp Types': 'Поиск типов лагеря',
'Search Camps': 'Поиск Лагерях',
'Search Catalog Items': 'Поиск элементов каталога',
'Search Catalogs': 'Поиск каталогов',
'Search Certificates': 'Поиска Сертификатов',
'Search Certifications': 'Поиск сертификатов',
'Search Checklists': 'Поиск контрольных списков',
'Search Cluster Subsectors': 'Поиск подсекторов кластера',
'Search Clusters': 'Поиск кластеров',
'Search Commitment Items': 'Поиск элементов фиксации',
'Search Commitments': 'Поиск обязательств',
'Search Competencies': 'Поиск компетентностей',
'Search Competency Ratings': 'Поиск Competency Рейтинговое',
'Search Contact Information': 'Поиск контактной информации',
'Search Contacts': 'Поиск контактов',
'Search Course Certificates': 'Поиск сертификатов курса',
'Search Courses': 'Поиск курсов',
'Search Credentials': 'Поиск мандатов',
'Search Documents': 'Поиск документов',
'Search Donors': 'Поиск Доноров',
'Search Entries': 'Поиск записей',
'Search Events': 'Поиск событий',
'Search Facilities': 'Поиск услуг',
'Search Feature Layers': 'Функция поиска Уровнями',
'Search Flood Reports': 'Поиск Наводнениями Отчеты',
'Search Groups': 'Поиск групп',
'Search Human Resources': 'Поиск людских ресурсов',
'Search Identity': 'Поиск Identity',
'Search Images': 'Поиск образов',
'Search Impact Type': 'Поиск тип воздействия',
'Search Impacts': 'Поиска Воздействия',
'Search Incident Reports': 'Поиск Инцидента Отчеты',
'Search Inventory Items': 'Поиск складских номенклатур',
'Search Inventory items': 'Поиск складских номенклатур',
'Search Item Categories': 'Элемент поиска Категорий',
'Search Item Packs': 'Элемент поиска Пакетов',
'Search Items': 'Найти позиции ТМЦ',
'Search Job Roles': 'Поиск ролей',
'Search Keys': 'Ключи поиска',
'Search Kits': 'Kits поиска',
'Search Layers': 'Поиск Уровнями',
'Search Level 1 Assessments': 'Поиска Уровня 1 оценок',
'Search Level 2 Assessments': 'Поиска Уровня 2 оценок',
'Search Locations': 'Поиск в размещениях',
'Search Log Entry': 'Поиск записи журнала',
'Search Map Profiles': 'Поиск конфигураций карты',
'Search Markers': 'Маркеров поиска',
'Search Members': 'Поиск Члена',
'Search Membership': 'Поиск Членства',
'Search Memberships': 'Поиск членов',
'Search Missions': 'Поисковые Миссии',
'Search Need Type': 'Поиск типа критической ситуации',
'Search Needs': 'Поиск критических ситуаций',
'Search Offices': 'Поиск офисов',
'Search Organizations': 'Поиск организаций',
'Search Peer': 'Поиск Однорангового',
'Search Personal Effects': 'Поиск Личных Эффекты',
'Search Persons': 'Найти сотрудников',
'Search Photos': 'Поиск Фотографий',
'Search Population Statistics': 'Изучите статистику населения',
'Search Positions': 'Поиск Позиции',
'Search Problems': 'Поиск Проблем',
'Search Projections': 'Поиск Прогнозы',
'Search Projects': 'Поиск Проектов',
'Search Rapid Assessments': 'Поиск Быстрой Оценки',
'Search Received Items': 'Поиск полученных номенклатур',
'Search Received Shipments': 'Поиска получен Поставок',
'Search Records': 'Записи поиска',
'Search Registations': 'Registations поиска',
'Search Registration Request': 'Поиск запрос о регистрации',
'Search Report': 'Поиск Отчета',
'Search Request Items': 'Требование поиска Элементов',
'Search Request': 'требование поиска',
'Search Requested Items': 'Запрошенный поиск Элементов',
'Search Requests': 'Поиск требований',
'Search Resources': 'Поиск Ресурсов',
'Search Rivers': 'Поиск Реках',
'Search Roles': 'Найти роли',
'Search Rooms': 'Найдите Комнаты',
'Search Scenarios': 'Изучите Сценарии',
'Search Sections': 'Поиск Разделов',
'Search Sectors': 'Поиск Секторов',
'Search Sent Items': 'Поиск Отправленных Элементов',
'Search Sent Shipments': 'Поиск отправленных партий товара',
'Search Service Profiles': 'Поиск сервисных профайлов',
'Search Settings': 'Параметры поиска',
'Search Shelter Services': 'Поиск Жилья Служб',
'Search Shelter Types': 'Поиска Жильем Типов',
'Search Shelters': 'Поиск Приюты',
'Search Skill Equivalences': 'Изучите Эквиваленты специалистов',
'Search Skill Provisions': 'Поиск Навыков Положения',
'Search Skill Types': 'Поиск типов навыка',
'Search Skills': 'Поиск Навыков',
'Search Solutions': 'поиск решений',
'Search Staff Types': 'Поиск Сотрудников Типы',
'Search Staff or Volunteer': 'Найдите Сотрудников или Добровольцев',
'Search Status': 'Найти состояние',
'Search Subsectors': 'Поиск Подсекторов',
'Search Support Requests': 'Поддержка поиска Запросы',
'Search Tasks': 'Найти задачи',
'Search Teams': 'Поиск Групп',
'Search Themes': 'Поиск Темы',
'Search Tickets': 'Найти паспорта',
'Search Tracks': 'Поиск Отслеживает',
'Search Trainings': 'Поиск &обучения',
'Search Twitter Tags': 'Поиска Twitter Теги',
'Search Units': 'Поиск элемента',
'Search Users': 'Поиск пользователей',
'Search Volunteer Availability': 'Поиск Добровольцев Доступности',
'Search Volunteers': 'Поиск Добровольцев',
'Search Warehouses': 'Поиска Складов',
'Search and Edit Group': 'Найти и отредактировать группу',
'Search and Edit Individual': 'Найти и отредактировать индивидуализированный объект',
'Search for Staff or Volunteers': 'Выявление сотрудников или волонтеров',
'Search for a Location by name, including local names.': 'Поиска расположения по имени, включая местные имена.',
'Search for a Person': 'Поиск пользователей',
'Search for a Project': 'Для поиска Проекта',
'Search for a shipment by looking for text in any field.': 'Выполнить поск отправленной партии по тексту в любом поле.',
'Search for a shipment received between these dates': 'поиск поставок полученных между этими датами',
'Search for an Organization by name or acronym': 'Поиск Организации по имени или аббревиатуре',
'Search for an Organization by name or acronym.': 'Поиск Организации по имени или аббревиатуре.',
'Search for an asset by text.': 'Поиск ресурса по тексту.',
'Search for an item by category.': 'Для поиска элемента в категории.',
'Search for an item by text.': 'Для поиска элемента в текст.',
'Search for asset by country.': 'Выполнить поиск актива по стране.',
'Search for office by country.': 'Выполнить поиск офиса по стране.',
'Search for office by organization.': 'Выполнить поиск офиса по организации.',
'Search for office by text.': 'Для поиска office по тексту.',
'Search for warehouse by country.': 'Поиск хранилища по стране.',
'Search for warehouse by organization.': 'Поиск хранилища по организации.',
'Search for warehouse by text.': 'Поиска хранилища по тексту.',
'Search here for a person record in order to:': 'Поиск в этом разделе записи пользователя в целях:',
'Search messages': 'Поиск сообщений',
'Search': 'Поиск по',
'Searching for different groups and individuals': 'Поиск для различных групп и отдельных лиц',
'Secondary Server (Optional)': 'Вторичный сервер (необязательно)',
'Seconds must be a number between 0 and 60': 'Секунд должен иметь значение между 0 и 60',
'Section Details': 'Раздел Сведения',
'Section deleted': 'Раздел удален',
'Section updated': 'Раздел обновление',
'Sections': 'разделы',
'Sector Details': 'Сектора Сведения',
'Sector added': 'Сектора добавляется',
'Sector deleted': 'Сектора удален',
'Sector updated': 'Сектора обновляется',
'Sector': 'Сектор',
'Sector(s)': 'Сектор (s)',
'Sectors': 'Секторов',
'Security Status': 'Состояние защиты',
'Security problems': 'Проблемы защиты',
'See All Entries': 'См. все записи',
'See all': 'Просмотреть все',
'See unassigned recovery requests': 'См. неназначенные запросы на восстановление',
'Select Items from the Request': 'Выберите элементы из запроса',
'Select Items from this Inventory': 'Выберите элементы из этого перечня',
'Select a location': 'Выберите расположение',
'Select a question from the list': 'Выберите вопрос из списка',
'Select a range for the number of total beds': 'Выберите диапазон для общего числа коек',
'Select all that apply': 'выберите все подходящие варианты',
'Select an Organization to see a list of offices': 'Выберите организацию для просмотра списка кабинетов',
'Select the overlays for Assessments and Activities relating to each Need to identify the gap.': 'Выберите перекрытия для оценок и деятельностей, относящиеся к каждой потребности, чтобы определить интервал.',
'Select the person assigned to this role for this project.': 'Выберите лицо, назначенное на эту роль в этом проекта.',
'Select to show this configuration in the Regions menu.': 'Выберите для отображения этой конфигурации в области меню.',
'Selects whether to use a Modem, Tropo or other Gateway for sending out SMS': 'Выбирает, использовать модем, Tropo или другой шлюз для отправки SMS',
'Send Alerts using Email &/or SMS': 'Отправки оповещений с помощью Электронной Почты и/или SMS',
'Send Commitment as Shipment': 'Отправить обязательства как Поставка',
'Send New Shipment': 'Отправить новой отгрузки',
'Send Notification': 'Уведомление',
'Send Shipment': 'Отправка отгрузки',
'Send a message to this person': 'Отправить сообщение в этот человек',
'Send a message to this team': 'Отправить сообщение в этой группы',
'Send from %s': 'Отправить от% s',
'Send message': 'Отправить сообщение',
'Send new message': 'Отправить новое сообщение',
'Send': 'Отправка',
'Sends & Receives Alerts via Email & SMS': 'Отправляет& Получает Оповещения по электронной почте с помощью& SMS',
'Senior (50+)': 'Старший (50+)',
'Sent By Person': 'Отправлено по лицу',
'Sent By': 'Отправитель',
'Sent Item Details': 'Отправлено сведения о номенклатуре',
'Sent Item deleted': 'Отправлено элемент удален',
'Sent Item updated': 'Отправлено Элемент обновляется',
'Sent Shipment Details': 'Отправлено Shipment Сведения',
'Sent Shipment canceled and items returned to Inventory': 'Отправлено Shipment отменена и элементов возвращается в складские запасы',
'Sent Shipment canceled': 'Отправлено Shipment отменена',
'Sent Shipment updated': 'Отправлено Shipment обновляется',
'Sent Shipments': 'Отправлено Поставок',
'Sent': 'сент',
'Separated children, caregiving arrangements': 'Разлученных детей, уход механизмов',
'Serial Number': 'Серийный номер',
'Series': 'Серия',
'Server': 'сервера',
'Service Catalog': 'Каталог услуг',
'Service or Facility': 'Служба или Средство',
'Service profile added': 'Добавлен профайл сервиса',
'Service profile deleted': 'Удален профайл сервиса',
'Service profile updated': 'Обновлен профайл сервиса',
'Service': 'Услуга',
'Services Available': 'Доступные сервисы',
'Services': 'Услуги',
'Set Base Site': 'Set базового сайта',
'Set By': 'Набор путем',
'Set True to allow editing this level of the location hierarchy by users who are not MapAdmins.': 'Установите True чтобы позволить редактирование этого уровня иерархии расположения пользователям, которые не являются MapAdmins.',
'Setting Details': 'Сведения о параметрах',
'Setting added': 'Установка добавлена',
'Setting deleted': 'Параметр удален',
'Setting updated': 'Параметр обновляется',
'Settings updated': 'Параметры обновления',
'Settings were reset because authenticating with Twitter failed': 'Параметры были сброс поскольку идентификации с Twitter не',
'Settings which can be configured through the web interface are available here.': 'Параметры, который может быть настроены через этот веб интерфейс, доступны здесь.',
'Settings': 'параметры',
'Severe': 'Серьезный',
'Severity': 'Степень серьезности',
'Share a common Marker (unless over-ridden at the Feature level)': 'Используйте совместно обычный маркер (кроме случая, когда он замещен на уровне свойства)',
'Shelter & Essential NFIs': 'убежища и важнейшие NFI',
'Shelter Details': 'Сведения об убежище',
'Shelter Name': 'Название убежища',
'Shelter Registry': 'Жилье Реестра',
'Shelter Service Details': 'Подробности о службе убежищ',
'Shelter Service added': 'Жилье службы добавляемой',
'Shelter Service deleted': 'Жилье Службы удаленных',
'Shelter Service updated': 'Обновленная Служба убежищ',
'Shelter Service': 'Жилье Службы',
'Shelter Services': 'Службы убежищ',
'Shelter Type Details': 'Подробности о типе убежища',
'Shelter Type added': 'Добавлен тип убежища',
'Shelter Type deleted': 'Удален тип убежища',
'Shelter Type updated': 'Жилье Тип обновления',
'Shelter Type': 'Тип убежища',
'Shelter Types and Services': 'Жилье типы и службы',
'Shelter Types': 'Жилье Типы',
'Shelter added': 'убежище добавлено',
'Shelter deleted': 'Убежище удалено',
'Shelter updated': 'Жилья обновлено',
'Shelter': 'Убежище',
'Shelter/NFI Assistance': 'Жилье/NFI Помощь',
'Shelters': 'Приюты',
'Shipment Created': 'Shipment Созданных',
'Shipment Items received by Inventory': 'Поставка полученные номенклатуры по складской',
'Shipment Items sent from Inventory': 'Поставка отправлено элементов из запасов',
'Shipment Items': 'единицы партии товара',
'Shipment to Send': 'Поставка для отправки',
'Shipments To': 'Поставки в',
'Shipments': 'поставки',
'Shooting': 'гибель людей в результате огнестрельных нападений',
'Short Assessment': 'Краткая оценка',
'Short Description': 'Краткое описание',
'Show Checklist': 'Показать контрольную таблицу',
'Show Details': 'Просмотр сведений',
'Show Map': 'Показать преобразование',
'Show Region in Menu?': 'Показать область в меню?',
'Show on Map': 'Показать на карте',
'Show on map': 'Показать на карте',
'Sign-up as a volunteer': 'Sign - up в доброволец',
'Sign-up for Account': 'Sign - up для счета',
'Sign-up succesful - you should hear from us soon!': 'Sign - up отфильтрованного - вы должны услышать от нас в короткие сроки.',
'Sindhi': 'Синдхи',
'Site Administration': 'Управление сайтом',
'Site': 'сайт',
'Situation Awareness & Geospatial Analysis': 'Понимание ситуации и анализ местоположения',
'Situation': 'Ситуация',
'Sketch': 'Эскиз',
'Skill Catalog': 'Каталог квалификации',
'Skill Details': 'Сведения о навыке',
'Skill Equivalence Details': 'Skill Equivalence Сведения',
'Skill Equivalence added': 'Skill Equivalence добавлен',
'Skill Equivalence deleted': 'Skill Equivalence удален',
'Skill Equivalence updated': 'Skill Equivalence обновляется',
'Skill Equivalence': 'Эквивалентнность квалификации',
'Skill Equivalences': 'Skill Эквиваленты',
'Skill Provision Catalog': 'Skill Предоставление Каталога',
'Skill Provision Details': 'Skill Предоставление Сведений',
'Skill Provision added': 'Skill Предоставление добавлен',
'Skill Provision deleted': 'Skill Предоставление удален',
'Skill Provision updated': 'Skill Предоставление обновляется',
'Skill Provision': 'Skill Предоставление',
'Skill Provisions': 'Skill Положения',
'Skill Status': 'Skill Состояние',
'Skill TYpe': 'Skill Тип',
'Skill Type Catalog': 'Skill Тип Каталога',
'Skill Type Details': 'Skill сведения о типе',
'Skill Type added': 'Skill добавлен тип',
'Skill Type deleted': 'Skill Тип удален',
'Skill Type updated': 'Skill Тип обновления',
'Skill Types': 'Типы навыков',
'Skill added': 'Skill добавлен',
'Skill deleted': 'Skill удален',
'Skill updated': 'Навык обновлен',
'Skill': 'Навык',
'Skills Catalog': 'Навыков Каталога',
'Skills Management': 'Управление навыками',
'Skills': 'навыки',
'Slope failure, debris': 'Обрушение склона, обломки',
'Small Trade': 'Мелкая торговля',
'Smoke': 'Дым',
'Snapshot Report': 'Отчет по моментальной копии',
'Snapshot': 'Моментальная копия',
'Snow Fall': 'Снегопад',
'Snow Squall': 'Снежный шквал',
'Soil bulging, liquefaction': 'Вспучивание почвы, разжижение грунта',
'Solid waste': 'Твердых отходов',
'Solution Details': 'Детали решения',
'Solution Item': 'Элемент решения',
'Solution added': 'Решение добавлено',
'Solution deleted': 'Решение удалено',
'Solution updated': 'Решение обновлено',
'Solution': 'Способ устранения',
'Solutions': 'Решения',
'Some': 'некоторые',
'Sorry that location appears to be outside the area of the Parent.': 'Извините, что расположение будет находиться за пределами района родительского объекта.',
'Sorry that location appears to be outside the area supported by this deployment.': 'Извините, что расположение будет находиться за пределами области, поддерживаемые этого развертывания.',
'Sorry, I could not understand your request': 'Извините, я не смог понять ваш запрос',
'Sorry, only users with the MapAdmin role are allowed to create location groups.': 'Извините, только пользователи с MapAdmin роль разрешено создавать расположение группы.',
'Sorry, only users with the MapAdmin role are allowed to edit these locations': 'Извините, только пользователи с MapAdmin роли есть смогут изменять эти положения',
'Sorry, something went wrong.': 'Извините, что-то пошло не так.',
'Sorry, that page is forbidden for some reason.': 'Извините, доступ к этой странице по какой-либо причине запрещен.',
'Sorry, that service is temporary unavailable.': 'Извините, эта служба временно недоступна.',
'Sorry, there are no addresses to display': 'К сожалению, нет адреса для отображения',
'Source ID': 'Исходный ИД',
'Source Time': 'Исходное время',
'Source': 'Исходный элемент',
'Sources of income': 'Источники дохода',
'Space Debris': 'Космический мусор',
'Spanish': 'испанский',
'Special Ice': 'Специальная порция мороженого',
'Special Marine': 'Специальный Морских',
'Specialized Hospital': 'Специализированных Больницу',
'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.': 'Конкретной области (например/укрепления комната) в пределах того расположения это или группы рассматривается.',
'Specific locations need to have a parent of level': 'Конкретных расположениях должны иметь родительский уровень',
'Specify a descriptive title for the image.': 'Указать название для изображения.',
'Specify the bed type of this unit.': 'Указать кровать, тип этого блока.',
'Specify the number of available sets': 'Укажите число доступных задает',
'Specify the number of available units (adult doses)': 'Укажите число доступных единиц (взрослого доз)',
'Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions': 'Укажите число доступных единиц (л) от Ringer - Lactate или эквивалентный решения',
'Specify the number of sets needed per 24h': 'Укажите число требующихся наборов на 24h',
'Specify the number of units (adult doses) needed per 24h': 'Указать количество единиц (взрослого доз) требуется на 24h',
'Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h': 'Указать количество единиц (л) от Ringer - Lactate или эквивалентный решений необходимых в 24h',
'Spherical Mercator?': 'Сферический Меркатор?',
'Spreadsheet Importer': 'Импортер электронной таблицы',
'Spreadsheet uploaded': 'электронная таблица загружена',
'Spring': 'Источник',
'Squall': 'шквал',
'Staff & Volunteers': 'Штатные сотрудники и волонтеры',
'Staff Member Details': 'Сведения о сотруднике',
'Staff Members': 'Сотрудники',
'Staff Record': 'Запись штата сотрудников',
'Staff Type Details': 'Сотрудников сведения о типе',
'Staff Type added': 'Сотрудников добавлен тип',
'Staff Type deleted': 'Сотрудников Тип удален',
'Staff Type updated': 'Сотрудников Тип обновления',
'Staff Types': 'Сотрудников Типы',
'Staff and Volunteers': 'Персонал и добровольцы',
'Staff member added': 'Добавлен сотрудник',
'Staff present and caring for residents': 'Персонал присутствует и заботится о жителях',
'Staff': 'штат',
'Staff2': 'персонал2',
'Staffing': 'Штатное',
'Stairs': 'Лестница',
'Start Date': 'начальная дата',
'Start date': 'начальная дата',
'Start of Period': 'Начало периода',
'State': 'штат',
'Stationery': 'Канцпринадлежности',
'Status Report': 'Отчет о состоянии',
'Status Updated': 'Статус обновлен',
'Status added': 'Статус добавлен',
'Status deleted': 'Статус удален',
'Status of clinical operation of the facility.': 'Оценка клинического обслуживания, предоставляемого медицинским учреждением.',
'Status of general operation of the facility.': 'Оценка работы медицинского учреждения в общем.',
'Status of morgue capacity.': 'Оценка работы морга.',
'Status of operations of the emergency department of this hospital.': 'Оценка работы отделения неотложной помощи этой больницы.',
'Status of security procedures/access restrictions in the hospital.': 'Оценка мер безопасности/ограничения доступа в больнице.',
'Status of the operating rooms of this hospital.': 'Состояние операционных этой больницы.',
'Status updated': 'Статус обновлен',
'Status': 'статус',
'Steel frame': 'Стальная конструкция',
'Stolen': 'Украденный',
'Store spreadsheets in the Eden database': 'Хранить электронные таблицы в Эдема базы данных',
'Storeys at and above ground level': 'Улице и в выше уровня земли',
'Storm Force Wind': 'Штормовой Ветер',
'Storm Surge': 'Штормовой нагон воды',
'Stowaway': 'Безбилетный пассажир',
'Street Address': 'Улица, дом',
'Strong Wind': 'Сильный ветер',
'Structural Hazards': 'Риск структурного сбоя',
'Structural': 'Структурный',
'Style Field': 'Поле стиль',
'Style Values': 'Значения стиля',
'Sub-type': 'Sub - type',
'Subject': 'тема',
'Submission successful - please wait': 'Отправка на выполнение успешна - ждите, пожалуйста',
'Submission successful - please wait...': 'Передача на выполнение успешна - пожалуйста, подождите.',
'Submit New (full form)': 'Отправить новый (полная форма)',
'Submit New (triage)': 'Отправить новый (triage)',
'Submit New': 'Передать новый',
'Submit a request for recovery': 'Отправить запрос на восстановление',
'Submit new Level 1 assessment (full form)': 'Отправить новый уровень 1 оценки (полная форма)',
'Submit new Level 1 assessment (triage)': 'Отправить новый уровень 1 оценки (triage)',
'Submit new Level 2 assessment': 'Отправить новый уровень 2 оценка',
'Subscription Details': 'Сведения о подписке',
'Subscription added': 'Добавлена подписка',
'Subscription deleted': 'Удалена подписка',
'Subscription updated': 'Apply обновлены',
'Subscriptions': 'подписки',
'Subsector Details': 'Подсектора Сведения',
'Subsector added': 'Подсектора добавлен',
'Subsector deleted': 'Подсектора удален',
'Subsector updated': 'Подсектора обновляется',
'Subsector': 'Подсектора',
'Subsectors': 'Подсекторов',
'Subsistence Cost': 'Стоимость прожиточного',
'Suburb': 'Пригороде',
'Suggest not changing this field unless you know what you are doing.': 'Предложить не изменяете это поле если вы знаете, что делаете.',
'Summary by Administration Level': 'Сводка по уровень администрирования',
'Summary': 'Сводная информация',
'Sunday': 'Воскресенье',
'Supply Chain Management': 'Управление цепочкой поставок',
'Support Request': 'Запрос о поддержке',
'Support Requests': 'Запросы о поддержке',
'Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.': 'Поддерживает решение, принятое большими группами экспертов кризисного управления, по оказанию помощи группам в создании ранжированного списка.',
'Surgery': 'Хирургия',
'Survey Answer Details': 'Детали ответа на анкету',
'Survey Answer added': 'Ответ на анкету добавлен',
'Survey Answer deleted': 'Ответ на анкету удален',
'Survey Answer updated': 'Ответ на анкету обновлен',
'Survey Answer': 'Ответ на анкету',
'Survey Module': 'Модуль анкеты',
'Survey Name': 'название анкеты',
'Survey Question Details': 'Детали вопроса анкетирования',
'Survey Question Display Name': 'Отображаемое имя вопроса анкетирования',
'Survey Question added': 'Вопрос анкетирования добавлен',
'Survey Question deleted': 'Вопрос анкетирования удален',
'Survey Question updated': 'Вопрос анкетирования обновлен',
'Survey Question': 'Вопрос анкетирования',
'Survey Series Details': 'Обзор Серии Сведения',
'Survey Series Name': 'Обследования имя ряда',
'Survey Series added': 'Серия обследования добавлена',
'Survey Series deleted': 'Обзор Серии удален',
'Survey Series updated': 'Обзор Серии обновляется',
'Survey Series': 'Серии обследования',
'Survey Template Details': 'Детали шаблона обследования',
'Survey Template added': 'Добавлен шаблон опроса',
'Survey Template deleted': 'Шаблон опроса удален',
'Survey Template updated': 'Шаблон опроса обновлен',
'Survey Template': 'Шаблон опроса',
'Survey Templates': 'Шаблоны для обследования',
'Symbology': 'Символика',
'Sync Conflicts': 'Конфликты синхронизации',
'Sync History': 'Хронология синхронизации',
'Sync Now': 'Синхронизировать сейчас',
'Sync Partners are instances or peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) that you want to sync information with. Click on the link on the right to go the page where you can add sync partners, search for sync partners and modify them.': 'Синхр Партнеры являются экземплярами или одноранговыми (SahanaEden, SahanaAgasti, Ushahidi, и т. ), который будет информации о синхронизации с. Щелкните на ссылке в правой, чтобы перейти на страницу где можно добавить синхр, партнеры поиск для синхронизации партнеры и их изменения.',
'Sync Partners': 'Синхронизированные партнеры',
'Sync Pools': 'Пулы синхронизации',
'Sync Schedule': 'Расписание синхронизации',
'Sync Settings': 'Параметры синхронизации',
'Sync process already started on': 'Процесс синхронизации уже запущен на',
'Synchronisation': 'Синхронизация',
'Synchronization Conflicts': 'Конфликты синхронизации',
'Synchronization Details': 'Детали синхронизации',
'Synchronization History': 'Истории синхронизации',
'Synchronization Peers': 'Синхронизации Peers',
'Synchronization Settings': 'Параметры синхронизации',
'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the synchronization features of Sahana Eden': 'Синхронизация позволяет совместно использовать данные, которые были с другими и обновить собственную базу данных с последними данными из других peers. Эта страница содержит при информацию об использовании функции синхронизации Sahana Эдема',
'Synchronization not configured.': 'Синхронизации не сконфигурирован.',
'Synchronization settings updated': 'Параметры синхронизации обновления',
'Synchronization': 'синхронизация',
'Syncronisation History': 'История синхронизации',
'Tags': 'Теги',
'Take shelter in place or per <instruction>': 'Укройтесь на месте или согласно <instruction>',
'Task Details': 'Сведения о задаче',
'Task List': 'Список задач',
'Task Status': 'Состояние задачи',
'Task added': 'Задание добавлено',
'Task deleted': 'Задача удалена',
'Task updated': 'Задача обновления',
'Tasks': 'Задачи',
'Team Description': 'Описание группы',
'Team Details': 'Сведения о группе',
'Team ID': 'Идентификатор группы',
'Team Id': 'Идентификатор группы',
'Team Leader': 'Руководитель команды',
'Team Member added': 'Член группы добавлен',
'Team Members': 'Члены команды',
'Team Name': 'Название группы',
'Team Type': 'Тип группы',
'Team added': 'Группа добавлена',
'Team deleted': 'Группа удалена',
'Team updated': 'Группа обновляется',
'Team': 'Команда',
'Teams': 'Группы',
'Technical testing only, all recipients disregard': 'Технические тестирования, только всех получателей игнорировать',
'Telecommunications': 'Телекоммуникаций',
'Telephone': 'Телефон',
'Telephony': 'Телефонная связь',
'Temp folder %s not writable - unable to apply theme!': 'Папка временных файлов %s недоступна для записи - невозможно применить тему!',
'Template file %s not readable - unable to apply theme!': 'Файл шаблона %s недоступен для чтения - невозможно применить тему!',
'Templates': 'Шаблоны',
'Term for the fifth-level within-country administrative division (e.g. a voting or postcode subdivision). This level is not often used.': 'Термин в 5. - уровне в страны - административного отдела (например, голосования или postcode деления). Этот уровень не часто используется.',
'Term for the fourth-level within-country administrative division (e.g. Village, Neighborhood or Precinct).': 'Термин в 4. - уровне в страны - административного отдела (например, деревню, Neighborhood или Полицейского Участка).',
'Term for the primary within-country administrative division (e.g. State or Province).': 'Термин для обозначения основной единицы административного деления в пределах страны (например, штат или область).',
'Term for the secondary within-country administrative division (e.g. District or County).': 'Термин для обозначения дополнительной единицы административного деления в пределах страны (например, округ или графство).',
'Term for the third-level within-country administrative division (e.g. City or Town).': 'Термин для обозначения единицы административного деления третьего порядка в пределах страны (например, крупный город или город)',
'Term for the top-level administrative division (i.e. Country).': 'Термин для обозначения административной единицы высшего порядка (например, страна).',
'Territorial Authority': 'Территориальная власть',
'Terrorism': 'Терроризм',
'Tertiary Server (Optional)': 'Третичный сервер (необязательно)',
'Text Color for Text blocks': 'Цвет текста для блоков текста',
'Text': 'текст',
'Thank you for validating your email. Your user account is still pending for approval by the system administator (%s).You will get a notification by email when your account is activated.': 'Спасибо для проверки электронной почты. Вашей учетной записи пользователя еще не для утверждения на системному (% s ). вы получите уведомление по электронной почте когда ваша учетная запись будет активирована.',
'Thanks for your assistance': 'Спасибо за вашу помощь',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.': '"query" - это условие подобное \\ " db.table1.field1==\'value\'\\ ". Что-то подобное \\ " db.table1.field1 == db.table2.field2\\ " имеет следствием SQL JOIN.',
'The Area which this Site is located within.': 'Область, в которой находится этот сайт.',
'The Assessments module allows field workers to send in assessments.': 'Оценка модуль позволяет полевых работников для отправки в зачет.',
'The Author of this Document (optional)': 'По автору этого документа (необязательно)',
'The Building Asssesments module allows building safety to be assessed, e.g. after an Earthquake.': 'Здания Asssesments модуль позволяет построение безопасности на оценивается, например после землетрясения.',
'The Camp this Request is from': 'Лагерь, из которого этот запрос',
'The Camp this person is checking into.': 'Лагерь, который проверяет данный сотрудник.',
'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'Текущего расположения сотрудника/группы, которые могут быть общие (для передачи) или точное (для отображения на карте). Введите несколько символов для поиска из доступных местах.',
'The Email Address to which approval requests are sent (normally this would be a Group mail rather than an individual). If the field is blank then requests are approved automatically if the domain matches.': 'Этот адрес в который утверждения запросы передаются (обычно этот подход может быть электронной почты групп вместо отдельных). Если это поле пустое то запросы будут утверждены автоматически, если домен совпадает.',
'The Incident Reporting System allows the General Public to Report Incidents & have these Tracked.': 'Система сообщений о происшествиях позволяет общественности сообщать о происшествиях и отслеживать их.',
'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'Размещение лица поступают из, которые могут быть общие (для отчетов) или точное (для отображения на карте). Введите несколько символов для поиска из доступных местах.',
'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'Размещение лица будет, которые могут быть общие (для отчетов) или точное (для отображения на карте). Введите несколько символов для поиска из доступных местах.',
'The Media Library provides a catalog of digital media.': 'Медиа библиотека предоставляет каталог цифровых медиа.',
'The Messaging Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.': 'Модуль обмена сообщениями, является основной узловой центр связи в Sahana system. Он используется для отправки предупреждений и/или сообщений с помощью SMS и Электронной Почты для различных групп и отдельных лиц до, во время и после сбоя.',
'The Organization Registry keeps track of all the relief organizations working in the area.': 'Регистратура организации следит за всеми работающими в области организациями, оказывающими помощь.',
'The Project Tracking module allows the creation of Activities to meet Gaps in Needs Assessments.': 'Проект модуля контроля позволяет создания деятельности с пробелов в оценки потребностей.',
'The Role this person plays within this hospital.': 'Роль этого пользователя играет в этом больницу.',
'The Shelter Registry tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': 'С жильем Реестра отслеживает все приютов и сохраняет основные сведения относительно их. Он сотрудничает с другими модулями для отслеживания пользователей, связанных с жильем, доступность служб и т.',
'The Shelter this Request is from': 'С жильем этого запроса из',
'The Shelter this person is checking into.': 'Шельтер. Проверяется, находится ли в нем данное лицо',
'The URL for the GetCapabilities page of a Web Map Service (WMS) whose layers you want available via the Browser panel on the Map.': 'URL адрес для GetCapabilities на страницы Web Map Service (WMS), уровнями необходимо, доступных посредством панели браузера на карте.',
'The URL of your web gateway without the post parameters': 'URL - web gateway без должности параметры',
'The URL to access the service.': 'URL, доступа к службе.',
'The Unique Identifier (UUID) as assigned to this facility by the government.': 'Уникальный идентификатор (UUID) как для этой помощью правительства.',
'The asset must be assigned to a site OR location.': 'Ресурс должен быть назначен сайту ИЛИ расположению.',
'The attribute which is used for the title of popups.': 'Атрибут, который используется для заголовка всплывающих окон.',
'The attribute within the KML which is used for the title of popups.': 'Этот атрибут в KML который используется для заголовка всплывающими окнами.',
'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)': 'Атрибут (ы) в KML которые используются для тела всплывающими окнами. (используйте пробел между атрибутами)',
'The body height (crown to heel) in cm.': 'Тела высота (короной в канавке) в cm.',
'The country the person usually lives in.': 'Страны лицо обычно проживает в.',
'The default Organization for whom this person is acting.': 'Организация по умолчанию, для которой работает это лицо',
'The default Organization for whom you are acting.': 'Организации по умолчанию, для которой вы работаете.',
'The duplicate record will be deleted': 'В дубликат записи будут удалены',
'The first or only name of the person (mandatory).': 'Первого или единственного имени пользователя (обязательными).',
'The form of the URL is http://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service stands for the URL path to the WMS.': 'Форма URL - http://your/web/map/service?service=WMS&request=GetCapabilities, где your/web/map/service обозначает для URL путь к WMS.',
'The language you wish the site to be displayed in.': 'Язык, требуется, узлу быть при выводе.',
'The list of Brands are maintained by the Administrators.': 'В списке Брендов сохраняются, администраторов.',
'The list of Catalogs are maintained by the Administrators.': 'Список каталогов сохраняются, администраторов.',
'The map will be displayed initially with this latitude at the center.': 'Карты будут показаны сначала с этим широты в центре.',
'The map will be displayed initially with this longitude at the center.': 'Карты будут показаны первоначально с этой долготы в центре.',
'The minimum number of features to form a cluster.': 'Минимального числа функций для сформировать кластер.',
'The name to be used when calling for or directly addressing the person (optional).': 'Имя будет использоваться при вызове или для непосредственно касающихся пользователя (необязательно).',
'The next screen will allow you to detail the number of people here & their needs.': 'В следующем окне позволит вам быть подробно число людей здесь& их потребностей.',
'The number of Units of Measure of the Alternative Items which is equal to One Unit of Measure of the Item': 'Число единиц измерения альтернативных единиц которые равны одной основной единице измерения',
'The number of pixels apart that features need to be before they are clustered.': 'Отдельно от числа пикселей, эти характеристики должны быть до того, как они группируются.',
'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': '- количество tiles в отображается карты для загрузки. Ноль означает, что 1. страница загрузится быстрее, большее число означает последующие панорамирования выполняется быстрее.',
'The person at the location who is reporting this incident (optional)': 'Лицо, сообщающее о происшествии с места событий (необязательно)',
'The post variable containing the phone number': 'Пост переменной, содержащий номер телефона',
'The post variable on the URL used for sending messages': 'Пост в переменной URL для отправки сообщений',
'The post variables other than the ones containing the message and the phone number': 'Пост переменные помимо тех, содержит сообщение и номер телефона',
'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows': 'Последовательный порт, к которому подключен модем, -/dev/ttyUSB0 и т. д. в linux и com1, com2 и т. д. в Windows',
'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.': 'Сервер не получил своевременный ответ от другого сервера, что он доступен для заполнения запроса броузером',
'The server received an incorrect response from another server that it was accessing to fill the request by the browser.': 'Сервер получил неверный ответ от другого сервера что он доступен для заполнения запроса броузером',
'The site where this position is based.': 'Места, где это положение основывается.',
'The staff responsibile for Facilities can make Requests for assistance. Commitments can be made against these Requests however the requests remain open until the requestor confirms that the request is complete.': 'Сотрудниками responsibile для объектов может делать запросы для оказания помощи. Обязательства можно сделать против этих Запросы в запросы остается открытой до тех пор пока реквестер подтверждает, что запрос будет завершена.',
'The subject event no longer poses a threat or concern and any follow on action is described in <instruction>': 'Предметом событие больше не создает угрозу или озабоченности и все последующие меры по описан в<instruction>',
'The time at which the Event started.': 'Время, в которое событий запущен.',
'The token associated with this application on': 'Маркер, связанных с данным приложением на',
'The unique identifier which identifies this instance to other instances.': 'Уникальный идентификатор, который определяет этот экземпляр с других экземпляров.',
'The way in which an item is normally distributed': 'В котором элемент правило распределенных',
'The weight in kg.': 'Вес в кг.',
'The': 'Эта',
'Theme Details': 'Детали темы',
'Theme added': 'Тема добавлена',
'Theme deleted': 'Тема удалена',
'Theme updated': 'Тема обновления',
'Theme': 'тема',
'Themes': 'темы',
'There are errors': 'Есть ошибки',
'There are insufficient items in the Inventory to send this shipment': 'Нет достаточного количества материальных запасов для отправки этой партии товара',
'There is no address for this person yet. Add new address.': 'Не существует адрес для этого лица. пока Добавить новый адрес.',
'These are settings for Inbound Mail.': 'Эти параметры для входящих почтовых сообщений.',
'These are the Incident Categories visible to normal End-Users': 'Эти Происшествия Категории отображается в конечных пользователей',
'These need to be added in Decimal Degrees.': 'Эти должны быть добавлены в десятичном градусов.',
'They': 'Они',
'This Group has no Members yet': 'В настоящее время нет зарегистрированных членов',
'This Team has no Members yet': 'В настоящее время нет зарегистрированных членов',
'This appears to be a duplicate of': 'Это очевидно копия',
'This file already exists on the server as': 'Этот файл уже существует на сервере как',
'This is appropriate if this level is under construction. To prevent accidental modification after this level is complete, this can be set to False.': 'Это уместно, если этот уровень в стадии построения. Для предотвращения случайного изменения после завершения обработки этого уровня можно установить на False (ложный).',
'This is the way to transfer data between machines as it maintains referential integrity.': 'Это способ для передачи данных между системами как он поддерживает ссылочной целостности.',
'This is the way to transfer data between machines as it maintains referential integrity...duplicate data should be removed manually 1st!': 'Это способ для передачи данных между системами как он поддерживает ссылочная целостность ... повторными данными нужно будет удалить вручную 1.!',
'This level is not open for editing.': 'Этот уровень не открыт для редактирования.',
'This might be due to a temporary overloading or maintenance of the server.': 'Это может быть из - за временной перегрузки или обслуживания сервера.',
'This module allows Inventory Items to be Requested & Shipped between the Inventories of Facilities.': 'Этот модуль позволяет предметам хранения быть потребованными & отгруженными в пределах возможного наличия товарных запасов',
'This module allows you to plan scenarios for both Exercises & Events. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.': 'Этот модуль позволит вам спланировать сценарии для обоих мероприятий и событий. Можно выделить надлежащие ресурсов (людских, активы& объектов), с тем чтобы они могли быть мобилизованы легко.',
'This page shows you logs of past syncs. Click on the link below to go to this page.': 'Здесь показаны журналы за синхронизирует. Щелкните на приведенной ниже ссылке для перехода к этой странице.',
'This screen allows you to upload a collection of photos to the server.': 'Это меню позволяет для передачи коллекции фотографий на сервер.',
'This setting can only be controlled by the Administrator.': 'Этот параметр может контролироваться только администратором.',
'This shipment has already been received.': 'Эта партия товара уже получена',
'This shipment has already been sent.': 'Эта партия товара уже отправлена',
'This shipment has not been received - it has NOT been canceled because it can still be edited.': 'Эта партия товара еще не была получена- она еще не была отменена, так как она все еще может изменяться',
'This shipment has not been sent - it has NOT been canceled because it can still be edited.': 'Эта партия товара еще не была послана- она еще не была отменена, так как она все еще может изменяться',
'This shipment will be confirmed as received.': 'Это поставка будет подтверждена сразу по получении.',
'Thunderstorm': 'Гроза',
'Thursday': 'Четверг',
'Ticket Details': 'Сведения о пропуске',
'Ticket ID': 'ID паспорта',
'Ticket added': 'Паспорт добавляется',
'Ticket deleted': 'Паспорт удален',
'Ticket updated': 'Билет обновляется',
'Ticket': 'Паспорт',
'Ticketing Module': 'Ticketing Модуля',
'Tickets': 'Паспорта',
'Tilt-up concrete': 'Подъемные бетонные панели (tilt-up)',
'Timber frame': 'Деревянный каркас',
'Timeline Report': 'Timeline Отчета',
'Timeline': 'временная диаграмма',
'Title to show for the Web Map Service panel in the Tools panel.': 'Заголовок для отображения панели сервисов для Web Map в панели службы панель инструментов.',
'Title': 'Обращение',
'To Location': 'В расположение',
'To Person': 'Для сотрудника',
'To begin the sync process, click the button on the right =>': 'Для начала процесс синхронизации, нажмите кнопку в правой =>',
'To begin the sync process, click this button =>': 'Для начала процесс синхронизации нажмите эту кнопку, =>',
'To create a personal map configuration, click': 'Для создания конфигурации личной карты кликните мышью',
'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in models/000_config.py': 'Изменить OpenStreetMap, необходимо для изменения OpenStreetMap параметры в моделях/000_config. py',
'To search by job title, enter any portion of the title. You may use % as wildcard.': 'Для поиска по названию должности, введите любую часть этого названия. Можно использовать% в качестве группового символа.',
'To variable': 'В переменную',
'To': 'укажите',
'Tools': 'утилиты',
'Tornado': 'Торнадо',
'Total # of Target Beneficiaries': 'Общее число бенефициаров - получателей',
'Total # of households of site visited': 'Общее число домохозяйств посещенного места',
'Total Beds': 'Общее Коек',
'Total Beneficiaries': 'Общее Бенефициары',
'Total Cost per Megabyte': 'Общая стоимость за Мегабайт',
'Total Cost per Minute': 'Общая стоимость за Минуту',
'Total Monthly Cost': 'Общее месячные расходы',
'Total Monthly Cost:': 'Всего Месяц стоимость:',
'Total Monthly': 'Общее Ежемесячно',
'Total One-time Costs': 'Разовые расходы, всего',
'Total Persons': 'Всего людей',
'Total Recurring Costs': 'Периодические издержки, всего',
'Total Unit Cost': 'Полная стоимость единицы',
'Total Unit Cost:': 'Общая стоимость единицы:',
'Total Units': 'Всего элементов',
'Total gross floor area (square meters)': 'Общая площадь (кв. метров)',
'Total number of beds in this hospital. Automatically updated from daily reports.': 'Общее число коек в этой больнице. Автоматически обновляется из ежедневных отчетов.',
'Total number of houses in the area': 'Общее число домов в области',
'Total number of schools in affected area': 'Общее число школ в пострадавшем районе',
'Total population of site visited': 'Общая численность населения посещенного места',
'Total': 'Итого',
'Totals for Budget:': 'Итоги для бюджета:',
'Totals for Bundle:': 'Итоги для пакета:',
'Totals for Kit:': 'Итоги для комплекта:',
'Tourist Group': 'Туристическая группа',
'Town': 'город',
'Traces internally displaced people (IDPs) and their needs': 'Трассировок перемещенных лиц (впл) и их потребности',
'Tracing': 'трассировка',
'Track Details': 'Отслеживать подробности',
'Track deleted': 'Отслеживать удаленные',
'Track updated': 'Отслеживать обновляется',
'Track uploaded': 'Отслеживать закачан',
'Track with this Person?': 'Отслеживать с этого пользователя?',
'Track': 'Отслеживать',
'Tracking of Projects, Activities and Tasks': 'Отслеживания проектов, мероприятий и задач',
'Tracking of basic information on the location, facilities and size of the Shelters': 'Отслеживания основные сведения о о положении, объектов и размере Приюты',
'Tracks the location, distibution, capacity and breakdown of victims in Shelters': 'Отслеживает расположение, распределения, возможностей и разбиение пострадавших в Приюты',
'Tracks': 'Дорожки',
'Traffic Report': 'Трафик Отчета',
'Training Course Catalog': 'Каталог учебного курса',
'Training Details': 'Детали обучения',
'Training added': 'Обучение добавлено',
'Training deleted': 'Обучение удалено',
'Training updated': 'Обучение обновлено',
'Training': 'Обучение',
'Trainings': 'Тренировки',
'Transit Status': 'Состояние транзита',
'Transit': 'Транзит',
'Transition Effect': 'Эффект переноса',
'Transparent?': 'Транспарентным?',
'Transportation assistance, Rank': 'Транспорт помощи, Ранг',
'Trauma Center': 'Посттравматического Center',
'Travel Cost': 'Путевых расходов',
'Tropical Storm': 'Тропический шторм',
'Tropo Messaging Token': 'Tropo Сообщениями Маркер',
'Tropo Settings': 'Параметры Tropo',
'Tropo Voice Token': 'Tropo Голосовой Маркер',
'Tropo settings updated': 'Параметры Tropo обновлены',
'Truck': 'Грузовой автомобиль',
'Try checking the URL for errors, maybe it was mistyped.': 'Попробуйте проверить URL на ошибки, возможно он неверно набран.',
'Try hitting refresh/reload button or trying the URL from the address bar again.': 'Попробуйте достиг обновления/reload button или пытаться URL из адресной строки еще раз.',
'Try refreshing the page or hitting the back button on your browser.': 'Попробуйте обновить страницу или ударил по кнопке назад в браузере.',
'Tsunami': 'Цунами',
'Tuesday': 'Вторник',
'Twitter ID or #hashtag': 'Twitter ID или #hashtag',
'Twitter Settings': 'Twitter Параметры',
'Twitter': 'twitter',
'Type of Construction': 'Тип сооружения',
'Type of water source before the disaster': 'Тип источника воды до бедствия',
'Type': 'тип',
'UID': 'Идентификатор пользователя',
'UN': 'ООН',
'URL': 'унифицированный указатель ресурса',
'Un-Repairable': 'Неподдающийся ремонту',
'Unable to parse CSV file!': 'Невозможно проанализировать файл CSV!',
'Understaffed': 'Недоукомплектован',
'Unidentified': 'Неидентифицированные',
'Unit Cost': 'Стоимость за единицу',
'Unit added': 'Единица добавляется',
'Unit deleted': 'Подразделение удалено',
'Unit of Measure': 'Единицы измерения',
'Unit updated': 'Единица обновлена',
'Units': 'Подразделения',
'Unknown Peer': 'Неизвестный узел',
'Unknown type of facility': 'Неизвестный тип возможности',
'Unknown': 'Неизвестный',
'Unreinforced masonry': 'Неармированая кладка',
'Unresolved Conflicts': 'Неразрешенные конфликты',
'Unsafe': 'Опасный',
'Unselect to disable the modem': 'отменить выбор отключения модема',
'Unsent': 'непосланый',
'Unsupported data format!': 'Неподдерживаемый формат данных!',
'Unsupported method!': 'Неподдерживаемый метод!',
'Update Activity Report': 'Обновить отчет о деятельности',
'Update Cholera Treatment Capability Information': 'Обновить информацию о возможности лечения холеры',
'Update Request': 'Обновить запрос',
'Update Service Profile': 'Обновить служебный профайл',
'Update Status': 'Обновить состояние',
'Update Task Status': 'Обновить статус задания',
'Update Unit': 'Элемент обновления',
'Update if Master': 'Если обновление Сводного',
'Update if Newer': 'Update если новее',
'Update your current ordered list': 'Обновите ваш текущий упорядоченный список',
'Update': 'Обновить',
'Updated By': 'Обновлен',
'Upload Photos': 'Выгрузите фотографии',
'Upload Spreadsheet': 'Загрузить таблицу',
'Upload Track': 'Upload Отслеживания',
'Upload a Spreadsheet': 'Выгрузите электронную таблицу',
'Upload an image file (bmp, gif, jpeg or png), max. 300x300 pixels!': 'Перешлите файл изображения (bmp, gif, jpeg или png), макс 300x300 пикселей!',
'Upload an image file here.': 'Выгрузите файл изображения сюда.',
'Upload an image, such as a photo': 'Вгрузите изображения, такие как фото',
'Urban Fire': 'Городских Пожара',
'Urban area': 'Городских области',
'Urdu': 'Урду',
'Urgent': 'Срочно',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Использовать (...)&(...) для, и (...)|(...) для, или и ~(...) не создавать более сложные запросы.',
'Use Geocoder for address lookups?': 'Использование Геокодера для просмотров адресов?',
'Use default': 'Использовать по умолчанию',
'Use these links to download data that is currently in the database.': 'Используйте эти ссылки для скачивания данных, которые в данный момент находятся в базе данных.',
'Used by IRS & Assess': 'Используется в IRS & Assess',
'Used in onHover Tooltip & Cluster Popups to differentiate between types.': 'Используется в onHover Tooltip& Кластеру Всплывающими Окнами для различения между типами.',
'Used to build onHover Tooltip & 1st field also used in Cluster Popups to differentiate between records.': 'Для создания onHover Tooltip& 1. поле также используется в кластере Всплывающими Окнами для различения между записями.',
'Used to check that latitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'Для проверки, что широта введено расположениях является разумным. Может использоваться для фильтрации списков ресурсов, расположениях.',
'Used to check that longitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'Для проверки, что longitude - введено расположениях является разумным. Может использоваться для фильтрации списков ресурсов, расположениях.',
'Used to import data from spreadsheets into the database': 'Для импорта данных из электронной таблицы в базу',
'Used within Inventory Management, Request Management and Asset Management': 'Используется в управлении запасами, управление запросами и управления активами',
'User Account has been Disabled': 'Учетная запись пользователя была Отключена',
'User Details': 'Сведения о пользователе',
'User Management': 'управление пользователями',
'User Profile': 'Профайл пользователя',
'User Requests': 'Пользовательские запросы',
'User Updated': 'Пользователь обновлен',
'User added': 'Пользователь добавлен',
'User already has this role': 'У пользователя уже есть этой роли',
'User deleted': 'Пользователь удален',
'User updated': 'Пользователь обновлен',
'User': 'пользователя',
'Username': 'пользователь',
'Users removed': 'Пользователей удалены',
'Users': 'Пользователи',
'Uses the REST Query Format defined in': 'Использует формат запроса REST, определенный в',
'Utilities': 'утилиты',
'Utility, telecommunication, other non-transport infrastructure': 'Утилитой, электросвязи, другие не - транспортной инфраструктуры',
'Value': 'Значение в',
'Various Reporting functionalities': 'Различных Отчетов функциональностями',
'Vehicle Crime': 'Транспортное Преступности',
'Vehicle Types': 'Типы транспортных',
'Vehicle': 'Механизм',
'Verification Status': 'Состояние проверки',
'Verified?': 'Проверена?',
'Verify password': 'Проверьте пароль',
'Version': 'Обнаружена версия',
'Very Good': 'Очень хорошо',
'Very High': 'очень высокий',
'View Alerts received using either Email or SMS': 'Просмотр оповещений получил либо с помощью Электронной Почты или SMS',
'View All': 'просмотреть все',
'View Error Tickets': 'Просмотреть паспорта с ошибкой',
'View Fullscreen Map': 'Просмотр Fullscreen Карты',
'View Image': 'Просмотреть изображение',
'View Items': 'просмотреть элементы',
'View On Map': 'Показать на карте',
'View Outbox': 'Показать исходящие',
'View Picture': 'Просмотра Изображения',
'View Settings': 'Просмотреть установки',
'View Tickets': 'Просмотреть билеты',
'View and/or update their details': 'Просмотреть и/или обновить свои сведения',
'View or update the status of a hospital.': 'Показать или обновить статус больницы.',
'View pending requests and pledge support.': 'Показать отложенные требования и поддержку обязательств.',
'View the hospitals on a map.': 'Просмотреть местонахождение больниц на карте.',
'View/Edit the Database directly': 'Просмотреть/редактировать базы данных непосредственно',
'Village Leader': 'Вождь селения',
'Village': 'Селение',
'Visible?': 'Видимы?',
'Visual Recognition': 'Visual Признание',
'Volcanic Ash Cloud': 'Вулканическим Cloud',
'Volcanic Event': 'Событие извержения',
'Volume (m3)': 'Том (m3)',
'Volunteer Availability': 'Добровольцев Доступности',
'Volunteer Details': 'Сведения доброволец',
'Volunteer Information': 'Добровольцев Информации',
'Volunteer Management': 'Добровольцев Управления',
'Volunteer Project': 'Добровольцев Проекта',
'Volunteer Record': 'Запись добровольцев',
'Volunteer Request': 'Запрос на волонтера',
'Volunteer added': 'Добровольцев добавлен',
'Volunteer availability added': 'Добровольцев доступности добавлен',
'Volunteer availability deleted': 'Добровольцев доступности удаленных',
'Volunteer availability updated': 'Добровольцев доступности обновления',
'Volunteer deleted': 'Добровольцев удален',
'Volunteer details updated': 'Сведения доброволец обновления',
'Volunteers were notified!': 'Волонтеры были уведомлены!',
'Volunteers': 'волонтеры',
'Vote': 'Голосовать',
'Votes': 'Голоса',
'WASH': 'ВЫМОЙТЕ',
'Walking Only': 'Только пешеходного',
'Wall or other structural damage': 'Штепсельной или другие структурные повреждения',
'Warehouse Details': 'Сведения хранилища',
'Warehouse added': 'Добавлен хранилища',
'Warehouse deleted': 'Хранилище удалено',
'Warehouse updated': 'Обновление хранилища',
'Warehouse': 'Хранилище',
'Warehouses': 'Склады',
'Water Sanitation Hygiene': 'Воды санитарии гигиены',
'Water collection': 'Водосбор',
'Water gallon': 'Галлон Воды',
'Water storage containers in households': 'Контейнеры для хранения воды в домашних хозяйствах',
'Water supply': 'Водоснабжения',
'Web Map Service Browser Name': 'Веб - служба карт имя браузера',
'Web Map Service Browser URL': 'Веб - служба карт URL браузера',
'Website': 'веб-сайте',
'Wednesday': 'Среда',
'Weight (kg)': 'Вес (кг)',
'Weight': 'Масса',
'Welcome to the Sahana Portal at': 'Добро пожаловать в Sahana портале по',
'Well-Known Text': 'Well - Known Text',
'Wheat': 'пшеничный',
'When reports were entered': 'Когда доклады были введены',
'Whiskers': 'Цинковых',
'Who is doing what and where': 'Кто, что и где делает',
'Who usually collects water for the family?': 'Кто обычно набирает воду для семьи?',
'Width (m)': 'Ширина (m)',
'Wild Fire': 'Подстановки Пожара',
'Wind Chill': 'Ветра Гололед',
'Window frame': 'Фрейм окна',
'Winter Storm': 'Зимой Грозы',
'Women of Child Bearing Age': 'Женщины детородного возраста',
'Women participating in coping activities': 'Женщины, участвующие в деятельности по преодолению трудностей',
'Women who are Pregnant or in Labour': 'Женщины, которые являются беременными или роженицы',
'Womens Focus Groups': 'Группы по женским интересам',
'Wooden plank': 'Деревянная доска',
'Wooden poles': 'Деревянные столбы',
'Working hours end': 'Конец рабочего дня',
'Working hours start': 'Начало рабочего дня',
'Working or other to provide money/food': 'Работа или другое для обеспечения деньгами/едой',
'X-Ray': 'X - Ray',
'YES': 'да',
'Year built': 'Год встроенные',
'Year of Manufacture': 'Года производства',
'Yellow': 'желтый',
'Yes': 'да',
'You are a recovery team?': 'Вы - группы спасателей?',
'You are attempting to delete your own account - are you sure you want to proceed?': 'Вы пытаетесь удалить собственную учетную запись - вы действительно хотите продолжить?',
'You are currently reported missing!': 'В настоящее время вы числитесь пропавшим!',
'You can change the configuration of synchronization module in the Settings section. This configuration includes your UUID (unique identification number), sync schedules, beacon service and so on. Click the following link to go to the Sync Settings page.': 'Можно изменить конфигурацию синхронизации модуля в разделе параметры. Эта конфигурация включает в UUID (уникальный идентификационный номер), sync расписаниями, маяком обслуживания и так далее. Следующие кнопки link to go to the параметры синхронизации страницы.',
'You can click on the map below to select the Lat/Lon fields': 'Можно щелкнуть на карте ниже, чтобы выбрать Lat/Lon поля',
'You can select the Draw tool': 'Можно выбрать в Нарисуйте инструментом',
'You can set the modem settings for SMS here.': 'Можно задать параметры модема для СМС здесь.',
'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.': 'Можно использовать инструмент преобразования для преобразования любого из координаты GPS или степеней/минуты/секунды.',
'You do not have permission for any facility to make a commitment.': 'У вас нет разрешения ни на какую возможность для выполнения обязательства.',
'You do not have permission for any facility to make a request.': 'У вас нет разрешения ни на какую возможность для создания запроса.',
'You do not have permission for any site to add an inventory item.': 'У вас нет разрешения ни на какое место для добавления предмета хранения.',
'You do not have permission for any site to receive a shipment.': 'У вас нет прав для любого узла для получения отгрузки.',
'You do not have permission for any site to send a shipment.': 'У вас нет прав для любого узла для отправки отгрузки.',
'You do not have permission to cancel this received shipment.': 'У вас нет прав для отмены этой полученных отгрузки.',
'You do not have permission to cancel this sent shipment.': 'У вас нет прав для отмены этой отправлено отгрузки.',
'You do not have permission to make this commitment.': 'У вас нет прав на создание этого обязательства.',
'You do not have permission to receive this shipment.': 'У вас нет прав для получения этой отгрузки.',
'You do not have permission to send a shipment from this site.': 'У вас нет прав для отправки отгрузки с этого сайта.',
'You do not have permission to send this shipment.': 'У вас нет прав для отправки данного отгрузки.',
'You have a personal map configuration. To change your personal configuration, click': ', у вас личные конфигурации отображения. Для изменения личного конфигурацию, нажмите',
'You have found a dead body?': 'Вы обнаружили труп?',
'You must be logged in to register volunteers.': 'Вы должны войти в для регистрации добровольцев.',
'You must be logged in to report persons missing or found.': 'Необходимо войти в отчет лиц пропавших без вести или найденных.',
'You must provide a series id to proceed.': 'Необходимо указать кода серии для продолжения.',
'You should edit Twitter settings in models/000_config.py': 'Вам следует отредактировать установки Twitter-а в models/000_config.py',
'Your current ordered list of solution items is shown below. You can change it by voting again.': 'Ваш текущий упорядоченный список решений, как показано ниже. Его можно изменить путем голосования еще раз.',
'Your post was added successfully.': 'Чтобы после было успешно добавлено.',
'Your system has been assigned a unique identification (UUID), which other computers around you can use to identify you. To view your UUID, you may go to Synchronization -> Sync Settings. You can also see other settings on that page.': 'Системе был назначен уникальный (UUID), какие другие компьютеров в можно использовать для идентифицировать вас. Чтобы просмотреть UUID, можно перейти к Синхронизации -> параметры синхронизации. Можно также увидеть другие параметры на этой странице.',
'Zero Hour': 'Ноль Час',
'Zinc roof': 'Цинковая крыша',
'Zoom Levels': 'уровни масштабирования',
'Zoom': 'увеличение масштаба',
'active': 'текущий',
'added': 'добавлены',
'all records': 'все записи',
'allows a budget to be developed based on staff & equipment costs, including any admin overheads.': 'Позволяет разработать бюджет на основе затрат на штаты и оборудование, включая любые накладных расходы на администрирование.',
'allows for creation and management of surveys to assess the damage following a natural disaster.': 'Позволяет, для создания и управления обследованием, оценить ущерб, нанесенный стихийным бедствием.',
'an individual/team to do in 1-2 days': 'Для отдельного/команды делать в 1 - 2 дней',
'assigned': 'Присвоено',
'average': 'средний',
'black': 'черный',
'blond': 'Блондин',
'blue': 'синий',
'brown': 'коричневый',
'by': 'пользователем',
'c/o Name': 'C/o имя',
'can be used to extract data from spreadsheets and put them into database tables.': 'Может быть использован для извлечения данных из электронной таблицы и помещения их в таблицы базы данных.',
'caucasoid': 'европеоид',
'check all': 'проверить все',
'click for more details': 'Щелкните для дополнительные сведения',
'completed': 'завершено',
'consider': 'Рассматриваемый',
'curly': 'кудрявый',
'currently registered': 'В настоящее время зарегистрированными',
'daily': 'ежедневно',
'dark': 'Темный',
'data uploaded': 'Данных закачан',
'database %s select': 'Базы данных% s выбрать',
'database': 'Сервер баз данных',
'db': 'БД',
'deceased': 'Скончавшегося',
'delete all checked': 'Удалить все проверенное',
'deleted': 'удалены',
'design': 'Эскиз',
'diseased': 'Заболевших',
'displaced': 'Перемещенных',
'divorced': 'Разведен(а)',
'done!': 'Выполнено!',
'duplicate': 'Создать копию',
'edit': 'редактировать',
'eg. gas, electricity, water': 'Например Газ, электричество, вода',
'enclosed area': 'Защищенная область',
'export as csv file': 'Экспортировать как файл csv',
'fat': 'жир',
'feedback': 'Отправить комментарий',
'female': 'Женский',
'flush latrine with septic tank': 'Flush туалетов с септических цистерн',
'forehead': 'лоб',
'found': 'найдено',
'from Twitter': 'Из Twitter',
'green': 'зеленый',
'grey': 'Серый',
'here': 'Здесь',
'high': 'Максимум',
'hourly': 'Ежечасный',
'households': 'Домашние хозяйства',
'identified': 'Идентифицирован',
'ignore': 'Игнорировать',
'in Deg Min Sec format': 'В формате Градусы Мин Сек',
'in GPS format': 'В ГСОК формате',
'inactive': 'Отключить',
'injured': 'Потерпевшее',
'insert new %s': 'Вставить новый% s',
'insert new': 'Вставить новый',
'invalid request': 'Неверный запрос',
'invalid': 'Ошибка',
'is a central online repository where information on all the disaster victims and families, especially identified casualties, evacuees and displaced people can be stored. Information like name, age, contact number, identity card number, displaced location, and other details are captured. Picture and finger print details of the people can be uploaded to the system. People can also be captured by group for efficiency and convenience.': '- электронной справке централизованного хранилища когда информация по всем жертв бедствий и семьи, особенно на жертв, эвакуированных и перемещенных лиц можно хранить. Такие сведения имя, возраст, контактный номер идентификации, номер карты, перемещенных, расположение и другие сведения будут сохранены. Рисунок и finger печать сведения о пользователи могут загрузить в систему. Люди могут также быть собраны с группы для эффективности и удобства.',
'is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities': '- предусматриваемая для состоять из нескольких г - модулей, совместную работу для обеспечения сложные функциональные возможности для управления по оказанию чрезвычайной помощи и элементы проекта, организации. Эта содержит VLSI системы, то системы управления складом, сырье отслеживания, управления цепочкой поставок, об использовании автопарка, закупки, финансовое отслеживание и других активов и возможности управления ресурсами',
'keeps track of all incoming tickets allowing them to be categorised & routed to the appropriate place for actioning.': 'Отслеживает все входящие паспорта позволяет им иметь разведывательной& направляться в соответствующие места для actioning.',
'latrines': 'уборные',
'leave empty to detach account': 'Оставьте пустым для отключения учетной записи',
'legend URL': 'Условные URL',
'light': 'светлый',
'login': 'идентификатор входа',
'long': 'Расширенный',
'low': 'Низкое',
'male': 'Мужской',
'manual': 'руководство',
'married': 'Женат/замужем',
'medium': 'средняя',
'medium<12cm': 'в среднем<12cm',
'meters': 'метры',
'missing': 'Отсутствующий',
'module allows the site administrator to configure various options.': 'Модуль позволяет администратору сайта настроить различные опции.',
'module helps monitoring the status of hospitals.': 'Модуль поможет наблюдение за состоянием больницы.',
'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS).': 'Модуль предоставляет механизм для совместной обзор развития аварии, использование электронной mapping (гис).',
'mongoloid': 'монголоид',
'more': 'Еще',
'n/a': 'Отсутствует',
'negroid': 'негроид',
'never': 'никогда',
'new record inserted': 'Новая вставленная запись',
'new': 'создать',
'next 100 rows': 'Далее 100 строк',
'no': 'нет',
'none': 'Отсутствует',
'normal': 'обычный',
'not accessible - no cached version available!': 'Не доступен - не кэшированную версию недоступна!',
'not accessible - using cached version from': 'Не доступен - с помощью кэшированную версию из',
'not specified': 'не указано',
'obsolete': 'Устаревший',
'on': 'равной',
'once': 'Однократный',
'open defecation': 'Открыть defecation',
'optional': 'необязательный',
'or import from csv file': 'Или импорт из файла csv',
'other': 'прочий',
'over one hour': 'Более одного часа',
'people': 'Участники',
'piece': 'Часть',
'pit latrine': 'выгребной туалет',
'pit': 'кратер',
'postponed': 'Отложенный',
'preliminary template or draft, not actionable in its current form': 'Предварительный шаблон или черновик не активируем в его нынешнем виде',
'previous 100 rows': '100 предыдущих строк',
'record does not exist': 'Запись не существует',
'record id': 'ID записи',
'red': 'красный',
'reports successfully imported.': 'Отчеты успешно импортирован.',
'representation of the Polygon/Line.': 'Представление в Многоугольник/строки.',
'retired': 'Удалено из списка',
'river': 'Река',
'see comment': 'См. комментарий',
'selected': 'выбрано',
'separated from family': 'разлучен с семьей',
'separated': 'Отдельное проживание',
'shaved': 'побрит',
'short': 'Краткий',
'short<6cm': 'обрубок<6см',
'sides': 'По бокам',
'sign-up now': 'Sign - up теперь',
'single': 'Холост/не замужем',
'slim': 'тонкий',
'specify': 'Указание ключа и типа',
'staff members': 'Сотрудники',
'staff': 'персонал',
'state location': 'Состояние расположение',
'state': 'штат',
'straight': 'Прямой',
'suffered financial losses': 'Понесенные финансовые потери',
'table': 'Таблица',
'tall': 'Высокий',
'this': 'Это',
'to access the system': 'Доступ к системе',
'tonsure': 'Tonsure',
'total': 'итого',
'tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!': 'Tweepy модуль не, доступных в, Python - это требует установки для не - Tropo Twitter поддержки!',
'unable to parse csv file': 'Невозможно проанализировать файл csv',
'uncheck all': 'Отменить все проверки',
'unidentified': 'Неидентифицирован',
'unknown': 'неизвестный',
'unspecified': 'неуточненный',
'unverified': 'Непроверенный',
'updated': 'обновлено',
'updates only': 'Только обновления',
'verified': 'Проверено',
'volunteer': 'Добровольцев',
'volunteers': 'волонтеры',
'wavy': 'Волнистая',
'weekly': 'Еженедельный',
'white': 'бесцветный',
'wider area, longer term, usually contain multiple Activities': 'Широкая область, долгосрочный период обычно включают многочисленные мероприятия',
'widowed': 'овдовевш-ий (ая)',
'within human habitat': 'В среде обитания человека',
'xlwt module not available within the running Python - this needs installing for XLS output!': 'Xlwt модуль не, доступных в, Python - это нужно установка для XLS вывода!',
'yes': 'да',
}
|
flavour/eden
|
languages/ru.py
|
Python
|
mit
| 351,319
|
[
"VisIt"
] |
b1e1eb25ab1d6fc0a6b781066e79ae51e59c49be7794903624c3ce2c87301550
|
########################################################################
# $HeadURL $
# File: PhysicalRemoval.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2013/04/02 11:56:10
########################################################################
""" :mod: PhysicalRemoval
=====================
.. module: PhysicalRemoval
:synopsis: PhysicalRemoval operation handler
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
PhysicalRemoval operation handler
"""
__RCSID__ = "$Id $"
# #
# @file PhysicalRemoval.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2013/04/02 11:56:22
# @brief Definition of PhysicalRemoval class.
# # imports
import os
# # from DIRAC
from DIRAC import S_OK, gMonitor
from DIRAC.RequestManagementSystem.private.OperationHandlerBase import OperationHandlerBase
from DIRAC.DataManagementSystem.Agent.RequestOperations.DMSRequestOperationsBase import DMSRequestOperationsBase
from DIRAC.Resources.Storage.StorageElement import StorageElement
########################################################################
class PhysicalRemoval( OperationHandlerBase, DMSRequestOperationsBase ):
  """
  .. class:: PhysicalRemoval

  RMS operation handler that physically removes replicas (addressed by PFN)
  from every target SE of the operation.  For each SE it first attempts a
  bulk removal and then retries the failures file-by-file; per-file results
  are accumulated in gMonitor counters and reflected in the opFile statuses.
  """

  def __init__( self, operation = None, csPath = None ):
    """c'tor

    Registers the gMonitor activities used by :meth:`__call__`.

    :param self: self reference
    :param Operation operation: Operation instance
    :param str csPath: cs config path
    """
    OperationHandlerBase.__init__( self, operation, csPath )
    # # gMonitor stuff: attempted/successful/failed removal counters (per file)
    # # plus the cumulative size of what was physically removed.
    gMonitor.registerActivity( "PhysicalRemovalAtt", "Physical file removals attempted",
                               "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
    gMonitor.registerActivity( "PhysicalRemovalOK", "Successful file physical removals",
                               "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
    gMonitor.registerActivity( "PhysicalRemovalFail", "Failed file physical removals",
                               "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
    gMonitor.registerActivity( "PhysicalRemovalSize", "Physically removed size",
                               "RequestExecutingAgent", "Bytes", gMonitor.OP_ACUM )

  def __call__( self ):
    """ perform physical removal operation

    Workflow:
      1. check RSS 'RemoveAccess' status of the target SEs; bail out early
         when the check fails or when some targets are banned,
      2. for every target SE, bulk-remove all waiting PFNs, then retry each
         bulk failure individually via :meth:`singleRemoval`,
      3. mark each waiting opFile 'Done' when it was removed from all target
         SEs, or record the joined error strings otherwise.

    :return: S_OK() on (possibly partial) completion, S_ERROR from the RSS
             check or from a failed bulk-removal call
    """
    bannedTargets = self.checkSEsRSS( access = 'RemoveAccess' )
    if not bannedTargets['OK']:
      # RSS check itself failed: count one attempt + one failure and give up
      gMonitor.addMark( "PhysicalRemovalAtt" )
      gMonitor.addMark( "PhysicalRemovalFail" )
      return bannedTargets

    if bannedTargets['Value']:
      # some target SEs banned for removal: operation stays Waiting for rescheduling
      return S_OK( "%s targets are banned for removal" % ",".join( bannedTargets['Value'] ) )

    # # get waiting files
    waitingFiles = self.getWaitingFilesList()
    # # prepare pfn dict { PFN : opFile }
    toRemoveDict = dict( [ ( opFile.PFN, opFile ) for opFile in waitingFiles ] )

    targetSEs = self.operation.targetSEList
    gMonitor.addMark( "PhysicalRemovalAtt", len( toRemoveDict ) * len( targetSEs ) )

    # # keep errors dict: { PFN : { targetSE : errorString } }, "" meaning success
    removalStatus = dict.fromkeys( toRemoveDict.keys(), None )
    for pfn in removalStatus:
      removalStatus[pfn] = dict.fromkeys( targetSEs, "" )

    for targetSE in targetSEs:

      self.log.info( "removing files from %s" % targetSE )

      # # 1st - bulk removal
      bulkRemoval = self.bulkRemoval( toRemoveDict, targetSE )
      if not bulkRemoval["OK"]:
        self.log.error( bulkRemoval["Message"] )
        self.operation.Error = bulkRemoval["Message"]
        return bulkRemoval
      bulkRemoval = bulkRemoval["Value"]

      # record per-file outcome of the bulk attempt ("" when not in Failed)
      for pfn, opFile in toRemoveDict.items():
        removalStatus[pfn][targetSE] = bulkRemoval["Failed"].get( pfn, "" )
        opFile.Error = removalStatus[pfn][targetSE]

      # # 2nd - single file removal for everything the bulk attempt failed on
      toRetry = dict( [ ( pfn, opFile ) for pfn, opFile in toRemoveDict.items() if pfn in bulkRemoval["Failed"] ] )
      for pfn, opFile in toRetry.items():
        self.singleRemoval( opFile, targetSE )
        if not opFile.Error:
          removalStatus[pfn][targetSE] = ""
        else:
          gMonitor.addMark( "PhysicalRemovalFail", 1 )
          removalStatus[pfn][targetSE] = opFile.Error

    # # update file status for waiting files
    failed = 0
    for opFile in self.operation:
      if opFile.Status == "Waiting":
        # collect the non-empty error strings accumulated over all target SEs
        errors = [ error for error in removalStatus[opFile.PFN].values() if error.strip() ]
        if errors:
          failed += 1
          opFile.Error = ",".join( errors )
          # permission errors are terminal: no point in rescheduling
          if "Write access not permitted for this credential" in opFile.Error:
            opFile.Status = "Failed"
            gMonitor.addMark( "PhysicalRemovalFail", len( errors ) )
          continue
        # removed from every target SE: account for it and mark Done
        gMonitor.addMark( "PhysicalRemovalOK", len( targetSEs ) )
        gMonitor.addMark( "PhysicalRemovalSize", opFile.Size * len( targetSEs ) )
        opFile.Status = "Done"

    if failed:
      self.operation.Error = "failed to remove %s files" % failed

    return S_OK()

  def bulkRemoval( self, toRemoveDict, targetSE ):
    """ bulk removal of pfns from :targetSE:

    :param dict toRemoveDict: { pfn : opFile, ... }
    :param str targetSE: target SE name
    :return: StorageElement.removeFile result - S_OK with a
             { "Successful" : ..., "Failed" : { pfn : error } } value, or S_ERROR
    """
    bulkRemoval = StorageElement( targetSE ).removeFile( toRemoveDict )
    return bulkRemoval

  def singleRemoval( self, opFile, targetSE ):
    """ remove single file from :targetSE:

    Only acts when the previous attempt failed with a credential error; in
    that case, if the shifter is a DataManager, a proxy able to act on the
    file's LFN is obtained and the removal is retried under it.  The outcome
    is communicated through ``opFile.Error`` / ``opFile.Status`` (error reset
    to "" on success).

    :param opFile: File instance carrying the error of the bulk attempt
    :param str targetSE: target SE name
    :return: S_OK( opFile ) always; failures are recorded on the opFile itself
    """
    proxyFile = None
    if "Write access not permitted for this credential" in opFile.Error:
      # # not a DataManger? set status to failed and return
      if "DataManager" not in self.shifter:
        opFile.Status = "Failed"
      elif not opFile.LFN:
        opFile.Error = "LFN not set"
        opFile.Status = "Failed"
      else:
        # # you're a data manager - save current proxy and get a new one for LFN and retry
        saveProxy = os.environ["X509_USER_PROXY"]
        try:
          proxyFile = self.getProxyForLFN( opFile.LFN )
          if not proxyFile["OK"]:
            opFile.Error = proxyFile["Message"]
          else:
            proxyFile = proxyFile["Value"]
            removeFile = StorageElement( targetSE ).removeFile( opFile.PFN )
            if not removeFile["OK"]:
              opFile.Error = removeFile["Message"]
            else:
              removeFile = removeFile["Value"]
              # NOTE(review): the removal above is keyed by PFN but the result
              # is looked up under the LFN - this assumes removeFile reports
              # failures by LFN; verify against StorageElement.removeFile.
              if opFile.LFN in removeFile["Failed"]:
                opFile.Error = removeFile["Failed"][opFile.LFN]
              else:
                # # reset error - replica has been removed this time
                opFile.Error = ""
        finally:
          # always clean up the temporary proxy file and
          # # put back request owner proxy to env
          if proxyFile:
            os.unlink( proxyFile )
          os.environ["X509_USER_PROXY"] = saveProxy
    return S_OK( opFile )
|
avedaee/DIRAC
|
DataManagementSystem/Agent/RequestOperations/PhysicalRemoval.py
|
Python
|
gpl-3.0
| 6,634
|
[
"DIRAC"
] |
8856b308751a705fe655a3b563a8766fef86f6311603042df984833eba5bbb79
|
import os
from os.path import join
import numpy as n
def writeScript(rootName, plate):
    """Write a PBS batch script that runs the Salpeter stellar-population fit
    for one eBOSS DR14 plate.

    The script is written to ``rootName + ".sh"``. Its content is identical to
    the historical hand-written version (each line ends with ``" \\n"``).

    :param rootName: output path without extension
    :param plate: plate identifier, substituted into the PBS job names and
        passed as the argument of ``run_stellarpop_ebossdr14_salpeter``
    """
    lines = [
        "#!/bin/bash",
        "#PBS -l walltime=260:00:00",
        "#PBS -o " + plate + ".o.$PBS_JOBID",
        "#PBS -e " + plate + ".e$PBS_JOBID",
        "#PBS -M comparat@mpe.mpg.de",
        "module load apps/anaconda/2.4.1",
        "module load apps/python/2.7.8/gcc-4.4.7",
        "export PYTHONPATH=$PYTHONPATH:/users/comparat/pySU/galaxy/python/",
        "export PYTHONPATH=$PYTHONPATH:/users/comparat/pySU/spm/python/",
        "",
        "cd /users/comparat/pySU/spm/bin",
        "python run_stellarpop_ebossdr14_salpeter " + plate,
        "",
    ]
    # context manager guarantees the handle is closed even if a write fails
    with open(rootName + ".sh", 'w') as f:
        f.write("".join(line + " \n" for line in lines))
# Driver: read the plate catalog and emit one PBS batch script per plate
# under $HOME/batch_dr14_firefly_salpeter/.
# NOTE(review): runs at import time and requires the EBOSSDR14_DIR and HOME
# environment variables to be set (KeyError otherwise) and the
# "catalogs/plateNumberList" file to exist - confirm in deployment docs.
plates = n.loadtxt( join(os.environ['EBOSSDR14_DIR'], "catalogs", "plateNumberList"), unpack=True, dtype='str')
for plate in plates:
    rootName = join(os.environ['HOME'], "batch_dr14_firefly_salpeter", plate)
    writeScript(rootName, plate)
|
JohanComparat/pySU
|
spm/bin/write_run_scripts_dr14_salpeter.py
|
Python
|
cc0-1.0
| 974
|
[
"Galaxy"
] |
d8a194e9afb8c3852d54817cd76b2af19e158bfe41bfe3456171e1b5dd377a75
|
import plac
from os import path
import numpy as np
from scipy import sparse
from scipy.io import savemat
from cmmlib.inout import load_mesh, save_coff
from cmmlib import cmm
@plac.annotations(
K=('number of CMHBs', 'positional', None, int),
mu=('sparsity parameter mu', 'positional', None, float),
visualize=('visualize the weights?', 'flag', 'v'),
scaled=('respect triangle scaling?', 'flag', 's'),
output_dir=('output directory', 'option', 'o'),
maxiter=('maximum number of iterations', 'option', None, int),
ply=('output ply file?', 'flag', None),
off=('output off file?', 'flag', None)
)
def main(input_filename, K, mu, output_dir=None, visualize=False, scaled=False,
maxiter=None, ply=False, off=False):
if (off or ply) and not output_dir:
print "please specify an output directory"
return 1
if output_dir and not path.exists(output_dir):
print "%s does not exist" % output_dir
return 2
verts, tris = load_mesh(input_filename, normalize=True)
print "%d vertices, %d faces" % (len(verts), len(tris))
Phi_cpr, D = cmm.compressed_manifold_modes(
verts, tris, K, mu=mu, scaled=scaled,
maxiter=maxiter, verbose=100, return_D=True)
if D is None:
D_diag = np.ones(len(verts))
D = sparse.eye(len(verts))
else:
D_diag = D.data
if visualize:
from cmmlib.vis.weights import show_weights
show_weights(verts, tris, Phi_cpr)
if output_dir:
# save in simple text format
np.savetxt(path.join(output_dir, 'phi.txt'), Phi_cpr, fmt='%f')
np.savetxt(path.join(output_dir, 'D_diag.txt'), D_diag, fmt='%f')
# save in matlab format
savemat(path.join(output_dir, 'phi.mat'),
dict(verts=verts, tris=tris+1, phi=Phi_cpr, D=D))
# save HDF5 format if possible
try:
import h5py
except ImportError:
print "Cannot save as HDF5, please install the h5py module"
else:
with h5py.File(path.join(output_dir, 'phi.h5'), 'w') as f:
f['verts'] = verts
f['tris'] = tris
f['phi'] = Phi_cpr
f['d_diag'] = D_diag
# save NPY format
np.save(path.join(output_dir, 'phi.npy'), Phi_cpr)
np.save(path.join(output_dir, 'D_diag.npy'), Phi_cpr)
if off or ply:
# map phi scalars to colors
from mayavi.core.lut_manager import LUTManager
from cmmlib.vis.weights import _centered
lut = LUTManager(lut_mode='RdBu').lut.table.to_array()[:, :3]
colors = [
lut[(_centered(Phi_cpr[:, k]) * (lut.shape[0]-1)).astype(int)]
for k in xrange(K)]
# save in a single scene as a collage
w = int(np.ceil(np.sqrt(K))) if K > 6 else K
spacing = 1.2 * verts.ptp(axis=0)
all_verts = [verts + spacing * (1.5, 0, 0)]
all_tris = [tris]
all_color = [np.zeros(verts.shape, np.int) + 127]
for k in xrange(K):
all_verts.append(verts + spacing * (-(k % w), 0, int(k / w)))
all_tris.append(tris + len(verts) * (k+1))
all_color.append(colors[k])
if off:
save_coff(path.join(output_dir, 'input.off'),
verts.astype(np.float32), tris)
for k in xrange(K):
save_coff(path.join(output_dir, 'cmh_%03d.off' % k),
verts.astype(np.float32), tris, colors[k])
save_coff(path.join(output_dir, 'all.off'),
np.vstack(all_verts), np.vstack(all_tris),
np.vstack(all_color))
if ply:
from tvtk.api import tvtk
pd = tvtk.PolyData(
points=np.vstack(all_verts).astype(np.float32),
polys=np.vstack(all_tris).astype(np.uint32))
pd.point_data.scalars = np.vstack(all_color).astype(np.uint8)
pd.point_data.scalars.name = 'colors'
ply = tvtk.PLYWriter(
file_name=path.join(output_dir, 'all.ply'),
input=pd, color=(1, 1, 1))
ply.array_name = 'colors'
ply.write()
if __name__ == '__main__':
plac.call(main)
|
tneumann/cmm
|
compute_cmm.py
|
Python
|
gpl-2.0
| 4,422
|
[
"Mayavi"
] |
6dfea2166c315fba3b055083f7c07138d4331d558b608d7dd786f4555427c439
|
"""
implements FilenameParsers for scripter
"""
from bowtie import BowtieFilenameParser
#from bwa import bwaFilenameParser
from illumina import BarcodeFilenameParser
from aligned import BAMFilenameParser
from peaks import PeaksFilenameParser
__all__ = [
'BowtieFilenameParser',
# 'bwaFilenameParser',
'BarcodeFilenameParser',
'BAMFilenameParser',
'PeaksFilenameParser'
]
|
benjschiller/seriesoftubes
|
seriesoftubes/fnparsers/__init__.py
|
Python
|
artistic-2.0
| 391
|
[
"BWA",
"Bowtie"
] |
28afe94f06a97c0e2718bab4243a3bf86f29daed2c30d0b4df45886b47651a61
|
import os.path
import os
import numpy as np
from PyQt5.QtWidgets import QPushButton, QWidget
from PyQt5.QtWidgets import QComboBox, QLabel, QLineEdit
from PyQt5.QtWidgets import QProgressBar, QApplication
from PyQt5.QtWidgets import QVBoxLayout, QHBoxLayout, QFormLayout
from PyQt5.QtWidgets import QSpinBox,QDoubleSpinBox
from PyQt5.QtWidgets import QSizePolicy
from PyQt5.QtCore import QCoreApplication, Qt
from PDielec.Constants import PI, avogadro_si, angstrom
# Import plotting requirements
import matplotlib
import matplotlib.figure
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from PDielec.Utilities import Debug
class PlottingTab(QWidget):
def __init__(self, parent, debug=False ):
super(QWidget, self).__init__(parent)
global debugger
debugger = Debug(debug,'PlottingTab')
debugger.print('Start:: Plotting tab initialisation')
self.settings = {}
self.refreshRequired = True
self.subplot = None
self.setWindowTitle('Plotting')
self.settings['Minimum frequency'] = 0
self.settings['Maximum frequency'] = 200
self.settings['Frequency increment'] = 0.2
self.molar_definitions = ['Unit cells','Atoms','Molecules']
self.settings['Molar definition'] = 'Unit cells'
self.settings['Number of atoms'] = 1
self.settings['Plot type'] = 'Powder Molar Absorption'
# self.settings['Plot title'] = 'Plot Title'
self.legends = []
self.vs_cm1 = []
self.frequency_units = None
self.molar_cb_current_index = 0
# store the notebook
self.notebook = parent
# get the reader from the main tab
self.reader = self.notebook.mainTab.reader
# Create last tab - PlottingTab
vbox = QVBoxLayout()
form = QFormLayout()
#
# The minimum frequency
#
self.vmin_sb = QDoubleSpinBox(self)
self.vmin_sb.setRange(0,9000)
self.vmin_sb.setValue(self.settings['Minimum frequency'])
self.vmin_sb.setToolTip('Set the minimum frequency to be considered)')
self.vmin_sb.valueChanged.connect(self.on_vmin_changed)
#
# The maximum frequency
#
self.vmax_sb = QDoubleSpinBox(self)
self.vmax_sb.setRange(0,9000)
self.vmax_sb.setValue(self.settings['Maximum frequency'])
self.vmax_sb.setToolTip('Set the maximum frequency to be considered)')
self.vmax_sb.valueChanged.connect(self.on_vmax_changed)
#
# Choose a suitable increment
#
self.vinc_sb = QDoubleSpinBox(self)
self.vinc_sb.setRange(0.0001,5.0)
self.vinc_sb.setSingleStep(0.1)
self.vinc_sb.setDecimals(4)
self.vinc_sb.setToolTip('Choose an increment for the frequency when plotting')
self.vinc_sb.setValue(self.settings['Frequency increment'])
self.vinc_sb.valueChanged.connect(self.on_vinc_changed)
#
label = QLabel('Frequency min, max and increment', self)
label.setToolTip('Choose minimum, maximum and increment for the frequency when plotting')
#
hbox = QHBoxLayout()
hbox.addWidget(self.vmin_sb)
hbox.addWidget(self.vmax_sb)
hbox.addWidget(self.vinc_sb)
form.addRow(label, hbox)
#
# Define molar quantity
#
self.molar_cb = QComboBox(self)
self.molar_cb.setToolTip('Define what a mole is. \nIn the case of Molecules, the number of atoms in a molecule must be given')
self.molar_cb.addItems(self.molar_definitions)
try:
self.molar_cb_current_index = self.molar_definitions.index(self.settings["Molar definition"])
except:
self.molar_cb_current_index = 0
self.settings["Molar definition"] = self.molar_definitions[self.molar_cb_current_index]
self.molar_cb.setCurrentIndex(self.molar_cb_current_index)
self.molar_cb.activated.connect(self.on_molar_cb_activated)
label = QLabel('Molar definition', self)
label.setToolTip('Define what a mole is. \nIn the case of Molecules, the number of atoms in a molecule must be given')
form.addRow(label, self.molar_cb)
#
# Number of atoms in a molecule
#
self.natoms_sb = QSpinBox(self)
self.natoms_sb.setToolTip('Set the number of atoms in a molecule. \nOnly need this if moles of molecules is needed')
self.natoms_sb.setRange(1,500)
self.natoms_sb.setValue(self.settings['Number of atoms'])
self.natoms_sb.valueChanged.connect(self.on_natoms_changed)
self.natoms_sb.setEnabled(False)
label = QLabel('Number of atoms per molecule', self)
label.setToolTip('Set the number of atoms in a molecule. \nOnly need this if moles of molecules is needed')
form.addRow(label, self.natoms_sb)
#jk #
#jk # Set the plot title
#jk #
#jk self.title_le = QLineEdit(self)
#jk self.title_le.setToolTip('Set the plot title')
#jk self.title_le.setText(self.settings['Plot title'])
#jk self.title_le.textChanged.connect(self.on_title_changed)
#jk label = QLabel('Plot title', self)
#jk label.setToolTip('Set the plot title')
#jk form.addRow(label, self.title_le)
#
# Set the x-axis frequency units
#
self.funits_cb = QComboBox(self)
self.funits_cb.setToolTip('Set the frequency units for the x-axis')
self.funits_cb.addItems( ['wavenumber','THz'] )
self.frequency_units = 'wavenumber'
self.funits_cb.activated.connect(self.on_funits_cb_activated)
label = QLabel('Frequency units for the x-axis', self)
label.setToolTip('Set the frequency units for the x-axis')
form.addRow(label, self.funits_cb)
#
# Final button
#
self.plot_type_cb = QComboBox(self)
self.plot_type_cb.setToolTip('Choose the which data to plot')
self.plot_types = [
'Powder Molar Absorption',
'Powder Absorption',
'Powder Real Permittivity',
'Powder Imaginary Permittivity',
'Powder ATR',
'Crystal Reflectance (P polarisation)',
'Crystal Reflectance (S polarisation)',
'Crystal Transmittance (P polarisation)',
'Crystal Transmittance (S polarisation)',
'Crystal Absorbtance (P polarisation)',
'Crystal Absorbtance (S polarisation)',
]
self.plot_ylabels = {
'Powder Molar Absorption': r'Molar Absorption Coefficient $\mathdefault{(L mole^{-1} cm^{-1})}$',
'Powder Absorption': r'Absorption Coefficient $\mathdefault{(cm^{-1})}$',
'Powder Real Permittivity': r'Real Component of Permittivity',
'Powder Imaginary Permittivity': r'Imaginary Component of Permittivity',
'Powder ATR': r'ATR absorption',
'Crystal Reflectance (P polarisation)': r'Fraction of p-polarised reflectance',
'Crystal Reflectance (S polarisation)': r'Fraction of s-polarised reflectance',
'Crystal Transmittance (P polarisation)': r'Fraction of p-polarised transmitted',
'Crystal Transmittance (S polarisation)': r'Fraction of s-polarised transmitted',
'Crystal Absorbtance (P polarisation)': r'Fraction of p-polarised absorbtance',
'Crystal Absorbtance (S polarisation)': r'Fraction of s-polarised absorbtance',
}
self.plot_type_cb.activated.connect(self.on_plot_type_cb_activated)
self.plot_type_cb.addItems( self.plot_types )
label = QLabel('Choose plot type', self)
label.setToolTip('Choose the plot type')
index = self.plot_type_cb.findText(self.settings['Plot type'], Qt.MatchFixedString)
self.plot_type_cb.setCurrentIndex(index)
plot_button = QPushButton('Update plot')
plot_button.clicked.connect(self.refresh)
plot_button.setToolTip('Update the plot')
hbox = QHBoxLayout()
hbox.addWidget(self.plot_type_cb)
hbox.addWidget(plot_button)
form.addRow(label, hbox)
# Add a progress bar
self.progressbar = QProgressBar(self)
self.progressbar.setToolTip('Show the progress of any calculations')
# Append the progress bar to the list of progress bars managed by the notebook
self.notebook.progressbars_add(self.progressbar)
self.notebook.progressbars_set_maximum(0)
label = QLabel('Calculation progress', self)
label.setToolTip('Show the progress of any calculations')
form.addRow(label,self.progressbar)
# Add the matplotlib figure to the bottom
self.figure = matplotlib.figure.Figure()
self.canvas = FigureCanvas(self.figure)
self.canvas.setSizePolicy(QSizePolicy(QSizePolicy.Expanding,QSizePolicy.Expanding))
self.toolbar = NavigationToolbar(self.canvas, self)
self.toolbar.setSizePolicy(QSizePolicy(QSizePolicy.Fixed,QSizePolicy.Fixed))
form.addRow(self.canvas)
form.addRow(self.toolbar)
vbox.addLayout(form)
# finalise the layout
self.setLayout(vbox)
QCoreApplication.processEvents()
# Create the plot
debugger.print('Finished:: Plotting tab initialisation')
return
def requestRefresh(self):
debugger.print('Start:: requestRefresh')
self.refreshRequired
debugger.print('Finished:: requestRefresh')
return
    def requestScenarioRefresh(self):
        """Ask the settings tab and every scenario tab to refresh themselves."""
        debugger.print('Start:: requestScenarioRefresh')
        self.notebook.settingsTab.requestRefresh()
        for scenario in self.notebook.scenarios:
            scenario.requestRefresh()
        debugger.print('Finished:: requestScenarioRefresh')
        return
    def on_vinc_changed(self,value):
        """Handle a change of the frequency-increment spin box.

        Signals are blocked while the setting is updated to avoid re-entrant
        callbacks; the widget is re-read rather than trusting *value*.
        """
        debugger.print('Start:: on_vinc_changed', value)
        self.vinc_sb.blockSignals(True)
        # the authoritative value comes from the spin box itself
        value = self.vinc_sb.value()
        self.settings['Frequency increment'] = value
        self.notebook.fitterTab.requestRefresh()
        self.refreshRequired = True
        self.requestScenarioRefresh()
        debugger.print('on_vinc_change ', self.settings['Frequency increment'])
        self.vinc_sb.blockSignals(False)
        debugger.print('Finished:: on_vinc_changed', value)
def on_vmin_changed(self):
debugger.print('Start:: on_vmin_changed')
self.vmin_sb.blockSignals(True)
vmin = self.vmin_sb.value()
vmax = self.vmax_sb.value()
self.settings['Minimum frequency'] = vmin
debugger.print('on_vmin_changed setting vmin to', self.settings['Minimum frequency'])
self.notebook.fitterTab.requestRefresh()
self.refreshRequired = True
self.requestScenarioRefresh()
self.vmin_sb.blockSignals(False)
debugger.print('Finished:: on_vmin_changed')
def on_vmax_changed(self):
debugger.print('Start:: on_vmax_changed')
self.vmax_sb.blockSignals(True)
vmin = self.vmin_sb.value()
vmax = self.vmax_sb.value()
self.settings['Maximum frequency'] = vmax
debugger.print('on_vmax_changed setting vmax to ', self.settings['Maximum frequency'])
self.notebook.fitterTab.requestRefresh()
self.refreshRequired = True
self.requestScenarioRefresh()
self.vmax_sb.blockSignals(False)
debugger.print('Finished:: on_vmax_changed')
def refresh(self,force=False):
debugger.print('Start:: refresh', force)
if not self.refreshRequired and not force:
debugger.print('Finished:: refreshing widget not required')
return
#
# Block signals during refresh
#
self.greyed_out()
for w in self.findChildren(QWidget):
w.blockSignals(True)
# Now refresh values
if self.settings['Maximum frequency'] < self.settings['Minimum frequency']:
self.settings['Maximum frequency'] = self.settings['Minimum frequency']+1
if self.settings['Frequency increment'] > self.settings['Maximum frequency'] - self.settings['Minimum frequency']:
self.settings['Frequency increment'] = (self.settings['Maximum frequency'] - self.settings['Minimum frequency'])/2
self.vmin_sb.setValue(self.settings['Minimum frequency'])
self.vmax_sb.setValue(self.settings['Maximum frequency'])
self.vinc_sb.setValue(self.settings['Frequency increment'])
index = self.plot_type_cb.findText(self.settings['Plot type'], Qt.MatchFixedString)
self.plot_type_cb.setCurrentIndex(index)
try:
self.molar_cb_current_index = self.molar_definitions.index(self.settings["Molar definition"])
except:
self.molar_cb_current_index = 0
self.settings["Molar definition"] = self.molar_definitions[self.molar_cb_current_index]
self.molar_cb.setCurrentIndex(self.molar_cb_current_index)
self.natoms_sb.setValue(self.settings['Number of atoms'])
# Refresh the widgets that depend on the reader
self.reader = self.notebook.reader
if self.reader is not None:
self.set_concentrations()
# Reset the progress bar
self.notebook.progressbars_set_maximum(0)
#
# Unblock signals after refresh
#
for w in self.findChildren(QWidget):
w.blockSignals(False)
QCoreApplication.processEvents()
debugger.print('calling plot from refresh')
self.plot()
refreshRequired = False
debugger.print('Finished:: refresh', force)
return
    def on_natoms_changed(self, value):
        """Handle a change of the atoms-per-molecule spin box.

        Recomputes the molar concentration from the unit-cell volume and
        then refreshes the plot.
        """
        debugger.print('Start:: on_natoms_changed', value)
        self.settings['Number of atoms'] = value
        debugger.print('on natoms changed ', self.settings['Number of atoms'])
        # moles of molecules per litre; the 1.0e-24 factor converts the cell
        # volume to cm^3 (assumes reader.volume is in Angstrom^3 -- TODO confirm)
        self.settings['concentration'] = 1000.0 / (avogadro_si * self.reader.volume * 1.0e-24 * self.settings['Number of atoms'] / self.reader.nions)
        debugger.print('The concentration has been set', self.settings['Molar definition'], self.settings['concentration'])
        self.refreshRequired = True
        self.notebook.fitterTab.requestRefresh()
        self.refresh()
        debugger.print('Finished:: on_natoms_changed', value)
def on_plot_type_cb_activated(self, index):
debugger.print('Start:: on_plot_type_cb_activated', index)
self.settings['Plot type'] = self.plot_type_cb.currentText()
debugger.print('Changed plot type to ', self.settings['Plot type'])
self.refreshRequired = True
self.notebook.fitterTab.requestRefresh()
self.refresh()
debugger.print('Finished:: on_plot_type_cb_activated', index)
def on_funits_cb_activated(self, index):
debugger.print('Start:: on_funits_cb_activated', index)
if index == 0:
self.frequency_units = 'wavenumber'
else:
self.frequency_units = 'THz'
self.refreshRequired = True
self.notebook.fitterTab.requestRefresh()
self.refresh()
debugger.print('Frequency units changed to ', self.frequency_units)
debugger.print('Finished:: on_funits_cb_activated', index)
    def on_molar_cb_activated(self, index):
        """Handle selection of a new molar definition (unit cells / atoms / molecules)."""
        debugger.print('Start:: on_molar_cb_activated', index)
        self.molar_cb_current_index = index
        self.settings['Molar definition'] = self.molar_definitions[index]
        # recompute the concentrations implied by the new definition
        self.set_concentrations()
        self.refreshRequired = True
        self.notebook.fitterTab.requestRefresh()
        self.refresh()
        debugger.print('The concentration has been set', self.settings['Molar definition'], self.settings['concentration'])
        debugger.print('Finished:: on_molar_cb_activated', index)
        return
def set_concentrations(self):
debugger.print('Start:: set_concentration')
if self.settings['Molar definition'] == 'Molecules':
self.settings['concentration'] = 1000.0 / (avogadro_si * self.reader.volume * 1.0e-24 * self.settings['Number of atoms'] / self.reader.nions)
self.natoms_sb.setEnabled(True)
elif self.settings['Molar definition'] == 'Unit cells':
self.settings['concentration'] = 1000.0 / (avogadro_si * self.reader.volume * 1.0e-24)
self.settings['cell concentration'] = 1000.0 / (avogadro_si * self.reader.volume * 1.0e-24)
self.natoms_sb.setEnabled(False)
elif self.settings['Molar definition'] == 'Atoms':
self.settings['concentration'] = 1000.0 / (avogadro_si * self.reader.volume * 1.0e-24 / self.reader.nions)
self.natoms_sb.setEnabled(False)
debugger.print('Finished:: set_concentration')
return
def writeSpreadsheet(self):
debugger.print('Start::writeSpreadsheet')
if self.notebook.spreadsheet is None:
debugger.print('Finished::writeSpreadsheet spreadsheet is None')
return
# make sure the plottingTab is up to date
self.refresh()
# Handle powder plots
molarAbsorptionCoefficients = []
absorptionCoefficients = []
realPermittivities = []
imagPermittivities = []
sp_atrs = []
R_ps = []
R_ss = []
T_ps = []
T_ss = []
powder_legends = []
crystal_legends = []
# Deal with Scenarios
sp = self.notebook.spreadsheet
sp.selectWorkSheet('Scenarios')
sp.delete()
sp.writeNextRow(['A list of the scenarios used the calculation of the effective medium'],col=1)
for index,scenario in enumerate(self.notebook.scenarios):
if scenario.scenarioType == 'Powder':
direction = scenario.direction
depolarisation = scenario.depolarisation
sp.writeNextRow([''],col=1)
sp.writeNextRow(['Scenario '+str(index)],col=1,check=1)
settings = scenario.settings
for key in sorted(settings,key=str.lower):
sp.writeNextRow([key, settings[key]],col=1,check=1)
sp.writeNextRow(['Normalised unique direction']+direction.tolist(), col=1,check=1)
sp.writeNextRow(['Depolarisation matrix'], col=1,check=1)
sp.writeNextRow(depolarisation[0].tolist(), col=2, check=1)
sp.writeNextRow(depolarisation[1].tolist(), col=2, check=1)
sp.writeNextRow(depolarisation[2].tolist(), col=2, check=1)
molarAbsorptionCoefficients.append( scenario.get_result(self.vs_cm1,self.plot_types[0] ) )
absorptionCoefficients.append( scenario.get_result(self.vs_cm1,self.plot_types[1] ) )
realPermittivities.append( scenario.get_result(self.vs_cm1,self.plot_types[2] ) )
imagPermittivities.append( scenario.get_result(self.vs_cm1,self.plot_types[3] ) )
sp_atrs.append( scenario.get_result(self.vs_cm1,self.plot_types[4] ) )
powder_legends.append(scenario.settings['Legend'])
else:
sp.writeNextRow([''],col=1)
sp.writeNextRow(['Scenario '+str(index)],col=1,check=1)
settings = scenario.settings
for key in sorted(settings,key=str.lower):
sp.writeNextRow([key, settings[key]],col=1,check=1)
sp.writeNextRow(['Crystal axes in the laboratory frame'], col=1,check=1)
sp.writeNextRow(scenario.labframe_a.tolist(), col=2, check=1)
sp.writeNextRow(scenario.labframe_b.tolist(), col=2, check=1)
sp.writeNextRow(scenario.labframe_c.tolist(), col=2, check=1)
# Store the reflectance and transmittance
R_ps.append( scenario.get_result(self.vs_cm1,self.plot_types[5] ) )
R_ss.append( scenario.get_result(self.vs_cm1,self.plot_types[6] ) )
T_ps.append( scenario.get_result(self.vs_cm1,self.plot_types[7] ) )
T_ss.append( scenario.get_result(self.vs_cm1,self.plot_types[8] ) )
crystal_legends.append(scenario.settings['Legend'])
# Single crystal Permittivity
dielecv = self.notebook.settingsTab.get_crystal_permittivity(self.vs_cm1)
# Powder results
# Work out what molar units we are using
if len(molarAbsorptionCoefficients) > 0:
if self.settings['Molar definition'] == 'Molecules':
sheet_name = 'Powder Molar Absorption (mols)'
elif self.settings['Molar definition'] == 'Unit cells':
sheet_name = 'Powder Molar Absorption (cells)'
elif self.settings['Molar definition'] == 'Atoms':
sheet_name = 'Powder Molar Absorption (atoms)'
# Always write out the moles of cell
self.write_powder_results(sp, 'Powder Molar Absorption (cells)', self.vs_cm1, powder_legends, molarAbsorptionCoefficients)
if not self.settings['Molar definition'] == 'Unit cells':
# If some other molar definition has been used then write that out too
molarAbsorptionCoefficients_mols = []
molar_scaling = self.settings['cell concentration']/self.settings['concentration']
for absorption in molarAbsorptionCoefficients:
molarAbsorptionCoefficients_mols.append(molar_scaling * np.array(absorption))
self.write_powder_results(sp, sheet_name, self.vs_cm1, powder_legends, molarAbsorptionCoefficients_mols)
# end if
self.write_powder_results(sp, 'Powder Absorption', self.vs_cm1, powder_legends, absorptionCoefficients)
self.write_powder_results(sp, 'Powder Real Permittivity', self.vs_cm1, powder_legends, realPermittivities)
self.write_powder_results(sp, 'Powder Imaginary Permittivity', self.vs_cm1, powder_legends, imagPermittivities)
self.write_powder_results(sp, 'Powder ATR Reflectance', self.vs_cm1, powder_legends, sp_atrs)
# Single Crystal results
if len(R_ps) > 0:
self.write_crystal_results(sp, 'Crystal R_p', self.vs_cm1, crystal_legends, R_ps)
self.write_crystal_results(sp, 'Crystal R_s', self.vs_cm1, crystal_legends, R_ss)
self.write_crystal_results(sp, 'Crystal T_p', self.vs_cm1, crystal_legends, T_ps)
self.write_crystal_results(sp, 'Crystal T_s', self.vs_cm1, crystal_legends, T_ss)
if len(dielecv) > 0:
self.write_eps_results(sp, self.vs_cm1, dielecv)
debugger.print('Finished::writeSpreadsheet')
return
def write_eps_results(self, sp, vs, dielecv):
debugger.print('Start:: write_eps_results length vs',len(vs))
sp.selectWorkSheet('Real Crystal Permittivity')
sp.delete()
headers = ['frequencies (cm-1)', 'xx', 'yy', 'zz', 'xy', 'xz', 'yz' ]
sp.writeNextRow(headers,row=0, col=1)
for v,eps in zip(vs,dielecv):
eps_xx_r = np.real(eps[0][0])
eps_yy_r = np.real(eps[1][1])
eps_zz_r = np.real(eps[2][2])
eps_xy_r = np.real(eps[0][1])
eps_xz_r = np.real(eps[0][2])
eps_yz_r = np.real(eps[1][2])
output = [v, eps_xx_r, eps_yy_r, eps_zz_r, eps_xy_r, eps_xz_r, eps_yz_r ]
sp.writeNextRow(output, col=1,check=1)
sp.selectWorkSheet('Imag Crystal Permittivity')
sp.delete()
sp.writeNextRow(headers,row=0, col=1)
for v,eps in zip(vs,dielecv):
eps_xx_i = np.imag(eps[0][0])
eps_yy_i = np.imag(eps[1][1])
eps_zz_i = np.imag(eps[2][2])
eps_xy_i = np.imag(eps[0][1])
eps_xz_i = np.imag(eps[0][2])
eps_yz_i = np.imag(eps[1][2])
output = [v, eps_xx_i, eps_yy_i, eps_zz_i, eps_xy_i, eps_xz_i, eps_yz_i ]
sp.writeNextRow(output, col=1,check=1)
debugger.print('Finished:: write_eps_results length vs',len(vs))
return
def write_crystal_results(self, sp, name, vs, legends, yss):
"""
sp is the spreadsheet object
name is the worksheet name used for writing
vs an np.array of the frequencies
yss a list of np.arrays of the reflections and transmittance ]
headings the heading names for the yss
"""
debugger.print('Start:: write_crystal_results')
debugger.print('write_crystal_results name',name)
debugger.print('write_crystal_results legends',legends)
debugger.print('write_crystal_results length vs',len(vs))
sp.selectWorkSheet(name)
sp.delete()
headers = ['frequencies (cm-1)']
headers.extend(legends)
sp.writeNextRow(headers,row=0, col=1)
for iv,v in enumerate(vs):
output = [v]
for ys in yss:
output.append(ys[iv])
sp.writeNextRow(output, col=1,check=1)
debugger.print('Finished:: write_crystal_results')
return
def write_powder_results(self, sp, name, vs, legends, yss):
debugger.print('Start:: write powder results')
debugger.print('write_powder_results name',name)
debugger.print('write_powder_results legends',legends)
debugger.print('write_powder_results length vs',len(vs))
sp.selectWorkSheet(name)
sp.delete()
headers = ['frequencies (cm-1)']
#for isc,ys in enumerate(yss):
# headers.append('Scenario'+str(isc))
headers.extend(legends)
sp.writeNextRow(headers,row=0, col=1)
for iv,v in enumerate(vs):
output = [v]
for ys in yss:
output.append(ys[iv])
sp.writeNextRow(output, col=1,check=1)
debugger.print('Finished:: write powder results')
return
    def plot(self):
        """Recalculate and draw the currently selected plot type for every scenario."""
        # import matplotlib.pyplot as pl
        # mp.use('Qt5Agg')
        debugger.print('Start:: plot')
        # Assemble the mainTab settings
        settings = self.notebook.mainTab.settings
        program = settings['Program']
        filename = self.notebook.mainTab.getFullFileName()
        reader = self.notebook.mainTab.reader
        # nothing can be plotted until a program/file has been read and the
        # crystal permittivity has been calculated
        if reader is None:
            debugger.print('Finished:: plot aborting because reader is NONE')
            return
        if program == '':
            debugger.print('Finished:: plot aborting because program is not set')
            return
        if filename == '':
            debugger.print('Finished:: plot aborting because filename is not set')
            return
        if self.notebook.settingsTab.CrystalPermittivity is None:
            debugger.print('Finished:: plot aborting because settingTab.CrystalPermittivity is not set')
            return
        QApplication.setOverrideCursor(Qt.WaitCursor)
        vmin = self.settings['Minimum frequency']
        vmax = self.settings['Maximum frequency']
        vinc = self.settings['Frequency increment']
        # the half-increment padding ensures vmax itself is included in the range
        self.vs_cm1 = np.arange(float(vmin), float(vmax)+0.5*float(vinc), float(vinc))
        self.subplot = None
        self.figure.clf()
        # choose x-axis units; 1 cm-1 = 0.02998 THz
        if self.frequency_units == 'wavenumber':
            xlabel = r'Frequency $\mathdefault{(cm^{-1})}}$'
            xscale = 1.0
        else:
            xlabel = r'THz'
            xscale = 0.02998
        x = np.array(self.vs_cm1)
        self.subplot = self.figure.add_subplot(111)
        # progress bar budget: one pass per scenario (+1 if settings need refreshing)
        n = len(self.notebook.scenarios)
        if self.notebook.settingsTab.refreshRequired:
            n += 1
        self.notebook.progressbars_set_maximum(n*len(x))
        self.legends = []
        plots = 0
        for scenario in self.notebook.scenarios:
            legend = scenario.settings['Legend']
            self.legends.append(legend)
            y = scenario.get_result(self.vs_cm1,self.settings['Plot type'])
            if y is not None and len(y) > 0:
                y = np.array(y)
                if self.settings['Plot type'] == 'Powder Molar Absorption':
                    # rescale from per-unit-cell to the chosen molar definition
                    y = y * self.settings['cell concentration']/self.settings['concentration']
                plots += 1
                line, = self.subplot.plot(xscale*x,y,lw=2, label=legend )
        # only decorate the axes when at least one curve was drawn
        if plots > 0:
            self.subplot.set_xlabel(xlabel)
            self.subplot.set_ylabel(self.plot_ylabels[self.settings['Plot type']])
            self.subplot.legend(loc='best')
            self.subplot.set_title(self.settings['Plot type'])
        self.canvas.draw_idle()
        QApplication.restoreOverrideCursor()
        debugger.print('Finished:: plot')
def greyed_out(self):
"""Handle items that should be greyed out if they are not needed"""
debugger.print('Start:: greyed_out')
powder_scenarios_present = False
crystal_scenarios_present = False
for scenario in self.notebook.scenarios:
if scenario.scenarioType == 'Powder':
powder_scenarios_present = True
else:
crystal_scenarios_present = True
# end of for loop
#
# Disable any plot types that are not needed
#
self.plot_type_cb.model().item(0).setEnabled(True)
self.plot_type_cb.model().item(1).setEnabled(True)
self.plot_type_cb.model().item(2).setEnabled(True)
self.plot_type_cb.model().item(3).setEnabled(True)
self.plot_type_cb.model().item(4).setEnabled(True)
self.plot_type_cb.model().item(5).setEnabled(True)
self.plot_type_cb.model().item(6).setEnabled(True)
self.plot_type_cb.model().item(7).setEnabled(True)
self.plot_type_cb.model().item(8).setEnabled(True)
self.plot_type_cb.model().item(9).setEnabled(True)
self.plot_type_cb.model().item(10).setEnabled(True)
index = self.plot_type_cb.findText(self.settings['Plot type'], Qt.MatchFixedString)
if not powder_scenarios_present:
self.plot_type_cb.model().item(0).setEnabled(False)
self.plot_type_cb.model().item(1).setEnabled(False)
self.plot_type_cb.model().item(2).setEnabled(False)
self.plot_type_cb.model().item(3).setEnabled(False)
self.plot_type_cb.model().item(4).setEnabled(False)
if index < 5:
self.plot_type_cb.setCurrentIndex(5)
self.settings['Plot type'] = self.plot_type_cb.currentText()
if not crystal_scenarios_present:
self.plot_type_cb.model().item(5).setEnabled(False)
self.plot_type_cb.model().item(6).setEnabled(False)
self.plot_type_cb.model().item(7).setEnabled(False)
self.plot_type_cb.model().item(8).setEnabled(False)
self.plot_type_cb.model().item(9).setEnabled(False)
self.plot_type_cb.model().item(10).setEnabled(False)
if index >= 5:
self.plot_type_cb.setCurrentIndex(0)
self.settings['Plot type'] = self.plot_type_cb.currentText()
debugger.print('Finished:: greyed_out')
|
JohnKendrick/PDielec
|
PDielec/GUI/PlottingTab.py
|
Python
|
mit
| 31,711
|
[
"CRYSTAL"
] |
2d1e65aa5073157537df718314cec1cd8f36e22c66390fce38a2b48d7d8ca8e4
|
from __future__ import absolute_import
from sfepy.base.testing import TestCommon
import numpy as nm
from sfepy import data_dir
from sfepy.mesh.splinebox import SplineBox, SplineRegion2D
from sfepy.mesh.bspline import BSpline
from sfepy.discrete.fem import Mesh
from six.moves import range
def tetravolume(cells, vertices):
    """Return the total (sign-flipped) volume of the given tetrahedral cells.

    Each cell volume is det(M) / 6, where M is the 4x4 matrix whose rows are
    the four vertex coordinates augmented with a trailing column of ones;
    the accumulated sum is negated before being returned.
    """
    sixth = 1.0 / 6.0
    aug = nm.ones((4, 4), dtype=nm.float64)
    total = 0.0
    for cell in cells:
        aug[:, :3] = vertices[cell, :]
        total += sixth * nm.linalg.det(aug)
    return -total
tolerance = 1e-6
class Test(TestCommon):
    @staticmethod
    def from_conf(conf, options):
        # Factory entry point required by the sfepy test framework.
        return Test(conf=conf, options=options)
def test_spbox_3d(self):
"""
Check volume change of the mesh which is deformed using
the SplineBox functions.
"""
from sfepy.discrete.fem import Mesh
from sfepy.mesh.splinebox import SplineBox
mesh = Mesh.from_file(data_dir + '/meshes/3d/cylinder.vtk')
conn = mesh.get_conn('3_4')
vol0 = tetravolume(conn, mesh.coors)
bbox = nm.array(mesh.get_bounding_box()).T
spbox = SplineBox(bbox, mesh.coors)
cpoints0 = spbox.get_control_points(init=True)
for ii in range(4):
for jj in range(4):
spbox.move_control_point((0, ii, jj), [-0.02, 0, 0])
coors = spbox.evaluate()
vol1 = tetravolume(conn, coors)
mesh.coors[:] = coors
spbox.set_control_points(cpoints0)
coors = spbox.evaluate()
vol2 = tetravolume(conn, coors)
ok = True
actual_volumes = (vol0, vol1, vol2)
expected_volumes = (1.22460186e-4, 1.46950423e-4, 1.22460186e-4)
for ii in range(3):
relerr = abs(actual_volumes[ii] - expected_volumes[ii])\
/ expected_volumes[ii]
ok = ok and (relerr < tolerance)
if not ok:
self.report('expected volumes:')
self.report(expected_volumes)
self.report('actual volumes:')
self.report(actual_volumes)
return ok
    def test_spbox_2d(self):
        """
        Check position of a given vertex in the deformed mesh.
        """
        mesh = Mesh.from_file(data_dir + '/meshes/2d/square_tri1.mesh')
        # spline box with a 2x1 grid of segments over the given bounding box
        spb = SplineBox([[-1, 1], [-1, 0.6]], mesh.coors, nsg=[2,1])
        spb.move_control_point(1, [0.1, -0.2])
        spb.move_control_point(2, [0.2, -0.3])
        spb.move_control_point(3, [0.0, -0.1])
        # track how far vertex 175 moves under the deformation
        pt0 = mesh.coors[175,:].copy()
        mesh.cmesh.coors[:] = spb.evaluate()
        pt1 = mesh.coors[175,:]
        expected_distance = 0.165892726387
        actual_distance = nm.linalg.norm(pt0 - pt1)
        # compare the relative error against the module-level tolerance
        ok = nm.fabs(actual_distance - expected_distance)\
             / expected_distance < tolerance
        if not ok:
            self.report('expected distance:')
            self.report(expected_distance)
            self.report('actual distance:')
            self.report(actual_distance)
        return ok
def test_spbox_field(self):
"""
'Field' vs. 'coors'.
"""
mesh = Mesh.from_file(data_dir + '/meshes/2d/its2D.mesh')
coors = mesh.coors.copy()
bbox = nm.vstack((nm.amin(coors, 0), nm.amax(coors, 0))).T
coors_1 = coors.copy()
alpha = coors[:, 0]
spbox = SplineBox(bbox, coors, nsg=[1, 2], field=alpha)
dv1 = spbox.evaluate_derivative(6, 1)
spbox.move_control_point(6, -0.2)
c1 = spbox.evaluate()
coors_1[:, 0] = c1[:, 0]
alpha = coors[:, 1]
spbox = SplineBox(bbox, coors, nsg=[1, 2], field=alpha)
dv2 = spbox.evaluate_derivative(6, 1)
spbox.move_control_point(6, 0.2)
c2 = spbox.evaluate()
coors_1[:, 1] = c2[:, 0]
spbox = SplineBox(bbox, coors, nsg=[1, 2])
dv = spbox.evaluate_derivative(6, [1, 1])
spbox.move_control_point(6, [-0.2, 0.2])
coors_2 = spbox.evaluate()
rel_coor_dist = nm.linalg.norm(coors_2 - coors_1)\
/ nm.linalg.norm(coors_2)
ok = rel_coor_dist < tolerance
rel_dvel_dist = nm.linalg.norm(dv - nm.hstack([dv1, dv2]))\
/ nm.linalg.norm(dv)
ok = ok and rel_dvel_dist < tolerance
if not ok:
self.report('modified coordinates do not match, relative error:')
self.report(rel_coor_dist)
self.report('derivatives do not match, relative error:')
self.report(rel_dvel_dist)
return ok
def test_spregion2d(self):
"""
Check position of a given vertex in the deformed mesh.
"""
line_l = nm.array([[-1, 1], [-1, .5], [-1, 0], [-1, -.5]])
line_r = nm.array([[0, -.2], [.1, .2], [.3, .6], [.4, 1]])
sp_l = BSpline(3, is_cyclic=False)
sp_l.approximate(line_l, ncp=4)
kn_lr = sp_l.get_knot_vector()
sp_r = BSpline(3, is_cyclic=False)
sp_r.approximate(line_r, knots=kn_lr)
line_b = nm.array([[-1, -.5], [-.8, -.6], [-.5, -.4], [-.2, -.2],
[0, -.2]])
line_t = nm.array([[.4, 1], [0, 1], [-.2, 1], [-.6, 1], [-1, 1]])
sp_b = BSpline(3, is_cyclic=False)
sp_b.approximate(line_b, ncp=5)
kn_bt = sp_b.get_knot_vector()
sp_t = BSpline(3, is_cyclic=False)
sp_t.approximate(line_t, knots=kn_bt)
mesh = Mesh.from_file(data_dir + '/meshes/2d/square_tri1.mesh')
spb = SplineRegion2D([sp_b, sp_r, sp_t, sp_l], mesh.coors)
spb.move_control_point(5, [-.2, .1])
spb.move_control_point(10, [-.3, .2])
spb.move_control_point(15, [-.1, .2])
pt0 = mesh.coors[145,:].copy()
mesh.cmesh.coors[:] = spb.evaluate()
pt1 = mesh.coors[145,:]
expected_distance = 0.0908306614584
actual_distance = nm.linalg.norm(pt0 - pt1)
ok = nm.fabs(actual_distance - expected_distance)\
/ expected_distance < tolerance
if not ok:
self.report('expected distance:')
self.report(expected_distance)
self.report('actual distance:')
self.report(actual_distance)
return ok
|
vlukes/sfepy
|
tests/test_splinebox.py
|
Python
|
bsd-3-clause
| 6,186
|
[
"VTK"
] |
e479faaa10672ea814f4d9a3af680b9b2ffbbc540b07eb6f1d4b625833304bc9
|
""" Load and parse configuration file."""
from __future__ import print_function
import logging
import os
import datetime
import random
import subprocess
from dateutil.relativedelta import relativedelta
from taca.utils.config import CONFIG
from taca.utils import config as conf
from taca.utils import filesystem as fs
from taca.utils import statusdb
from io import open
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
def create_version_report(path):
    """Write a mock ``version_report.txt`` into *path*.

    The file mimics the report that Piper/ngi_pipeline produces for a whole
    genome re-sequencing analysis: a README paragraph followed by the tool
    and reference versions used.

    :param path: existing directory the report is written into.
    """
    # Fixed typos in the generated report text: "aligned to to" -> "aligned to",
    # "funcionally" -> "functionally".
    with open(os.path.join(path, 'version_report.txt'), 'w') as VERSION_REPORT:
        VERSION_REPORT.write(u'******\n')
        VERSION_REPORT.write(u'README\n')
        VERSION_REPORT.write(u'******\n')
        VERSION_REPORT.write(u'\n')
        VERSION_REPORT.write(u'Data has been aligned to the reference using bwa. The raw alignments have then been deduplicated, recalibrated and cleaned using GATK. Quality control information was gathered using Qualimap. SNVs and indels have been called using the HaplotypeCaller. These variants were then functionally annotated using snpEff. The pipeline used was Piper, see below for more information.\n')
        VERSION_REPORT.write(u'\n')
        VERSION_REPORT.write(u'The versions of programs and references used:\n')
        VERSION_REPORT.write(u'piper: unknown\n')
        VERSION_REPORT.write(u'bwa: 0.7.12\n')
        VERSION_REPORT.write(u'samtools: 0.1.19\n')
        VERSION_REPORT.write(u'qualimap: v2.2\n')
        VERSION_REPORT.write(u'snpEff: 4.1\n')
        VERSION_REPORT.write(u'snpEff reference: GRCh37.75\n')
        VERSION_REPORT.write(u'gatk: 3.3-0-geee94ec\n')
        VERSION_REPORT.write(u'\n')
        VERSION_REPORT.write(u'reference: human_g1k_v37.fasta\n')
        VERSION_REPORT.write(u'db_snp: gatk-bundle/2.8\n')
        VERSION_REPORT.write(u'hapmap: gatk-bundle/2.8\n')
        VERSION_REPORT.write(u'omni: gatk-bundle/2.8\n')
        VERSION_REPORT.write(u'1000G_indels: gatk-bundle/2.8\n')
        VERSION_REPORT.write(u'Mills_and_1000G_golden_standard_indels: gatk-bundle/2.8\n')
        VERSION_REPORT.write(u'\n')
        VERSION_REPORT.write(u'indel resource file: {Mills_and_1000G_gold_standard.indels.b37.vcf version: gatk-bundle/2.8}\n')
        VERSION_REPORT.write(u'indel resource file: {1000G_phase1.indels.b37.vcf version: gatk-bundle/2.8}\n')
        VERSION_REPORT.write(u'\n')
        VERSION_REPORT.write(u'piper\n')
        VERSION_REPORT.write(u'-----\n')
        VERSION_REPORT.write(u'Piper is a pipeline system developed and maintained at the National Genomics Infrastructure build on top of GATK Queue. For more information and the source code visit: www.github.com/NationalGenomicsInfrastructure/piper\n')
def create_FC(incoming_dir, run_name, samplesheet, fastq_1=None, fastq_2=None):
    """Create a mock demultiplexed flowcell folder under *incoming_dir*.

    Builds the Demultiplexing/<project>/<sample> tree described by
    *samplesheet* (a list of dicts, one per samplesheet row), writes a
    minimal SampleSheet.csv, and either touches empty fastq files or
    symlinks *fastq_1*/*fastq_2* into each sample folder.

    :param incoming_dir: root folder where the flowcell dir is created.
    :param run_name: flowcell run id, e.g. 160217_ST-E00201_0063_AHJHNYCCXX.
    :param samplesheet: parsed samplesheet rows (list of dicts).
    :param fastq_1: optional path to an R1 fastq to symlink; if None an
        empty file is touched instead.
    :param fastq_2: optional path to an R2 fastq to symlink.
    """
    # Create something like 160217_ST-E00201_0063_AHJHNYCCXX
    path_to_fc = os.path.join(incoming_dir, run_name)
    if os.path.exists(path_to_fc):
        # This FC exists, skip it
        return
    fs.create_folder(path_to_fc)
    # An RTAComplete.txt marker makes the run look finished to TACA.
    fs.touch(os.path.join(path_to_fc, 'RTAComplete.txt'))
    # Create folder Demultiplexing
    fs.create_folder(os.path.join(path_to_fc, 'Demultiplexing'))
    # Create folder Demultiplexing/Reports
    fs.create_folder(os.path.join(path_to_fc, 'Demultiplexing', 'Reports'))
    # Create folder Demultiplexing/Stats
    fs.create_folder(os.path.join(path_to_fc, 'Demultiplexing', 'Stats'))
    # Memorise SampleSheet header (keys of the first row, in order).
    header = []
    for key in samplesheet[0]:
        header.append(key)
    # `counter` numbers samples within a lane (the S<N> part of the fastq
    # name) and is reset whenever the lane changes.
    counter = 1
    current_lane = ''
    for line in samplesheet:
        # Samplesheets from different bcl2fastq versions use different
        # column names; accept both spellings.
        project_name = line.get('Sample_Project', line.get('Project', ''))
        lane = line['Lane']
        if current_lane == '':
            current_lane = lane
        elif current_lane != lane:
            counter = 1
            current_lane = lane
        sample_id = line.get('SampleID', line.get('Sample_ID', ''))
        sample_name = line.get('SampleName', line.get('Sample_Name', ''))
        # Create dir structure
        fs.create_folder(os.path.join(path_to_fc, 'Demultiplexing', project_name, sample_id))
        # Now create the data (bcl2fastq-style fastq file names).
        fastq_1_dest = '{}_S{}_L00{}_R1_001.fastq.gz'.format(sample_name, counter, lane)
        fastq_2_dest = '{}_S{}_L00{}_R2_001.fastq.gz'.format(sample_name, counter, lane)
        counter += 1
        if fastq_1 is None:
            # No source data given: create empty placeholder fastqs.
            fs.touch(os.path.join(path_to_fc, 'Demultiplexing', project_name,
                                  sample_id, fastq_1_dest))
            fs.touch(os.path.join(path_to_fc, 'Demultiplexing', project_name,
                                  sample_id, fastq_2_dest))
        else:
            # Re-use real fastq files via symlinks to save space.
            fs.do_symlink(fastq_1, os.path.join(path_to_fc, 'Demultiplexing',
                                                project_name, sample_id, fastq_1_dest))
            fs.do_symlink(fastq_2, os.path.join(path_to_fc, 'Demultiplexing',
                                                project_name, sample_id, fastq_2_dest))
    # Write a minimal SampleSheet.csv reproducing the parsed rows.
    with open(os.path.join(path_to_fc, 'SampleSheet.csv'), 'w') as Samplesheet_file:
        Samplesheet_file.write(u'[Header]\n')
        Samplesheet_file.write(u'Date,2016-03-29\n')
        Samplesheet_file.write(u'Investigator Name,Christian Natanaelsson\n')
        Samplesheet_file.write(u'[Data]\n')
        for key in header:
            Samplesheet_file.write(u'{},'.format(key))
        Samplesheet_file.write(u'\n')
        for line in samplesheet:
            for key in header:
                Samplesheet_file.write(u'{},'.format(line[key]))
            Samplesheet_file.write(u'\n')
def create_uppmax_env(ngi_config):
    """Create the Uppmax-like folder layout described by *ngi_config*.

    Reads the ``analysis`` and ``environment`` sections of the ngi_pipeline
    configuration, creates the flowcell inbox and analysis top directory,
    and returns the resolved paths.

    :param ngi_config: parsed ngi_pipeline configuration (dict-like).
    :returns: dict with keys base_root, sthlm_root, top_dir, flowcell_inbox.
    :raises SystemExit: if required configuration entries are missing or
        base_root does not exist.
    """
    # NOTE: the original code called sys.exit() although `sys` is never
    # imported in this module (NameError at runtime); raising SystemExit
    # directly is equivalent and needs no import.
    paths = {}
    if 'analysis' not in ngi_config:
        raise SystemExit('ERROR: analysis must be a field of NGI_CONFIG.')
    try:
        base_root = ngi_config['analysis']['base_root']
        paths['base_root'] = base_root
        sthlm_root = ngi_config['analysis']['sthlm_root']
        paths['sthlm_root'] = sthlm_root
        top_dir = ngi_config['analysis']['top_dir']
        paths['top_dir'] = top_dir
    except KeyError as e:
        raise SystemExit('Config file is missing the key {}, make sure it have all required information'.format(str(e)))
    if 'environment' not in ngi_config:
        raise SystemExit('ERROR: environment must be a field of NGI_CONFIG.')
    try:
        # Get base root; a missing key raises KeyError (the original caught
        # ValueError here, which could never fire).
        flowcell_inboxes = ngi_config['environment']['flowcell_inbox']
        flowcell_inbox = flowcell_inboxes[0]  # I assume there is only one
        paths['flowcell_inbox'] = flowcell_inbox
    except (KeyError, IndexError) as e:
        raise SystemExit('key error, flowcell_inbox not found in "{}": {}'.format(ngi_config, e))
    # Now I need to create the folders for this
    if not os.path.exists(base_root):
        raise SystemExit('base_root needs to exists: {}'.format(base_root))
    fs.create_folder(flowcell_inbox)
    if sthlm_root is None:
        path_to_analysis = os.path.join(base_root, top_dir)
    else:
        path_to_analysis = os.path.join(base_root, sthlm_root, top_dir)
    fs.create_folder(path_to_analysis)
    return paths
def produce_analysis_qc_ngi(ngi_config, project_id):
    """Create the qc_ngi analysis folder tree for *project_id*.

    For every sample found under DATA/<project_id>, an empty
    ANALYSIS/<project_id>/qc_ngi/<sample>/{fastqc,fastq_screen} tree
    is created.
    """
    analysis_cfg = ngi_config['analysis']
    project_root = os.path.join(analysis_cfg['base_root'],
                                analysis_cfg['sthlm_root'],
                                analysis_cfg['top_dir'])
    analysis_dir = os.path.join(project_root, 'ANALYSIS', project_id)
    data_dir = os.path.join(project_root, 'DATA', project_id)

    qc_ngi_dir = os.path.join(analysis_dir, 'qc_ngi')
    fs.create_folder(qc_ngi_dir)
    for sample_id in os.listdir(data_dir):
        sample_dir_qc = os.path.join(qc_ngi_dir, sample_id)
        fs.create_folder(sample_dir_qc)
        # Empty per-tool subfolders; no report files are mocked here.
        fs.create_folder(os.path.join(sample_dir_qc, 'fastqc'))
        fs.create_folder(os.path.join(sample_dir_qc, 'fastq_screen'))
def produce_analysis_piper(ngi_config, project_id):
    """Create a mock Piper analysis folder tree for *project_id*.

    Builds ANALYSIS/<project_id>/piper_ngi with the standard numbered
    Piper stage directories, touches empty per-sample bam/vcf result
    files, and writes a version_report.txt into the logs folder.

    :param ngi_config: parsed ngi_pipeline configuration (dict-like).
    :param project_id: project whose DATA folder lists the samples.
    """
    # Create piper_ngi
    analysis_dir = os.path.join(ngi_config['analysis']['base_root'],
                                ngi_config['analysis']['sthlm_root'],
                                ngi_config['analysis']['top_dir'],
                                'ANALYSIS', project_id)
    data_dir = os.path.join(ngi_config['analysis']['base_root'],
                            ngi_config['analysis']['sthlm_root'],
                            ngi_config['analysis']['top_dir'],
                            'DATA', project_id)
    piper_ngi_dir = os.path.join(analysis_dir, 'piper_ngi')
    fs.create_folder(piper_ngi_dir)
    # Standard Piper stage directories, in pipeline order.
    piper_dirs = ['01_raw_alignments',
                  '02_preliminary_alignment_qc',
                  '03_genotype_concordance',
                  '04_merged_aligments',
                  '05_processed_alignments',
                  '06_final_alignment_qc',
                  '07_variant_calls',
                  '08_misc']
    for piper_dir in piper_dirs:
        current_dir = os.path.join(piper_ngi_dir, piper_dir)
        fs.create_folder(current_dir)
        # Touch empty per-sample result files for the two stages that
        # downstream tooling inspects.
        if piper_dir == '05_processed_alignments':
            for sample_id in os.listdir(data_dir):
                bam_file = '{}.clean.dedup.bam'.format(sample_id)
                fs.touch(os.path.join(current_dir, bam_file))
        if piper_dir == '07_variant_calls':
            for sample_id in os.listdir(data_dir):
                vcf_file = '{}.clean.dedup.recal.bam.raw.indel.vcf.gz'.format(sample_id)
                fs.touch(os.path.join(current_dir, vcf_file))
    current_dir = os.path.join(piper_ngi_dir, 'sbatch')
    fs.create_folder(current_dir)
    current_dir = os.path.join(piper_ngi_dir, 'setup_xml_files')
    fs.create_folder(current_dir)
    current_dir = os.path.join(piper_ngi_dir, 'logs')
    fs.create_folder(current_dir)
    # The logs folder also carries the mock version report.
    create_version_report(current_dir)
def select_random_projects(projects_in, num_proj, application, projects_out, label):
    """Randomly pick up to *num_proj* projects matching *application*.

    Chosen projects are appended to *projects_out* as ``[project_id, label]``
    pairs; projects already present there are never picked twice. The
    special application ``'other'`` matches everything except whole genome
    re-sequencing.

    :param projects_in: dict project_id -> project info (needs 'application').
    :param num_proj: how many projects to pick; floats (from Python 3 true
        division in the callers) are floored.
    :param application: application to match, or 'other'.
    :param projects_out: output list of [project_id, label] pairs (mutated).
    :param label: tag stored with every chosen project.
    """
    # Callers compute num_proj as e.g. `n/4+1`, which is a float under
    # Python 3; floor it so the `<` comparison below can terminate by
    # reaching the target count (the original `!= num_proj` never matched
    # a float and over-selected until the iteration guard tripped).
    num_proj = int(num_proj)
    chosen_projects = 0
    iterations = 0  # Safe guard to avoid infinite loops
    application_not_in_other = ['WG re-seq']
    while chosen_projects < num_proj and iterations < 4 * len(projects_in):
        iterations += 1
        selected_proj = random.choice(list(projects_in.keys()))
        # Check if I have already picked up this element
        already_chosen = False
        for project_pair in projects_out:
            if selected_proj == project_pair[0]:
                already_chosen = True
        if already_chosen:
            # Reprocessing an element already seen: skip it; the iteration
            # guard prevents an infinite loop.
            continue
        proj_value = projects_in[selected_proj]
        if application == 'other':
            # 'other' matches every application except WG re-seq.
            if proj_value['application'] not in application_not_in_other:
                projects_out.append([selected_proj, label])
                chosen_projects += 1
        elif application == proj_value['application']:
            projects_out.append([selected_proj, label])
            chosen_projects += 1
def create(projects, ngi_config_file, fastq_1, fastq_2):
    """Reproduce a random selection of real projects as a mock environment.

    Queries the (dev) statusdb for projects, picks a mix of open/closed,
    WG-reseq/other projects, recreates their flowcells locally, organises
    them with ngi_pipeline and mocks their analysis folders.

    :param projects: target number of projects to reproduce.
    :param ngi_config_file: path to the ngi_pipeline configuration file.
    :param fastq_1: optional R1 fastq to symlink into each sample folder.
    :param fastq_2: optional R2 fastq to symlink into each sample folder.
    :returns: 1 on configuration error, otherwise None.
    """
    statusdb_conf = CONFIG.get('statusdb')
    if statusdb_conf is None:
        logger.error('No statusdb field in taca configuration file')
        return 1
    # NOTE(review): this guard only logs -- execution continues even when
    # the url is not a dev instance; presumably a `return 1` is missing.
    if 'dev' not in statusdb_conf['url']:
        logger.error('url for status db is {}, but dev must be specified in this case'.format(statusdb_conf['url']))
    couch_connection = statusdb.StatusdbSession(statusdb_conf).connection
    projectsDB = couch_connection['projects']
    project_summary = projectsDB.view('project/summary')
    # Bucket candidate projects by how long ago they were closed.
    projects_closed_more_than_three_months = {}
    projects_closed_more_than_one_month_less_than_three = {}
    projects_closed_less_than_one_month = {}
    projects_opened = {}
    current_date = datetime.datetime.today()
    date_limit_one_year = current_date - relativedelta(months=6)  # yes yes I know.. but in this way i am sure all data in in xflocell_db
    date_limit_one_month = current_date - relativedelta(months=1)
    date_limit_three_month = current_date - relativedelta(months=3)
    for row in project_summary:
        project_id = row['key'][1]
        project_status = row['key'][0]
        if 'application' not in row['value']:
            continue
        if row['value']['no_samples'] > 50:
            continue  # Skip large projects
        application = row['value']['application']
        if project_status == 'closed':
            if 'close_date' in row['value']:
                close_date = datetime.datetime.strptime(row['value']['close_date'], '%Y-%m-%d')
                if close_date > date_limit_one_year:  # If the project has been closed after the date limit
                    if close_date >= date_limit_one_month:
                        projects_closed_less_than_one_month[project_id] = {'project_name': row['value']['project_name'],
                                                                           'application': application,
                                                                           'no_samples': row['value']['no_samples']}
                    elif close_date < date_limit_one_month and close_date >= date_limit_three_month:
                        projects_closed_more_than_one_month_less_than_three[project_id] = {'project_name': row['value']['project_name'],
                                                                                          'application': application,
                                                                                          'no_samples': row['value']['no_samples']}
                    elif close_date < date_limit_three_month:
                        projects_closed_more_than_three_months[project_id] = {'project_name': row['value']['project_name'],
                                                                              'application': application,
                                                                              'no_samples': row['value']['no_samples']}
        elif project_status == 'open':
            # Only open projects with some sequencing done are usable.
            if 'lanes_sequenced' in row['value'] and row['value']['lanes_sequenced'] > 0:
                projects_opened[project_id] = {'project_name': row['value']['project_name'],
                                               'application': application,
                                               'no_samples': row['value']['no_samples']}
        else:
            print('status {}'.format(project_status))
    ## Now I can parse the x_flowcell db to check what I can and cannot use
    # Aim for 2/3 whole-genome re-seq projects, 1/3 everything else,
    # spread evenly over the four age buckets.
    # NOTE(review): under Python 3, `whole_genome_projects/4+1` is a float;
    # select_random_projects is expected to cope with that.
    whole_genome_projects = int(2*projects/3)
    projects_to_reproduce = []
    select_random_projects(projects_closed_more_than_three_months,
                           whole_genome_projects/4+1,
                           'WG re-seq',
                           projects_to_reproduce,
                           'WGreseq_tot_closed')
    select_random_projects(projects_closed_more_than_one_month_less_than_three,
                           whole_genome_projects/4+1,
                           'WG re-seq',
                           projects_to_reproduce,
                           'WGreseq_closed_clean_no_del')
    select_random_projects(projects_closed_less_than_one_month,
                           whole_genome_projects/4+1,
                           'WG re-seq',
                           projects_to_reproduce,
                           'WGreseq_closed_no_clean')
    select_random_projects(projects_opened,
                           whole_genome_projects/4+1,
                           'WG re-seq',
                           projects_to_reproduce,
                           'WGreseq_open')
    other_projects = int(projects/3)
    select_random_projects(projects_closed_more_than_three_months,
                           other_projects/4+1,
                           'other',
                           projects_to_reproduce,
                           'noWGreseq_tot_closed')
    select_random_projects(projects_closed_more_than_one_month_less_than_three,
                           other_projects/4+1,
                           'other',
                           projects_to_reproduce,
                           'noWGreseq_closed_clean_no_del')
    select_random_projects(projects_closed_less_than_one_month,
                           other_projects/4+1,
                           'other',
                           projects_to_reproduce,
                           'noWGreseq_closed_no_clean')
    select_random_projects(projects_opened,
                           other_projects/4+1,
                           'other',
                           projects_to_reproduce,
                           'noWGreseq_open')
    # Create ngi_pipeline enviroment
    print('#NGI_CONFIG varaible is {}. This variable needs to be in the .bashrc file'.format(ngi_config_file))
    print('NGI_CONFIG={}'.format(ngi_config_file))
    try:
        ngi_config = conf.load_config(ngi_config_file)
    except IOError as e:
        # NOTE(review): `e.message` does not exist on Python 3 exceptions;
        # this would raise AttributeError (use str(e) instead).
        print('ERROR: {}'.format(e.message))
    # Create uppmax env
    paths = create_uppmax_env(ngi_config)
    print('#Going to reproduce {} projects (if this number is different from the one you specified.... trust me... do not worry'.format(len(projects_to_reproduce)))
    # Scan over x_flowcell and reproduce FCs
    flowcellDB = couch_connection['x_flowcells']
    reproduced_projects = {}
    for fc_doc in flowcellDB:
        try:
            samplesheet_csv = flowcellDB[fc_doc]['samplesheet_csv']
        except KeyError:
            continue  # Parse only FC that have a samplesheet
        # Check if this FC contains one of the proejcts I need to replicate.
        projects_in_FC = set()
        if 'SampleName' in samplesheet_csv[0]:
            projects_in_FC = set([line['SampleName'].split('_')[0] for line in samplesheet_csv])
        else:
            projects_in_FC = set([line['Sample_Name'].split('_')[0] for line in samplesheet_csv])
        found = False
        for project_pair in projects_to_reproduce:
            project = project_pair[0]
            if project in projects_in_FC:
                # This FC needs to be created
                if not found:
                    # Create the FC only the first time I see a project belonging to it
                    create_FC(paths['flowcell_inbox'], flowcellDB[fc_doc]['RunInfo']['Id'], samplesheet_csv, fastq_1, fastq_2)
                    found = True
                # But I keep track of all projects-run I need to organise
                if project not in reproduced_projects:
                    reproduced_projects[project] = []
                reproduced_projects[project].append(flowcellDB[fc_doc]['RunInfo']['Id'])
    print('#Reproduced {} project (if the numbers diffear do not worry, most likely we selected projects without runs)'.format(len(reproduced_projects)))
    for project in projects_to_reproduce:
        if project[0] in reproduced_projects:
            print('# {}: {}'.format(project[0], project[1]))
    # Need to output the command to organise
    to_be_deleted = []
    for project in reproduced_projects:
        for FC in reproduced_projects[project]:
            print('Running: ngi_pipeline_start.py organize flowcell {} -p {}'.format(FC, project))
            with open('ngi_pipeline_local.logs', 'w') as NGILOGS:
                return_value = subprocess.call(['ngi_pipeline_start.py',
                                                'organize',
                                                'flowcell',
                                                '{}'.format(FC),
                                                '-p',
                                                '{}'.format(project)],
                                               stdout=NGILOGS, stderr=NGILOGS)
            if return_value > 0:
                # Organisation failed (most likely the project is missing
                # from Charon); drop it from the reproduced set.
                print('#project {} not organised: have a look to the logs, but most likely this projec is not in charon'.format(project))
                if project not in to_be_deleted:
                    to_be_deleted.append(project)
    for project in to_be_deleted:
        del reproduced_projects[project]
    # Create ANALYSIS --
    for project in projects_to_reproduce:
        if project[0] in reproduced_projects:  # Only for projects that I know I have organised
            produce_analysis_qc_ngi(ngi_config, project[0])
            if project[1].startswith('WGreseq'):
                produce_analysis_piper(ngi_config, project[0])
    # Store in a file the results
    with open('projects.txt', 'w') as PROJECTS:
        for project in projects_to_reproduce:
            if project[0] in reproduced_projects:
                PROJECTS.write(u'{}:{}\n'.format(project[0], project[1]))
|
SciLifeLab/TACA
|
taca/testing/create_uppmax_like_env.py
|
Python
|
mit
| 20,945
|
[
"BWA",
"VisIt"
] |
9188b554b393a74c038d2c99334e8cee23dc84e46e5f93a06683401f9f7483b3
|
from __future__ import print_function
import time
import numpy as np
import sympy as sy
from bokeh.objects import Plot, DataRange1d, LinearAxis, ColumnDataSource, Glyph, Grid, Legend
from bokeh.widgetobjects import Slider, TextInput, HBox, VBox, Dialog
from bokeh.glyphs import Patch, Line, Text
from bokeh.document import Document
from bokeh.session import Session
from requests.exceptions import ConnectionError
# Connect to a running bokeh-server and register this document under
# the name 'taylor_server'.
document = Document()
session = Session()
session.use_doc('taylor_server')
session.load_document(document)

# Symbolic variable and the initial expression / Taylor order shown.
xs = sy.Symbol('x')
expr = sy.exp(-xs)*sy.sin(xs)
order = 1
def taylor(fx, xs, order, x_range=(0, 1), n=200):
    """Sample *fx* and its Taylor expansion of the given *order*.

    Returns three arrays: the sample grid over *x_range*, the exact
    function values, and the truncated Taylor series values.
    """
    lo, hi = x_range
    grid = np.linspace(float(lo), float(hi), n)

    exact = sy.lambdify(xs, fx, modules=['numpy'])(grid)

    series_expr = fx.series(xs, n=order).removeO()
    if series_expr.is_Number:
        # A constant series would broadcast to a scalar; expand it to
        # a full array matching the grid.
        approx = np.zeros_like(grid)
        approx.fill(float(series_expr))
    else:
        approx = sy.lambdify(xs, series_expr, modules=['numpy'])(grid)

    return grid, exact, approx
def update_data():
    """Recompute the curves for the current `expr`/`order` and push the
    updated document to the bokeh server.

    Mutates the module-level plot, legend, data source and slider.
    """
    x, fy, ty = taylor(expr, xs, order, (-2*sy.pi, 2*sy.pi), 200)

    plot.title = "%s vs. taylor(%s, n=%d)" % (expr, expr, order)
    legend.legends = {
        "%s" % expr: [line_f_glyph],
        "taylor(%s)" % expr: [line_t_glyph],
    }
    source.data = dict(x=x, fy=fy, ty=ty)
    slider.value = order

    session.store_document(document)
# Shared data source: x grid, exact values (fy) and Taylor values (ty);
# filled in later by update_data().
source = ColumnDataSource(data=dict(
    x = [],
    fy = [],
    ty = [],
))

# Auto-ranging axes driven by the data source columns.
xdr = DataRange1d(sources=[source.columns("x")])
ydr = DataRange1d(sources=[source.columns("fy")])

plot = Plot(data_sources=[source], x_range=xdr, y_range=ydr, plot_width=800, plot_height=400)

# Blue line: the exact function.
line_f = Line(x="x", y="fy", line_color="blue", line_width=2)
line_f_glyph = Glyph(data_source=source, xdata_range=xdr, ydata_range=ydr, glyph=line_f)
plot.renderers.append(line_f_glyph)

# Red line: the Taylor approximation.
line_t = Line(x="x", y="ty", line_color="red", line_width=2)
line_t_glyph = Glyph(data_source=source, xdata_range=xdr, ydata_range=ydr, glyph=line_t)
plot.renderers.append(line_t_glyph)

xaxis = LinearAxis(plot=plot, dimension=0)
yaxis = LinearAxis(plot=plot, dimension=1)
xgrid = Grid(plot=plot, dimension=0, axis=xaxis)
ygrid = Grid(plot=plot, dimension=1, axis=yaxis)

# Legend entries are (re)assigned in update_data().
legend = Legend(plot=plot, orientation="bottom_left")
plot.renderers.append(legend)
def on_slider_value_change(obj, attr, old, new):
    """Slider callback: update the module-level Taylor order and redraw."""
    global order
    order = int(new)
    update_data()
def on_text_value_change(obj, attr, old, new):
    """Text-input callback: parse the new expression and redraw.

    Invalid input pops up the error dialog instead of updating the plot.
    """
    try:
        global expr
        expr = sy.sympify(new, dict(x=xs))
    except (sy.SympifyError, TypeError, ValueError) as exception:
        dialog.content = str(exception)
        dialog.visible = True
        session.store_objects(dialog)
    else:
        update_data()
dialog = Dialog(title="Invalid expression", buttons=["Close"])
slider = Slider(start=1, end=20, value=order, step=1, title="Order:")
slider.on_change('value', on_slider_value_change)
text = TextInput(value=str(expr), title="Expression:")
text.on_change('value', on_text_value_change)
inputs = HBox(children=[slider, text])
layout = VBox(children=[inputs, plot, dialog])
document.add(layout)
update_data()
if __name__ == "__main__":
link = session.object_link(document._plotcontext)
print("Please visit %s to see the plots" % link)
try:
while True:
session.load_document(document)
time.sleep(0.5)
except KeyboardInterrupt:
print()
except ConnectionError:
print("Connection to bokeh-server was terminated")
|
sahat/bokeh
|
examples/glyphs/taylor_server.py
|
Python
|
bsd-3-clause
| 3,473
|
[
"VisIt"
] |
03ee25590c48c974efcdf5fb4a862438df1267adbc2eb9be93700a1853eb15e6
|
# =====================================================================
# HPClusterJobConfigurator
# Copyright (C) 2014 by Gabriel Nützi <gnuetzi (at) gmail (dot) com>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# =====================================================================
#!/usr/bin/env python3
# Prepares the render folder for cluster rendering
import sys,os, subprocess, ctypes,traceback
if sys.version_info[0] != 3:
print("This script is only python3 compatible!")
exit(1)
import jsonpickle
import itertools
from argparse import ArgumentParser
from attrdict import AttrMap
from HPCJobConfigurator.jobGenerators import importHelpers as iH
from HPCJobConfigurator.jobGenerators import commonFunctions as cF
from HPCJobConfigurator.jobGenerators.stringExpression import Expression as StrExpr
from . import getSimFileInfos
class MyOptParser(ArgumentParser):
    """ArgumentParser that raises instead of exiting the process.

    argparse's default ``error`` calls ``sys.exit(2)``; for library use we
    print the help text and raise ValueError so callers can catch it.
    """

    def error(self, msg):
        """Print usage help, then raise ValueError with the parse error."""
        self.print_help()
        # Fixed typo in the raised message: "occured" -> "occurred".
        raise ValueError("Error occurred: " + msg)
STATUS_FINISHED = "finished"
STATUS_RECOVER = "recover"
def walkDependencies(stack, files):
    """Depth-first validation of checkpoint-file dependencies.

    Walks *stack* (a list of file records with keys 'status',
    'dependencies' and 'absPath'), resolving each dependency id through
    ``files['id']``. A recoverable file must have all its dependencies
    present and finished; a finished file tolerates missing dependency ids.

    :param stack: list of file records to check (consumed/extended in place).
    :param files: dict with an 'id' mapping from dependency id to file record.
    :returns: True if the frame is invalid (a dependency is unresolvable),
        False otherwise.
    :raises ValueError: if a recoverable file references an unknown
        dependency id, or a file has an unknown status.
    """
    invalidFrame = False
    while stack:
        currFile = stack[-1]
        # Only check dependencies of this file if it has not already been done.
        if "depCheck" in currFile and currFile["depCheck"]:
            # Already checked earlier in the walk.
            stack.pop()
            continue

        # Resolve dependency ids to file records.
        if currFile["status"] == STATUS_FINISHED:
            # For finished files we dont need to check dependencies, but do
            # it anyway; a missing depId does not matter (file is finished).
            depFiles = [files["id"][depId] for depId in currFile["dependencies"]
                        if depId in files["id"]]
        elif currFile["status"] == STATUS_RECOVER:
            try:
                depFiles = [files["id"][depId] for depId in currFile["dependencies"]]
            except KeyError as e:
                raise ValueError("For recoverable file %s could not locate dependency id : "
                                 % currFile["absPath"] + str(e))
        else:
            raise ValueError("Status %s for file %s not valid!"
                             % (currFile["status"], currFile["absPath"]))

        try:
            # For all dependency files check status: a dependency that still
            # needs recovery makes the whole frame invalid.
            for depFile in depFiles:
                if depFile["status"] == STATUS_RECOVER:
                    print("""File %s depends on file %s which also
has status recover. It should be finished instead! -> classify frame as invalid """
                          % (currFile["absPath"], depFile["absPath"]))
                    # The original used a bare `raise` with no active
                    # exception (accidental RuntimeError); raise explicitly.
                    raise RuntimeError("dependency has status recover")

            currFile["depCheck"] = True
            # Push all dep files on the stack for their own check.
            stack += depFiles
        except RuntimeError:
            print("Could not resolve dependency for file: %s" % currFile)
            invalidFrame = True
            stack = None  # terminate the walk
    return invalidFrame
def writeFileMoverProcessFile(pipelineSpecs, processFrames):
    """Write one JSON file-mover task list per render process.

    Each process file contains the shared per-tool directory/symlink setup
    tasks followed by the file moves of the frames assigned to that process.

    :param pipelineSpecs: pipeline description with 'pipelineTools' and
        'fileMover' sections.
    :param processFrames: per-process lists of frame records (each frame
        carries its own 'fileMover' task list).
    """
    pipelineTools = pipelineSpecs["pipelineTools"]

    toolTasks = []
    # Set up all input/output folders for each tool
    for toolName, tool in pipelineTools.items():
        toolTasks.append({"type": "makeDirs", "dir": tool["outputDir"]})
        toolTasks.append({"type": "makeDirs", "dir": tool["inputDir"]})

    # Set up simlinks (make link to outputs in input folders of parent tools)
    if "linkAllTools" in pipelineSpecs["fileMover"] and pipelineSpecs["fileMover"]["linkAllTools"]:
        for toolName, tool in pipelineTools.items():
            for parentName in tool["parents"]:
                d = pipelineTools[parentName]["inputDir"]
                toolTasks.append({"type": "symlink", "to": os.path.join(d, toolName), "from": tool["outputDir"]})

    # Set up all other dirs in makeDirectories
    if "additionalTasks" in pipelineSpecs["fileMover"]:
        toolTasks += pipelineSpecs["fileMover"]["additionalTasks"]

    # Set up all folders and links for each process
    for procIdx, fList in enumerate(processFrames):
        # The output file name is a format template taking the process index.
        o = open(pipelineSpecs["fileMover"]["fileMoverProcessFile"].format(procIdx), "w+")
        c = []
        # add for this process all tool tasks
        c += toolTasks
        # Set up all file move stuff for all frames (recover, dependend...)
        for fr in fList:
            for m in fr["fileMover"]:
                c.append(m)
        cF.jsonDump(c, o, indent=4)
        o.close()

    print("Wrote file mover pre-process files for all ranks for local directory")
def distributeFrames(opts, sortedFrames):
    """Distribute frames as evenly as possible over ``opts.processes`` ranks.

    The first ``total mod processes`` ranks receive one extra frame; ranks
    that would receive zero frames are dropped entirely.

    .. note:: *sortedFrames* is consumed (emptied) in place.

    :param opts: options object with an integer ``processes`` attribute.
    :param sortedFrames: frames to distribute, in the desired order.
    :returns: list of per-process frame lists, or None if there are no frames.
    :raises NameError: if the internal consistency checks fail.
    """
    totalFrames = len(sortedFrames)
    if totalFrames == 0:
        print("No frames to render! -> exit")
        return None

    # Base share per rank, plus one extra frame for the first remainder ranks.
    nFramePerProc = int(totalFrames / opts.processes)
    frameCountPerProc = [nFramePerProc] * opts.processes
    for i in range(0, (totalFrames - nFramePerProc * opts.processes)):
        frameCountPerProc[i] += 1
    print("Frames per Process:" + str(frameCountPerProc))

    # Ranks with no frames are skipped entirely.
    processes = opts.processes - frameCountPerProc.count(0)
    print("Number of processes to use: %i" % processes)

    frames = sortedFrames  # alias: frames are removed from the caller's list
    processFrames = []     # the list of per-process frame lists
    for procIdx in range(processes):
        # Consume this rank's share from the front of the frame list.
        maxToTake = min(frameCountPerProc[procIdx], len(frames))
        processFrames.append(frames[0:maxToTake])
        del frames[0:maxToTake]

    # check if all frames are empty in framesPerFile
    if len(sortedFrames) != 0:
        raise NameError("Something went wrong with distributing frames over processes")
    # check if all frames have been distributed correctly
    total = sum(len(procFrames) for procFrames in processFrames)
    if total != totalFrames:
        raise NameError("Something went wrong with distributing frames over processes")

    return processFrames
def recoverFrames(opts, allFrames, framesPerIdx, pipelineTools):
    """Match existing checkpoint files against the pipeline and set up recovery.

    If ``opts.validationFileInfo`` is given, the referenced JSON is loaded,
    checkpoint files are matched (by hash) to each frame's tool outputs,
    tools whose outputs are all finished are marked finished, and the file
    moves needed to recover/re-use checkpoint files are appended to each
    frame's 'fileMover' list.

    :param opts: options with the ``validationFileInfo`` path (may be falsy).
    :param allFrames: list of all frame records (mutated: 'fileMover' lists).
    :param framesPerIdx: dict frame index -> frame record.
    :param pipelineTools: pipeline tool descriptions with 'dependencies'.
    """

    def addFile(frame, file, parent=None):
        # Append the appropriate file-move task for a checkpoint file,
        # marking it used so it is only scheduled once.
        if "usedFile" in file and file["usedFile"]:
            # this file has already been used
            return

        if file["status"] == STATUS_RECOVER:
            print("added File: %s (recover)" % file["relPath"])
            # add a file move to recover this file
            frame["fileMover"].append(file["fileMoveRecover"])
            # mark file as used
            file["usedFile"] = True
        elif file["status"] == STATUS_FINISHED:
            # Finished files are only moved when a (not finished) parent
            # tool needs them as input.
            if parent:
                print("added File: %s (finished, dependent)" % file["relPath"])
                # add a file move to recover this file
                frame["fileMover"].append(file["fileMoveDependent"])
                # mark file as used
                file["usedFile"] = True

    def addTool(frame, toolName, visitedTools, parentToolName=None):
        # Recursively schedule checkpoint files for toolName and, when
        # unfinished, for all tools it depends on.
        if toolName in visitedTools:
            return
        visitedTools.add(toolName)

        frameTool = frame["tools"][toolName]

        # if tool is not finished
        if frameTool["status"] != STATUS_FINISHED:
            # add all this tools checkpoint files
            for outFileProp in frameTool["outputFiles"]:
                if not outFileProp["cpFile"] == None:
                    addFile(frame, outFileProp["cpFile"], parentToolName)
            # add all dependent tools
            depTools = pipelineTools[toolName]["dependencies"]
            if depTools:
                for depTool in depTools:
                    addTool(frame, depTool, visitedTools, toolName)
        else:
            # we are finished, but
            # if we have a parent tool (always not finished),
            # we add our finished checkpoint files
            if parentToolName:
                # add all its checkpoint files of output files
                for outFileProp in frameTool["outputFiles"]:
                    if outFileProp["cpFile"] == None:
                        raise ValueError("""Trying to add non existant checkpoint file of output file %s in tool
                        %s!""" % (str(outFileProp), toolName))
                    addFile(frame, outFileProp["cpFile"], parentToolName)
            # else:
            # if no parent given, dont do anything

    # get all file info
    if opts.validationFileInfo:
        print("Setup recovery from file info===============================")
        print("Using validation file: %s", opts.validationFileInfo)
        checkpointFiles = cF.jsonLoad(opts.validationFileInfo)

        # Index all checkpoint files by their content hash; duplicate
        # hashes are a hard error.
        cpFiles = {"hash": {}, "all": []}
        for f in checkpointFiles:
            tool = f["tool"]
            fileId = f["hash"]
            frameIdx = int(f["frameIdx"])
            ha = f["hash"]
            cpFiles["all"].append(f)
            if ha in cpFiles["hash"]:
                raise ValueError("File %s and %s have the same hash!" % (f["absPath"], cpFiles["hash"][ha]["absPath"]))
            else:
                cpFiles["hash"][ha] = f

        print("===========================================================")
        print("Determine status of all tools =============================")
        # move over all frames, for each tool and match cpFiles
        for frameIdx, frame in framesPerIdx.items():
            finished = False
            for toolName, tool in frame["tools"].items():
                # if there are checkpoint files corresponding to outputfiles of this tool
                finishedOutFiles = 0
                for outFileProp in tool["outputFiles"]:
                    # Output files are matched to checkpoint files via the
                    # hash of their 'hashString'.
                    ha = cF.makeUUID(outFileProp["hashString"])
                    outFileProp["hash"] = ha
                    if ha in cpFiles["hash"]:  # we found checkpoint file
                        cpFile = cpFiles["hash"][ha]
                        absP = cpFile["absPath"]
                        print("Frame: %i " % frameIdx +
                              " checkpoint file matched:\n\t%s\n\thash: %s\n\tstatus: %s " % (absP[:10]+'...'+absP[-20:] if len(absP) > 70 else absP, ha, cpFile["status"]))
                        outFileProp["cpFile"] = cpFile

                        if outFileProp["cpFile"]["status"] == STATUS_FINISHED:
                            finishedOutFiles += 1
                    else:
                        outFileProp["cpFile"] = None

                # if all output files are finished -> tool is finished
                if finishedOutFiles == len(tool["outputFiles"]):
                    tool["status"] = STATUS_FINISHED
                    print("Tool: %s -> finished" % toolName)

        # NOTE(review): the dependency walk below is disabled; frames with
        # unresolvable dependencies are currently not filtered out here.
        #print("Dependency check===========================================")
        ## for each frameIdx file list,
        ## travel dependency of each file and if some files are missing
        ## silently remove this file from the cpFilesPerFrame because this frameIdx can not be recovered!
        #invalidFrameIdx = set() # list for not recoverable frames! (should not happen)
        #for frameIdx,frame in framesPerIdx.items():
            #if frameIdx not in cpFilesPerFrame:
                #continue
            #stack = cpFilesPerFrame[frameIdx]["all"][:] # shallow copy (remove files from stack)
            #invalidFrame = walkDependencies(stack,cpFilesPerFrame[frameIdx])
            #if invalidFrame:
                #print("Invalid frameIdx: %i for recovery!" % frameIdx)
                #invalidFrameIdx.add(frameIdx)
                ## continue to next frame
                #continue
        ##endfor
        ## remove all files from all tools for invalid frames
        #for k in invalidFrameIdx:
            #for toolName,tool in framesPerIdx[k].items():
                #if toolName in pipelineTools.keys():
                    #tool["checkpointFiles"] = []
        #print("===========================================================")

        # setup recovery for all frames
        print("Setup pipeline tools with file info ========================")
        for frame in allFrames:
            # walk all tools in pipeline (visit all once!)
            # NOTE(review): a fresh visited set is passed per top-level tool,
            # so shared dependencies are re-visited across calls - confirm
            # this is intended (addFile's usedFile flag dedupes the moves).
            for tool in pipelineTools.keys():
                addTool(frame, tool, set())
        print("===============================================================")
def main():
    """Entry point for preparing a cluster pipeline job.

    Parses the command-line options, loads the pipeline specification JSON,
    builds the tool dependency graph, generates/recovers all frames, and
    writes the per-process files for every tool in the pipeline.

    Returns:
        0 on success, 1 on any exception (traceback and help are printed).
    """
    parser = MyOptParser()
    parser.add_argument("--pipelineSpecs", dest="pipelineSpecs", default="" ,
            help="""Json file with info about the pipeline, fileValidation, fileValidationTools.""", metavar="<path>", required=True)
    parser.add_argument("--validationFileInfo", dest="validationFileInfo", default="" ,
            help="""XML file with info about render output files.""", metavar="<path>", required=False)
    parser.add_argument("-p", "--processes", type=int, dest="processes", default=int(1),
            help="The number of processes for the cluster render", metavar="<integer>", required=True)
    try:
        print("================== Prepare for Cluster Pipeline Job============")
        opts= AttrMap(vars(parser.parse_args()))
        pipelineSpecs = cF.jsonLoad(opts.pipelineSpecs)
        pipelineTools = pipelineSpecs["pipelineTools"]
        # tool1 ---> tool2 ----> tool3
        #  : dependency on tool2
        #  :tool3 is parent
        # define parents and dependencies for all tools
        # First pass: make sure every tool has (possibly empty) dependency and parent sets.
        for toolName,tool in pipelineTools.items():
            if "dependencies" not in tool:
                tool["dependencies"]=set()
            tool["parents"]=set()
        # Second pass: invert the dependency edges to fill in each tool's parents.
        for toolName,tool in pipelineTools.items():
            for dep in tool["dependencies"]:
                t = pipelineTools[dep]
                t["parents"].add(toolName)
        frameGenerator = pipelineSpecs["frameGenerator"]
        # fileValidationSpecs = d["fileValidationSpecs"]
        # fileValidationTools = d["fileValidationTools"]
        # Important job modules to hand over to frameGenerators and processFileWriters
        importantModules = {"importHelpers":iH, "commonFunctions" : cF, "getSimFileInfos" : getSimFileInfos}
        # Generate Frames =====================================================
        # The generator is given as a "module.Class" string and is imported dynamically.
        mod, frameGenerator["generator"] = iH.importClassFromModuleString(frameGenerator["generator"])
        # hand over some modules to the frame generator!
        fgen = frameGenerator["generator"](pipelineSpecs, jobGenModules = importantModules )
        allFrames,framesPerIdx, framesToDistribute = fgen(**frameGenerator["arguments"])
        # =====================================================================
        # Formatting frames ========================================================
        # format strings in all settings (if possible) in allFrames again with itself
        for i,fr in enumerate(allFrames):
            allFrames[i] = cF.formatAll(fr,fr,formatter=StrExpr)
        # Filter Frames =======================================================
        # Match existing checkpoint files against the frames so finished work is skipped.
        recoverFrames(opts,allFrames,framesPerIdx,pipelineTools)
        #======================================================================
        # make a list of all frames which are not-completely finished
        # (that are frames where all tools with no parent (the last one) are not finished, we need at least one )
        notcompleted = lambda frame: sum( 1 if frame["tools"][toolName]["status"] != STATUS_FINISHED
                else 0 for toolName,tool in pipelineTools.items() if len(tool["parents"])==0 ) > 0
        framesCount = len(allFrames);
        allFrames = list(filter(notcompleted, allFrames))
        framesToDistribute = list(filter(notcompleted, framesToDistribute))
        print("Removed %d finished frames!" % (framesCount - len(allFrames)) )
        #count number of frames to render
        totalFrames = len(framesToDistribute);
        print("Number of frames to compute %i" % totalFrames)
        if(totalFrames == 0):
            print("No frames to distribute -> exit")
            return 0
        # Distribute the processes over the number of processes ===============
        processFrames = distributeFrames(opts,framesToDistribute)
        #======================================================================
        # Write for each tool in the pipeline the process file, for each process a seperate one
        for toolName,tool in pipelineTools.items():
            # load the class and module for the tools processFileWriter
            print("Load processFileGenerator for tool: %s" % toolName )
            mod, tool["processFileGenerator"]["generator"] = iH.importClassFromModuleString(tool["processFileGenerator"]["generator"])
            tool["processFileGenerator"]["generator"](pipelineSpecs, jobGenModules = importantModules).write(processFrames, **tool["processFileGenerator"]["arguments"])
            # if we have some info file generator , produce the output
            if "infoFileGenerator" in tool:
                print("Load infoFileGenerator for tool: %s" % toolName )
                mod, tool["infoFileGenerator"]["generator"] = iH.importClassFromModuleString(tool["infoFileGenerator"]["generator"])
                tool["infoFileGenerator"]["generator"](pipelineSpecs, jobGenModules = importantModules).write(processFrames, **tool["infoFileGenerator"]["arguments"])
        # Write FileMover process file =======================================
        writeFileMoverProcessFile(pipelineSpecs,processFrames)
        # =====================================================================
        return 0
    except Exception as e:
        print("====================================================================")
        print("Exception occured: " + str(e))
        print("====================================================================")
        traceback.print_exc(file=sys.stdout)
        parser.print_help()
        return 1
# Script entry point: propagate main()'s return code as the process exit status.
if __name__ == "__main__":
    sys.exit(main())
|
gabyx/HPCJobConfigurator
|
HPCJobConfigurator/jobGenerators/jobGeneratorMPI/generatorToolPipeline/scripts/prepareToolPipeline.py
|
Python
|
mpl-2.0
| 19,785
|
[
"VisIt"
] |
2cb97cb392a52be2208565cef42de4fc691a560f8a00d2755f1fc68fd98a23cc
|
"""
Implementation of Harwell-Boeing read/write.
At the moment not the full Harwell-Boeing format is supported. Supported
features are:
- assembled, non-symmetric, real matrices
- integer for pointer/indices
- exponential format for float values, and int format
"""
from __future__ import division, print_function, absolute_import
# TODO:
# - Add more support (symmetric/complex matrices, non-assembled matrices ?)
# XXX: reading is reasonably efficient (>= 85 % is in numpy.fromstring), but
# takes a lot of memory. Being faster would require compiled code.
# write is not efficient. Although not a terribly exciting task,
# having reusable facilities to efficiently read/write fortran-formatted files
# would be useful outside this module.
import warnings
import numpy as np
from scipy.sparse import csc_matrix
from scipy.io.harwell_boeing._fortran_format_parser import \
FortranFormatParser, IntFormat, ExpFormat
from scipy.lib.six import string_types
# Public API of this module.  The previous list named "read_hb" and "write",
# which are not defined here (the functions are hb_read/hb_write), so
# `from ... import *` would fail with an AttributeError.
__all__ = ["MalformedHeader", "hb_read", "hb_write", "HBInfo", "HBFile",
           "HBMatrixType"]
class MalformedHeader(Exception):
    """Raised when a Harwell-Boeing header cannot be parsed."""
class LineOverflow(Warning):
    """Warning emitted when a written field exceeds its fixed-width column."""
def _nbytes_full(fmt, nlines):
"""Return the number of bytes to read to get every full lines for the
given parsed fortran format."""
return (fmt.repeat * fmt.width + 1) * (nlines - 1)
class HBInfo(object):
    """Header/meta-data of a Harwell-Boeing file: title, key, per-section line
    counts, matrix type, dimensions, and the Fortran formats used for the
    pointer, indices and values sections."""

    @classmethod
    def from_data(cls, m, title="Default title", key="0", mxtype=None, fmt=None):
        """Create a HBInfo instance from an existing sparse matrix.

        Parameters
        ----------
        m : sparse matrix
            the HBInfo instance will derive its parameters from m
        title : str
            Title to put in the HB header
        key : str
            Key
        mxtype : HBMatrixType
            type of the input matrix
        fmt : dict
            not implemented

        Returns
        -------
        hb_info : HBInfo instance
        """
        pointer = m.indptr
        indices = m.indices
        values = m.data

        nrows, ncols = m.shape
        nnon_zeros = m.nnz

        if fmt is None:
            # +1 because HB use one-based indexing (Fortran), and we will write
            # the indices /pointer as such
            pointer_fmt = IntFormat.from_number(np.max(pointer+1))
            indices_fmt = IntFormat.from_number(np.max(indices+1))

            if values.dtype.kind in np.typecodes["AllFloat"]:
                values_fmt = ExpFormat.from_number(-np.max(np.abs(values)))
            elif values.dtype.kind in np.typecodes["AllInteger"]:
                values_fmt = IntFormat.from_number(-np.max(np.abs(values)))
            else:
                raise NotImplementedError("type %s not implemented yet" % values.dtype.kind)
        else:
            raise NotImplementedError("fmt argument not supported yet.")

        if mxtype is None:
            if not np.isrealobj(values):
                raise ValueError("Complex values not supported yet")
            if values.dtype.kind in np.typecodes["AllInteger"]:
                tp = "integer"
            elif values.dtype.kind in np.typecodes["AllFloat"]:
                tp = "real"
            else:
                raise NotImplementedError("type %s for values not implemented"
                                          % values.dtype)
            mxtype = HBMatrixType(tp, "unsymmetric", "assembled")
        else:
            raise ValueError("mxtype argument not handled yet.")

        def _nlines(fmt, size):
            # number of text lines needed for `size` items, fmt.repeat per line
            nlines = size // fmt.repeat
            if nlines * fmt.repeat != size:
                nlines += 1
            return nlines

        pointer_nlines = _nlines(pointer_fmt, pointer.size)
        indices_nlines = _nlines(indices_fmt, indices.size)
        values_nlines = _nlines(values_fmt, values.size)

        total_nlines = pointer_nlines + indices_nlines + values_nlines

        return cls(title, key,
            total_nlines, pointer_nlines, indices_nlines, values_nlines,
            mxtype, nrows, ncols, nnon_zeros,
            pointer_fmt.fortran_format, indices_fmt.fortran_format,
            values_fmt.fortran_format)

    @classmethod
    def from_file(cls, fid):
        """Create a HBInfo instance from a file object containg a matrix in the
        HB format.

        Parameters
        ----------
        fid : file-like matrix
            File or file-like object containing a matrix in the HB format.

        Returns
        -------
        hb_info : HBInfo instance
        """
        # First line: 72-char title followed by the key
        line = fid.readline().strip("\n")
        if not len(line) > 72:
            raise ValueError("Expected at least 72 characters for first line, "
                             "got: \n%s" % line)
        title = line[:72]
        key = line[72:]

        # Second line: four 14-char line counts, optional rhs line count
        line = fid.readline().strip("\n")
        if not len(line.rstrip()) >= 56:
            raise ValueError("Expected at least 56 characters for second line, "
                             "got: \n%s" % line)
        total_nlines = _expect_int(line[:14])
        pointer_nlines = _expect_int(line[14:28])
        indices_nlines = _expect_int(line[28:42])
        values_nlines = _expect_int(line[42:56])

        rhs_nlines = line[56:72].strip()
        if rhs_nlines == '':
            rhs_nlines = 0
        else:
            rhs_nlines = _expect_int(rhs_nlines)
        if not rhs_nlines == 0:
            raise ValueError("Only files without right hand side supported for " \
                             "now.")

        # Third line: matrix type code and dimensions
        line = fid.readline().strip("\n")
        if not len(line) >= 70:
            raise ValueError("Expected at least 72 character for third line, got:\n"
                             "%s" % line)

        mxtype_s = line[:3].upper()
        if not len(mxtype_s) == 3:
            raise ValueError("mxtype expected to be 3 characters long")

        mxtype = HBMatrixType.from_fortran(mxtype_s)
        if not mxtype.value_type in ["real", "integer"]:
            raise ValueError("Only real or integer matrices supported for "
                             "now (detected %s)" % mxtype)
        if not mxtype.structure == "unsymmetric":
            raise ValueError("Only unsymmetric matrices supported for "
                             "now (detected %s)" % mxtype)
        if not mxtype.storage == "assembled":
            raise ValueError("Only assembled matrices supported for now")

        if not line[3:14] == " " * 11:
            raise ValueError("Malformed data for third line: %s" % line)

        nrows = _expect_int(line[14:28])
        ncols = _expect_int(line[28:42])
        nnon_zeros = _expect_int(line[42:56])
        nelementals = _expect_int(line[56:70])
        if not nelementals == 0:
            raise ValueError("Unexpected value %d for nltvl (last entry of line 3)"
                             % nelementals)

        # Fourth line: the three fortran format strings
        line = fid.readline().strip("\n")
        ct = line.split()
        if not len(ct) == 3:
            raise ValueError("Expected 3 formats, got %s" % ct)

        return cls(title, key,
                   total_nlines, pointer_nlines, indices_nlines, values_nlines,
                   mxtype, nrows, ncols, nnon_zeros,
                   ct[0], ct[1], ct[2],
                   rhs_nlines, nelementals)

    def __init__(self, title, key,
            total_nlines, pointer_nlines, indices_nlines, values_nlines,
            mxtype, nrows, ncols, nnon_zeros,
            pointer_format_str, indices_format_str, values_format_str,
            right_hand_sides_nlines=0, nelementals=0):
        """Do not use this directly, but the class ctrs (from_* functions)."""
        # Apply defaults/validation *before* storing the attributes, so that
        # self.title/self.key actually hold the fallback values.  (Previously
        # the raw arguments were stored first, leaving e.g. self.title = None,
        # which made dump() fail on None.ljust.)
        if title is None:
            title = "No Title"
        if len(title) > 72:
            raise ValueError("title cannot be > 72 characters")
        if key is None:
            key = "|No Key"
        if len(key) > 8:
            warnings.warn("key is > 8 characters (key is %s)" % key, LineOverflow)
        self.title = title
        self.key = key

        self.total_nlines = total_nlines
        self.pointer_nlines = pointer_nlines
        self.indices_nlines = indices_nlines
        self.values_nlines = values_nlines

        parser = FortranFormatParser()
        pointer_format = parser.parse(pointer_format_str)
        if not isinstance(pointer_format, IntFormat):
            raise ValueError("Expected int format for pointer format, got %s"
                             % pointer_format)

        indices_format = parser.parse(indices_format_str)
        if not isinstance(indices_format, IntFormat):
            raise ValueError("Expected int format for indices format, got %s" %
                             indices_format)

        values_format = parser.parse(values_format_str)
        if isinstance(values_format, ExpFormat):
            if not mxtype.value_type in ["real", "complex"]:
                raise ValueError("Inconsistency between matrix type %s and " \
                                 "value type %s" % (mxtype, values_format))
            values_dtype = np.float64
        elif isinstance(values_format, IntFormat):
            if not mxtype.value_type in ["integer"]:
                raise ValueError("Inconsistency between matrix type %s and " \
                                 "value type %s" % (mxtype, values_format))
            # XXX: fortran int -> dtype association ?
            # builtin int (np.int was a deprecated alias for it, removed in
            # NumPy >= 1.24)
            values_dtype = int
        else:
            # values_format_str here instead of the previously-referenced,
            # undefined name `ct` (which raised NameError in this branch)
            raise ValueError("Unsupported format for values %s" % values_format_str)

        self.pointer_format = pointer_format
        self.indices_format = indices_format
        self.values_format = values_format

        self.pointer_dtype = np.int32
        self.indices_dtype = np.int32
        self.values_dtype = values_dtype

        self.pointer_nlines = pointer_nlines
        self.pointer_nbytes_full = _nbytes_full(pointer_format, pointer_nlines)

        self.indices_nlines = indices_nlines
        self.indices_nbytes_full = _nbytes_full(indices_format, indices_nlines)

        self.values_nlines = values_nlines
        self.values_nbytes_full = _nbytes_full(values_format, values_nlines)

        self.nrows = nrows
        self.ncols = ncols
        self.nnon_zeros = nnon_zeros
        self.nelementals = nelementals
        self.mxtype = mxtype

    def dump(self):
        """Gives the header corresponding to this instance as a string."""
        header = [self.title.ljust(72) + self.key.ljust(8)]

        header.append("%14d%14d%14d%14d" %
                      (self.total_nlines, self.pointer_nlines,
                       self.indices_nlines, self.values_nlines))
        header.append("%14s%14d%14d%14d%14d" %
                      (self.mxtype.fortran_format.ljust(14), self.nrows,
                       self.ncols, self.nnon_zeros, 0))

        pffmt = self.pointer_format.fortran_format
        iffmt = self.indices_format.fortran_format
        vffmt = self.values_format.fortran_format
        header.append("%16s%16s%20s" %
                      (pffmt.ljust(16), iffmt.ljust(16), vffmt.ljust(20)))
        return "\n".join(header)
def _expect_int(value, msg=None):
try:
return int(value)
except ValueError:
if msg is None:
msg = "Expected an int, got %s"
raise ValueError(msg % value)
def _read_hb_data(content, header):
# XXX: look at a way to reduce memory here (big string creation)
ptr_string = "".join([content.read(header.pointer_nbytes_full),
content.readline()])
ptr = np.fromstring(ptr_string,
dtype=np.int, sep=' ')
ind_string = "".join([content.read(header.indices_nbytes_full),
content.readline()])
ind = np.fromstring(ind_string,
dtype=np.int, sep=' ')
val_string = "".join([content.read(header.values_nbytes_full),
content.readline()])
val = np.fromstring(val_string,
dtype=header.values_dtype, sep=' ')
try:
return csc_matrix((val, ind-1, ptr-1),
shape=(header.nrows, header.ncols))
except ValueError as e:
raise e
def _write_data(m, fid, header):
def write_array(f, ar, nlines, fmt):
# ar_nlines is the number of full lines, n is the number of items per
# line, ffmt the fortran format
pyfmt = fmt.python_format
pyfmt_full = pyfmt * fmt.repeat
# for each array to write, we first write the full lines, and special
# case for partial line
full = ar[:(nlines - 1) * fmt.repeat]
for row in full.reshape((nlines-1, fmt.repeat)):
f.write(pyfmt_full % tuple(row) + "\n")
nremain = ar.size - full.size
if nremain > 0:
f.write((pyfmt * nremain) % tuple(ar[ar.size - nremain:]) + "\n")
fid.write(header.dump())
fid.write("\n")
# +1 is for fortran one-based indexing
write_array(fid, m.indptr+1, header.pointer_nlines,
header.pointer_format)
write_array(fid, m.indices+1, header.indices_nlines,
header.indices_format)
write_array(fid, m.data, header.values_nlines,
header.values_format)
class HBMatrixType(object):
    """Class to hold the matrix type.

    A Harwell-Boeing type is a 3-character code: value type (real/complex/
    pattern/integer), structure (symmetric/unsymmetric/...), and storage
    (assembled/elemental).
    """
    # q2f* translates qualified names to fortran character
    _q2f_type = {
        "real": "R",
        "complex": "C",
        "pattern": "P",
        "integer": "I",
    }
    _q2f_structure = {
            "symmetric": "S",
            "unsymmetric": "U",
            "hermitian": "H",
            "skewsymmetric": "Z",
            "rectangular": "R"
    }
    _q2f_storage = {
        "assembled": "A",
        "elemental": "E",
    }

    # inverse maps: fortran character -> qualified name
    _f2q_type = {fchar: name for name, fchar in _q2f_type.items()}
    _f2q_structure = {fchar: name for name, fchar in _q2f_structure.items()}
    _f2q_storage = {fchar: name for name, fchar in _q2f_storage.items()}

    @classmethod
    def from_fortran(cls, fmt):
        """Build an HBMatrixType from a 3-character fortran type code."""
        if len(fmt) != 3:
            raise ValueError("Fortran format for matrix type should be 3 " \
                             "characters long")
        try:
            return cls(cls._f2q_type[fmt[0]],
                       cls._f2q_structure[fmt[1]],
                       cls._f2q_storage[fmt[2]])
        except KeyError:
            raise ValueError("Unrecognized format %s" % fmt)

    def __init__(self, value_type, structure, storage="assembled"):
        self.value_type = value_type
        self.structure = structure
        self.storage = storage

        # validate against the known qualified names
        if value_type not in self._q2f_type:
            raise ValueError("Unrecognized type %s" % value_type)
        if structure not in self._q2f_structure:
            raise ValueError("Unrecognized structure %s" % structure)
        if storage not in self._q2f_storage:
            raise ValueError("Unrecognized storage %s" % storage)

    @property
    def fortran_format(self):
        """The 3-character fortran code for this type."""
        return (self._q2f_type[self.value_type]
                + self._q2f_structure[self.structure]
                + self._q2f_storage[self.storage])

    def __repr__(self):
        return "HBMatrixType(%s, %s, %s)" % \
               (self.value_type, self.structure, self.storage)
class HBFile(object):
    def __init__(self, file, hb_info=None):
        """Create a HBFile instance.

        Parameters
        ----------
        file : file-object
            StringIO work as well
        hb_info : HBInfo
            Should be given as an argument for writing, in which case the file
            should be writable.  When omitted, the header is parsed from
            *file* instead.
        """
        self._fid = file
        self._hb_info = HBInfo.from_file(file) if hb_info is None else hb_info

    @property
    def title(self):
        """Title string from the HB header."""
        return self._hb_info.title

    @property
    def key(self):
        """Key string from the HB header."""
        return self._hb_info.key

    @property
    def type(self):
        """Value type of the matrix (e.g. 'real', 'integer')."""
        return self._hb_info.mxtype.value_type

    @property
    def structure(self):
        """Structure of the matrix (e.g. 'unsymmetric')."""
        return self._hb_info.mxtype.structure

    @property
    def storage(self):
        """Storage scheme of the matrix (e.g. 'assembled')."""
        return self._hb_info.mxtype.storage

    def read_matrix(self):
        """Read the matrix data from the underlying file as a csc_matrix."""
        return _read_hb_data(self._fid, self._hb_info)

    def write_matrix(self, m):
        """Write matrix *m* to the underlying file using this file's header."""
        return _write_data(m, self._fid, self._hb_info)
def hb_read(file):
    """Read HB-format file.

    Parameters
    ----------
    file : str-like or file-like
        If a string-like object, file is the name of the file to read. If a
        file-like object, the data are read from it.

    Returns
    -------
    data : scipy.sparse.csc_matrix instance
        The data read from the HB file as a sparse matrix.

    Notes
    -----
    At the moment not the full Harwell-Boeing format is supported. Supported
    features are:

        - assembled, non-symmetric, real matrices
        - integer for pointer/indices
        - exponential format for float values, and int format
    """
    if isinstance(file, string_types):
        # a path was given: open it ourselves and make sure it gets closed
        with open(file) as fid:
            return HBFile(fid).read_matrix()
    # already a file-like object: the caller owns it
    return HBFile(file).read_matrix()
def hb_write(file, m, hb_info=None):
    """Write HB-format file.

    Parameters
    ----------
    file : str-like or file-like
        if a string-like object, file is the name of the file to read. If a
        file-like object, the data are read from it.
    m : sparse-matrix
        the sparse matrix to write
    hb_info : HBInfo
        contains the meta-data for write; derived from *m* when omitted

    Returns
    -------
    None

    Notes
    -----
    At the moment not the full Harwell-Boeing format is supported. Supported
    features are:

        - assembled, non-symmetric, real matrices
        - integer for pointer/indices
        - exponential format for float values, and int format
    """
    if hb_info is None:
        hb_info = HBInfo.from_data(m)

    if isinstance(file, string_types):
        # a path was given: open it ourselves and make sure it gets closed
        with open(file, "w") as fid:
            return HBFile(fid, hb_info).write_matrix(m)
    # already a file-like object: the caller owns it
    return HBFile(file, hb_info).write_matrix(m)
|
Universal-Model-Converter/UMC3.0a
|
data/Python/x86/Lib/site-packages/scipy/io/harwell_boeing/hb.py
|
Python
|
mit
| 18,497
|
[
"exciting"
] |
1b06b122ca1c8ae9c81e9bcffd7cd11b0bae4f95a36f6e624afbf519df3ff13d
|
# -*- coding: utf-8 -*-
"""
Client for connecting to Canvas.
"""
import json
import logging
from http import HTTPStatus
import requests
from dateutil.parser import parse
from six.moves.urllib.parse import quote_plus, urljoin
from django.apps import apps
from integrated_channels.canvas.utils import CanvasUtil
from integrated_channels.exceptions import ClientError
from integrated_channels.integrated_channel.client import IntegratedChannelApiClient
from integrated_channels.utils import generate_formatted_log, refresh_session_if_expired
LOGGER = logging.getLogger(__name__)
MESSAGE_WHEN_COURSE_WAS_DELETED = 'Course was deleted previously, skipping create/update'
class CanvasAPIClient(IntegratedChannelApiClient):
"""
Client for connecting to Canvas.
Required Canvas auth credentials to instantiate a new client object.
- canvas_base_url : the base url of the user's Canvas instance.
- client_id : the ID associated with a Canvas developer key.
- client_secret : the secret key associated with a Canvas developer key.
- refresh_token : the refresh token token retrieved by the `oauth/complete`
endpoint after the user authorizes the use of their Canvas account.
Order of Operations:
Before the client can connect with an Enterprise user's Canvas account, the user will need to
follow these steps
- Create a developer key with their Canvas account
- Provide the ECS team with their developer key's client ID and secret.
- ECS will return a url for the user to visit which will prompt authorization and redirect when
the user hits the `confirm` button.
- The redirect will hit the `oauth/complete` endpoint which will use the passed oauth code
to request the Canvas oauth refresh token and save it to the Enterprise user's Canvas API config
- The refresh token is used at client instantiation to request the user's access token, this access
token is saved to the client's session and is used to make POST and DELETE requests to Canvas.
"""
    def __init__(self, enterprise_configuration):
        """
        Instantiate a new client.

        Args:
            enterprise_configuration (CanvasEnterpriseCustomerConfiguration): An enterprise customers's
            configuration model for connecting with Canvas
        """
        super().__init__(enterprise_configuration)
        self.config = apps.get_app_config('canvas')
        # Requests session and its expiry timestamp; populated lazily by
        # _create_session() before each Canvas API interaction.
        self.session = None
        self.expires_at = None
        # Endpoint used when a brand new Canvas course must be created.
        self.course_create_url = CanvasUtil.course_create_endpoint(self.enterprise_configuration)
    def create_content_metadata(self, serialized_data):
        """
        Creates a course in Canvas.

        If course is not found, easy! create it as usual
        If course is found, it will have one of the following `workflow_state` values:
            available: issue an update with latest field values
            completed: this happens if a course has been concluded. Update it to change status
               to offer by using course[event]=offer (which makes course published in Canvas)
            unpublished: still just update
            deleted: take no action for now.

        For information of Canvas workflow_states see `course[event]` at:
        https://canvas.instructure.com/doc/api/courses.html#method.courses.update

        Args:
            serialized_data: UTF-8 encoded JSON bytes with a top-level ``course``
                dict whose ``integration_id`` holds the edX course id.

        Returns:
            (status_code, response_text) tuple describing the create/update outcome.
        """
        self._create_session()

        desired_payload = json.loads(serialized_data.decode('utf-8'))
        course_details = desired_payload['course']
        edx_course_id = course_details['integration_id']

        # Look for an already-transmitted Canvas course matching this edX course.
        located_course = CanvasUtil.find_course_by_course_id(
            self.enterprise_configuration,
            self.session,
            edx_course_id
        )
        if not located_course:
            LOGGER.info(
                generate_formatted_log(
                    'canvas',
                    self.enterprise_configuration.enterprise_customer.uuid,
                    None,
                    edx_course_id,
                    f'Creating new course with payload {desired_payload}',
                )
            )
            # Course does not exist: Create the course
            status_code, response_text = self._post(
                self.course_create_url,
                serialized_data,
            )
            created_course_id = json.loads(response_text)['id']
            # step 2: upload image_url and any other details
            self._update_course_details(created_course_id, course_details)
        else:
            workflow_state = located_course['workflow_state']
            if workflow_state.lower() == 'deleted':
                LOGGER.error(
                    generate_formatted_log(
                        'canvas',
                        self.enterprise_configuration.enterprise_customer.uuid,
                        None,
                        edx_course_id,
                        'Course with integration_id = {edx_course_id} found in deleted state, '
                        'not attempting to create/update'.format(
                            edx_course_id=edx_course_id,
                        ),
                    )
                )
                # Deliberately report success with a marker message so callers
                # do not retry the transmission for a deleted course.
                status_code = 200
                response_text = MESSAGE_WHEN_COURSE_WAS_DELETED
            else:
                # 'unpublished', 'completed' or 'available' cases
                LOGGER.warning(
                    generate_formatted_log(
                        'canvas',
                        self.enterprise_configuration.enterprise_customer.uuid,
                        None,
                        edx_course_id,
                        'Course with canvas_id = {course_id},'
                        'integration_id = {edx_course_id} found in workflow_state={workflow_state},'
                        ' attempting to update instead of creating it'.format(
                            course_id=located_course["id"],
                            edx_course_id=edx_course_id,
                            workflow_state=workflow_state,
                        ),
                    )
                )
                status_code, response_text = self._update_course_details(
                    located_course['id'],
                    course_details,
                )
        return status_code, response_text
    def update_content_metadata(self, serialized_data):
        """
        Update an existing Canvas course from the serialized course payload.

        The payload's integration id is used to locate the matching Canvas
        course before issuing the PUT; the PUT's result is returned as-is.
        """
        self._create_session()

        integration_id = self._extract_integration_id(serialized_data)
        course_id = CanvasUtil.get_course_id_from_edx_course_id(
            self.enterprise_configuration,
            self.session,
            integration_id,
        )

        url = CanvasUtil.course_update_endpoint(
            self.enterprise_configuration,
            course_id,
        )

        return self._put(url, serialized_data)
def delete_content_metadata(self, serialized_data):
self._create_session()
integration_id = self._extract_integration_id(serialized_data)
course_id = CanvasUtil.get_course_id_from_edx_course_id(
self.enterprise_configuration,
self.session,
integration_id,
)
url = '{}/api/v1/courses/{}'.format(
self.enterprise_configuration.canvas_base_url,
course_id,
)
return self._delete(url)
    def create_assessment_reporting(self, user_id, payload):
        """
        Send assessment level learner data, retrieved by the integrated channels exporter, to Canvas in the form of
        an assignment and submission.

        Args:
            user_id: the learner's edX email address (assumed to match their Canvas email)
            payload: JSON string with ``courseID``, ``subsectionID``,
                ``subsection_name``, ``points_possible`` and ``grade`` (decimal)

        Returns:
            (status_code, text) of the Canvas grade-update response.
        """
        learner_data = json.loads(payload)
        self._create_session()

        # Retrieve the Canvas user ID from the user's edx email (it is assumed that the learner's Edx
        # and Canvas emails will match).
        canvas_user_id = self._search_for_canvas_user_by_email(user_id)

        canvas_course_id = self._handle_get_user_canvas_course(canvas_user_id, learner_data['courseID'])

        # Depending on if the assignment already exists, either retrieve or create it.
        # Assessment level reporting Canvas assignments use the subsection ID as the primary identifier, whereas
        # course level reporting assignments rely on the course run key.
        assignment_id = self._handle_canvas_assignment_retrieval(
            learner_data['subsectionID'],
            canvas_course_id,
            learner_data['subsection_name'],
            learner_data['points_possible'],
            is_assessment_grade=True
        )

        # The percent grade from the grades api is represented as a decimal, but we can report the percent in the
        # request body as the string: `<int percent grade>%`
        update_grade_response = self._handle_canvas_assignment_submission(
            "{}%".format(str(learner_data['grade'] * 100)),
            canvas_course_id,
            assignment_id,
            canvas_user_id
        )

        return update_grade_response.status_code, update_grade_response.text
    def create_course_completion(self, user_id, payload):
        """
        Report a learner's final course grade to Canvas as a submission on an
        "(Edx integration) Final Grade" assignment in the matching course.

        Args:
            user_id: the learner's edX email address (assumed to match their Canvas email)
            payload: JSON string with at least ``courseID`` and ``grade`` (decimal)

        Returns:
            (status_code, text) of the Canvas grade-update response.
        """
        learner_data = json.loads(payload)
        self._create_session()

        # Retrieve the Canvas user ID from the user's edx email (it is assumed that the learner's Edx
        # and Canvas emails will match).
        canvas_user_id = self._search_for_canvas_user_by_email(user_id)

        canvas_course_id = self._handle_get_user_canvas_course(canvas_user_id, learner_data['courseID'])

        # Depending on if the assignment already exists, either retrieve or create it.
        assignment_id = self._handle_canvas_assignment_retrieval(
            learner_data['courseID'],
            canvas_course_id,
            '(Edx integration) Final Grade'
        )

        # Course completion percentage grades are exported as decimals but reported to Canvas as integer percents.
        update_grade_response = self._handle_canvas_assignment_submission(
            learner_data['grade'] * 100,
            canvas_course_id,
            assignment_id,
            canvas_user_id
        )

        return update_grade_response.status_code, update_grade_response.text
    def delete_course_completion(self, user_id, payload):
        # TODO: There isn't a great way for users to delete course completion data
        # via the Canvas API, so this transmission is intentionally a no-op.
        pass
    def cleanup_duplicate_assignment_records(self, courses):
        """
        For each course provided, iterate over assessments contained within the associated Canvas course and remove all
        but the most recent, unique assessments sorted by `updated_at`.

        Args:
            - courses: iterable set of unique course IDs

        Returns:
            (status_code, message): 200 with a summary when everything was
            removed cleanly, 400 with details when any course or individual
            assignment failed.
        """
        self._create_session()
        failures = []
        num_assignments_removed = 0
        num_failed_assignments = 0
        for edx_course in courses:
            canvas_course = CanvasUtil.find_course_by_course_id(
                self.enterprise_configuration,
                self.session,
                edx_course
            )

            # Add any missing courses to a list of failed courses
            if not canvas_course:
                failures.append(edx_course)
                continue

            canvas_assignments_url = CanvasUtil.course_assignments_endpoint(
                self.enterprise_configuration,
                canvas_course['id']
            )

            # Dict of most current, unique assignments in the course
            current_assignments = {}
            # Running list of duplicate assignments (ID's) that need to be deleted
            assignments_to_delete = []

            current_page_count = 0
            more_pages_present = True
            # Continue iterating over assignment responses while more paginated results exist or until the page count
            # limit is hit (150 pages acts as a runaway-pagination safety cap)
            while more_pages_present and current_page_count < 150:
                resp = self.session.get(canvas_assignments_url)
                if resp.status_code >= 400:
                    LOGGER.error(
                        generate_formatted_log(
                            'canvas',
                            self.enterprise_configuration.enterprise_customer.uuid,
                            None,
                            edx_course,
                            'Failed to retrieve assignments for Canvas course: {} while running deduplication, '
                            'associated edx course: {}'.format(
                                canvas_course['id'],
                                edx_course
                            )
                        )
                    )
                    more_pages_present = False
                else:
                    # Result of paginated response from the Canvas course assignments API
                    assignments_resp = resp.json()

                    # Ingest Canvas assignments API response and replace older duplicated assignments in current
                    # assignments. All older duplicated assignment IDs are added to `assignments_to_delete`
                    current_assignments, assignments_to_delete = self._parse_unique_newest_assignments(
                        current_assignments,
                        assignments_to_delete,
                        assignments_resp
                    )

                    # Determine if another page of results exists
                    next_page = CanvasUtil.determine_next_results_page(resp)
                    if next_page:
                        canvas_assignments_url = next_page
                        current_page_count += 1
                    else:
                        more_pages_present = False

            # Remove all assignments from the current course and record the number of assignments removed
            assignments_removed, individual_assignment_failures = \
                self._bulk_remove_course_assignments(
                    canvas_course.get('id'),
                    assignments_to_delete
                )

            num_assignments_removed += len(assignments_removed)
            num_failed_assignments += len(individual_assignment_failures)

        if failures or num_failed_assignments:
            message = 'Failed to dedup all assignments for the following courses: {}. ' \
                      'Number of individual assignments that failed to be deleted: {}. ' \
                      'Total assignments removed: {}.'.format(
                          failures,
                          num_failed_assignments,
                          num_assignments_removed
                      )
            status_code = 400
        else:
            message = 'Removed {} duplicate assignments from Canvas.'.format(num_assignments_removed)
            status_code = 200
        return status_code, message
    def update_participation_types(self, canvas_pks):
        """
        For each canvas course provided, send an update with the parameter of
        'restrict_enrollments_to_course_dates' to true in order to set the participation types of
        all canvas courses to "Course" instead of "Term", which allows users to view the end date

        Args:
            - canvas_pk: list of primary keys of previously transmitted courses from canvas customers
        """
        self._create_session()
        for course_pk in canvas_pks:
            integration_id = course_pk['content_id']
            try:
                course_id = CanvasUtil.get_course_id_from_edx_course_id(
                    self.enterprise_configuration,
                    self.session,
                    integration_id,
                )
                # Canvas expects form-style nested keys in the update payload.
                update_payload = {'course[restrict_enrollments_to_course_dates]': True}
                url = CanvasUtil.course_update_endpoint(
                    self.enterprise_configuration,
                    course_id,
                )
                self._put(url, json.dumps(update_payload).encode('utf-8'))
            except ClientError:
                # A previously transmitted course may no longer exist on the
                # Canvas side; skip it instead of failing the whole batch.
                LOGGER.info(
                    generate_formatted_log(
                        'canvas',
                        self.enterprise_configuration.enterprise_customer.uuid,
                        None,
                        integration_id,
                        f'Skipped course with id {integration_id}, not found in Canvas',
                    )
                )
# Private Methods
def _bulk_remove_course_assignments(self, course_id, assignments_to_remove):
"""
Take a Canvas course ID and remove all assessments associated a list of Canvas course assignment IDs.
Args:
- course_id: Canvas course ID
- assignments_to_remove: List of assignment ID's to be removed contained with the provided course.
"""
removed_items = []
failures = []
for assignment_id in assignments_to_remove:
try:
assignment_url = CanvasUtil.course_assignments_endpoint(
self.enterprise_configuration,
course_id
) + '/{}'.format(assignment_id)
self._delete(assignment_url)
removed_items.append(assignment_id)
except ClientError:
# we do not want assignment deletes to cause failures
failures.append(assignment_id)
return removed_items, failures
def _parse_unique_newest_assignments(self, current_assignments, assignments_to_delete, assignment_response_json):
"""
Ingest an assignments response from Canvas into a dictionary of most current, unique assignments found and a
running list of assignments to delete
Args:
- current_assignments: dictionary containing information on most current unique assignments contained within
a Canvas course.
Example:
{
'edX+816': {
'id': 10,
'updated_at': '2021-06-10T13:57:19Z',
},
'edX+100': {
'id': 11,
'updated_at': '2021-06-10T13:58:19Z',
}
}
- assignments_to_delete: list of Canvas assignment IDs associated with duplicate assignments to be deleted
- assignment_response_json: json repr of the requests' Response object returned by Canvas' course
assignments API
"""
for assignment in assignment_response_json:
integration_id = assignment['integration_id']
current_assignment = current_assignments.get(integration_id)
if current_assignment:
if parse(current_assignment['updated_at']) < parse(assignment['updated_at']):
assignments_to_delete.append(current_assignment['id'])
current_assignments[integration_id] = {
'id': assignment['id'],
'updated_at': assignment['updated_at']
}
else:
assignments_to_delete.append(assignment['id'])
else:
current_assignments[integration_id] = {
'id': assignment['id'],
'updated_at': assignment['updated_at']
}
return current_assignments, assignments_to_delete
    def _update_course_details(self, course_id, course_details):
        """
        Update a Canvas course's image (if provided) and publish the course.

        Also sets the course to 'offer' state by sending 'course[event]=offer',
        which makes the course published in Canvas.

        Arguments:
        - course_id (Number): Canvas Course id.
        - course_details (dict): must contain 'integration_id'; 'image_url' is
          optional and, if present, is sent as course[image_url].

        Returns:
            Tuple of (response status code, response text); both None if the
            request raised before completing (errors are logged, not raised).
        """
        response_code = None
        response_text = None
        url = CanvasUtil.course_update_endpoint(
            self.enterprise_configuration,
            course_id,
        )
        # Providing the param `event` and setting it to `offer` is equivalent to publishing the course.
        update_payload = {'course': {'event': 'offer'}}
        try:
            # There is no way to set the image in a single request during create:
            # https://canvas.instructure.com/doc/api/all_resources.html#method.courses.update
            if "image_url" in course_details:
                update_payload['course']['image_url'] = course_details['image_url']
            response_code, response_text = self._put(url, json.dumps(update_payload).encode('utf-8'))
        except Exception as course_exc:  # pylint: disable=broad-except
            # We deliberately swallow (and log) failures here so a bad image
            # update does not fail the whole course transmission.
            edx_course_id = course_details["integration_id"]
            exc_string = str(course_exc)
            LOGGER.error(
                generate_formatted_log(
                    'canvas',
                    self.enterprise_configuration.enterprise_customer.uuid,
                    None,
                    edx_course_id,
                    'Failed to update details for course, '
                    'canvas_course_id={canvas_course_id}. '
                    'Details: {details}'.format(
                        canvas_course_id=course_id,
                        details=exc_string,
                    )
                )
            )
        return response_code, response_text
def _post(self, url, data):
"""
Make a POST request using the session object to a Canvas endpoint.
Args:
url (str): The url to send a POST request to.
data (bytearray): The json encoded payload to POST.
"""
post_response = self.session.post(url, data=data)
if post_response.status_code >= 400:
raise ClientError(post_response.text, post_response.status_code)
return post_response.status_code, post_response.text
def _put(self, url, data):
"""
Make a PUT request using the session object to the Canvas course update endpoint
Args:
url (str): The canvas url to send update requests to.
data (bytearray): The json encoded payload to UPDATE. This also contains the integration
ID used to match a course with a course ID.
"""
put_response = self.session.put(url, data=data)
if put_response.status_code >= 400:
raise ClientError(put_response.text, put_response.status_code)
return put_response.status_code, put_response.text
def _delete(self, url):
"""
Make a DELETE request using the session object to the Canvas course delete endpoint.
this actually only 'conclude's a course. See this link for difference between
conclude and delete. Conclude allows bringing course back to 'offer' state
https://canvas.instructure.com/doc/api/courses.html#method.courses.destroy
Args:
url (str): The canvas url to send delete requests to.
"""
delete_response = self.session.delete(url, data='{"event":"conclude"}')
if delete_response.status_code >= 400:
raise ClientError(delete_response.text, delete_response.status_code)
return delete_response.status_code, delete_response.text
    def _extract_integration_id(self, data):
        """
        Pull the course integration ID out of an encoded course payload.

        Args:
            data (bytearray): The json encoded payload intended for a Canvas
                endpoint, shaped like {'course': {'integration_id': ...}}.

        Returns:
            str: the integration ID found in the payload.

        Raises:
            ClientError: if data is empty, is not byte-like, or carries no
                integration ID.
        """
        if not data:
            raise ClientError("No data to transmit.", HTTPStatus.NOT_FOUND.value)
        try:
            decoded_payload = data.decode("utf-8")
            decoded_json = json.loads(decoded_payload)
        except AttributeError as error:
            # AttributeError fires when `data` has no .decode (wrong type).
            # NOTE(review): invalid JSON raises json.JSONDecodeError (a
            # ValueError) which is NOT caught here and will propagate --
            # confirm that is intended.
            raise ClientError(
                f"Unable to decode data. Type of data was {type(data)}", HTTPStatus.BAD_REQUEST.value
            ) from error
        try:
            integration_id = decoded_json['course']['integration_id']
        except KeyError as error:
            LOGGER.exception(generate_formatted_log(
                'canvas',
                self.enterprise_configuration.enterprise_customer.uuid,
                None,
                None,
                f'KeyError processing decoded json. decoded payload was: {decoded_payload}'
            ), exc_info=error)
            raise ClientError(
                "Could not transmit data, no integration ID present.", HTTPStatus.NOT_FOUND.value
            ) from error
        return integration_id
    def _search_for_canvas_user_by_email(self, user_email):
        """
        Look up a Canvas user ID by searching the account with the user's email.

        Args:
            user_email (string): The email associated with both the user's Edx
                account and Canvas account.

        Returns:
            The Canvas user ID of the first search match.

        Raises:
            ClientError: if the search request fails or returns no users.
        """
        get_user_id_from_email_url = '{url_base}/api/v1/accounts/{account_id}/users?search_term={email_address}'.format(
            url_base=self.enterprise_configuration.canvas_base_url,
            account_id=self.enterprise_configuration.canvas_account_id,
            email_address=quote_plus(user_email)  # emails with unique symbols such as `+` can cause issues
        )
        rsps = self.session.get(get_user_id_from_email_url)
        if rsps.status_code >= 400:
            raise ClientError(
                "Failed to retrieve user from Canvas: received response-[{}]".format(rsps.reason),
                rsps.status_code
            )
        get_users_by_email_response = rsps.json()
        try:
            # The search may return several users; the first result is used.
            canvas_user_id = get_users_by_email_response[0]['id']
        except (KeyError, IndexError) as error:
            raise ClientError(
                "No Canvas user ID found associated with email: {}".format(user_email),
                HTTPStatus.NOT_FOUND.value
            ) from error
        return canvas_user_id
def _get_canvas_user_courses_by_id(self, user_id):
"""Helper method to retrieve all courses that a Canvas user is enrolled in."""
get_users_courses_url = '{canvas_base_url}/api/v1/users/{canvas_user_id}/courses'.format(
canvas_base_url=self.enterprise_configuration.canvas_base_url,
canvas_user_id=user_id
)
rsps = self.session.get(get_users_courses_url)
if rsps.status_code >= 400:
raise ClientError(
"Could not retrieve Canvas course list. Received exception: {}".format(
rsps.reason
),
rsps.status_code
)
return rsps.json()
    def _handle_canvas_assignment_retrieval(
        self,
        integration_id,
        course_id,
        assignment_name,
        points_possible=100,
        is_assessment_grade=False
    ):
        """
        Find (or create) the Canvas assignment used to record a grade.

        Canvas requires an assignment in order for a user to get a grade, so
        first check the course for the "final grade" assignment: it will have a
        matching integration id to the currently transmitting learner data.
        The course's assignments are paged through; if no match exists yet, a
        new assignment is created with a POST.

        Args:
            integration_id (str): the string integration id from the edx course.
            course_id (str): the Canvas course ID relating to the course which
                the client is currently transmitting learner data to.
            assignment_name (str): display name used if the assignment is created.
            points_possible: max points used if the assignment is created.
            is_assessment_grade (bool): when True the created assignment is
                omitted from the final grade.

        Returns:
            The Canvas assignment id (found or newly created).

        Raises:
            ClientError: on an error response or unexpected payload from Canvas.
        """
        # Check if the course assignment already exists
        canvas_assignments_url = '{canvas_base_url}/api/v1/courses/{course_id}/assignments'.format(
            canvas_base_url=self.enterprise_configuration.canvas_base_url,
            course_id=course_id
        )
        resp = self.session.get(canvas_assignments_url)
        more_pages_present = True
        current_page_count = 0
        assignment_id = ''
        # current_page_count serves as a timeout, limiting to a max of 150 pages of requests
        while more_pages_present and current_page_count < 150:
            if resp.status_code >= 400:
                raise ClientError(
                    "Something went wrong retrieving assignments from Canvas. Got response: {}".format(
                        resp.text,
                    ),
                    resp.status_code
                )
            assignments_resp = resp.json()
            for assignment in assignments_resp:
                try:
                    if assignment['integration_id'] == integration_id:
                        assignment_id = assignment['id']
                        break
                # The integration ID check above should ensure that we have a 200 response from Canvas,
                # but sanity catch if we have a unexpected response format
                except (KeyError, ValueError, TypeError) as error:
                    raise ClientError(
                        "Something went wrong retrieving assignments from Canvas. Got response: {}".format(
                            resp.text,
                        ),
                        resp.status_code
                    ) from error
            if not assignment_id:
                # Not found on this page: follow Canvas' pagination link, if any.
                next_page = CanvasUtil.determine_next_results_page(resp)
                if next_page:
                    resp = self.session.get(next_page)
                    current_page_count += 1
                else:
                    more_pages_present = False
            else:
                more_pages_present = False
        # Canvas requires a course assignment for a learner to be assigned a grade.
        # If no assignment has been made yet, create it.
        if not assignment_id:
            assignment_creation_data = {
                'assignment': {
                    'name': assignment_name,
                    'submission_types': 'none',
                    'integration_id': integration_id,
                    'published': True,
                    'points_possible': points_possible,
                    'omit_from_final_grade': is_assessment_grade,
                }
            }
            create_assignment_resp = self.session.post(canvas_assignments_url, json=assignment_creation_data)
            try:
                assignment_id = create_assignment_resp.json()['id']
            except (ValueError, KeyError) as error:
                raise ClientError(
                    "Something went wrong creating an assignment on Canvas. Got response: {}".format(
                        create_assignment_resp.text,
                    ),
                    create_assignment_resp.status_code
                ) from error
        return assignment_id
def _handle_canvas_assignment_submission(self, grade, course_id, assignment_id, canvas_user_id):
"""
Helper method to take necessary learner data and post to Canvas as a submission to the correlated assignment.
"""
submission_url = '{base_url}/api/v1/courses/{course_id}/assignments/' \
'{assignment_id}/submissions/{user_id}'.format(
base_url=self.enterprise_configuration.canvas_base_url,
course_id=course_id,
assignment_id=assignment_id,
user_id=canvas_user_id
)
# The percent grade from the grades api is represented as a decimal
submission_data = {
'submission': {
'posted_grade': grade
}
}
submission_response = self.session.put(submission_url, json=submission_data)
if submission_response.status_code >= 400:
raise ClientError(
"Something went wrong while posting a submission to Canvas assignment: {} under Canvas course: {}."
" Recieved response {} with the status code: {}".format(
assignment_id,
course_id,
submission_response.text,
submission_response.status_code
)
)
return submission_response
def _handle_get_user_canvas_course(self, canvas_user_id, learner_data_course_id):
"""
Helper method to take the Canvas user ID and edX course ID to find the matching Canvas course information.
"""
# With the Canvas user ID, retrieve all courses for the user.
user_courses = self._get_canvas_user_courses_by_id(canvas_user_id)
# Find the course who's integration ID matches the learner data course ID. This integration ID can be either
# an edX course run ID or course ID. Raise if no course found.
canvas_course_id = None
for course in user_courses:
if course['integration_id'] == learner_data_course_id:
canvas_course_id = course['id']
break
if not canvas_course_id:
raise ClientError(
"Course: {course_id} not found registered in Canvas for Canvas learner: {canvas_user_id}.".format(
course_id=learner_data_course_id,
canvas_user_id=canvas_user_id,
),
HTTPStatus.NOT_FOUND.value,
)
return canvas_course_id
    def _create_session(self):
        """
        Instantiate a new session object for use in connecting with Canvas.

        Each enterprise customer connecting to Canvas should have a single
        client session. Will only create a new session (with a freshly fetched
        OAuth token) if the token expiry has been reached; otherwise the
        existing session and expiry are kept as-is.
        """
        self.session, self.expires_at = refresh_session_if_expired(
            self._get_oauth_access_token,
            self.session,
            self.expires_at,
        )
    def _get_oauth_access_token(self):
        """Uses the client id, secret and refresh token to request the user's auth token from Canvas.

        Returns:
            access_token (str): the OAuth access token to access the Canvas API as the user
            expires_in (int): the number of seconds after which token will expire

        Raises:
            ClientError: If required configuration is missing, Canvas returned
                a failure response code, or an unexpected response format was
                received that we could not parse.
        """
        client_id = self.enterprise_configuration.client_id
        client_secret = self.enterprise_configuration.client_secret
        # Fail fast with a specific message for each missing piece of config.
        if not client_id:
            raise ClientError(
                "Failed to generate oauth access token: Client ID required.",
                HTTPStatus.INTERNAL_SERVER_ERROR
            )
        if not client_secret:
            raise ClientError(
                "Failed to generate oauth access token: Client secret required.",
                HTTPStatus.INTERNAL_SERVER_ERROR
            )
        if not self.enterprise_configuration.refresh_token:
            raise ClientError(
                "Failed to generate oauth access token: Refresh token required.",
                HTTPStatus.INTERNAL_SERVER_ERROR
            )
        if not self.enterprise_configuration.canvas_base_url or not self.config.oauth_token_auth_path:
            raise ClientError(
                "Failed to generate oauth access token: Canvas oauth path missing from configuration.",
                HTTPStatus.INTERNAL_SERVER_ERROR
            )
        auth_token_url = urljoin(
            self.enterprise_configuration.canvas_base_url,
            self.config.oauth_token_auth_path,
        )
        # Standard OAuth2 refresh-token grant; `state` carries the customer uuid.
        auth_token_params = {
            'grant_type': 'refresh_token',
            'client_id': client_id,
            'client_secret': client_secret,
            'state': str(self.enterprise_configuration.enterprise_customer.uuid),
            'refresh_token': self.enterprise_configuration.refresh_token,
        }
        auth_response = requests.post(auth_token_url, auth_token_params)
        if auth_response.status_code >= 400:
            raise ClientError(auth_response.text, auth_response.status_code)
        try:
            data = auth_response.json()
            return data['access_token'], data["expires_in"]
        except (KeyError, ValueError) as error:
            # ValueError covers a non-JSON body; KeyError a JSON body missing
            # the expected token fields.
            raise ClientError(auth_response.text, auth_response.status_code) from error
|
edx/edx-enterprise
|
integrated_channels/canvas/client.py
|
Python
|
agpl-3.0
| 36,426
|
[
"VisIt"
] |
22166d21b5efe27af7d00d03358a9f42274029c8a9e41be011609e4f5689e796
|
from django.conf import settings
from django.conf.urls import include, url
from django.urls import reverse_lazy
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic.base import RedirectView
from django.views import defaults as default_views
from rest_framework.routers import DefaultRouter
from users.views import UserViewSet
from instructions.views import InstructionViewSet
from salesforce.views import SalesforceCredentialViewSet, SalesforceAccessTokenViewSet
from backend.views import FrontendAppView
from rest_framework.authtoken import views
# DRF router exposing the project's REST resources under /api/v1/.
router = DefaultRouter()
router.register(r'users', UserViewSet)
router.register(r'credentials', SalesforceCredentialViewSet)
router.register(r'access_tokens', SalesforceAccessTokenViewSet)
router.register(r'instructions', InstructionViewSet)

urlpatterns = [
    # the 'api-root' from django rest-frameworks default router
    # http://www.django-rest-framework.org/api-guide/routers/#defaultrouter
    # url(r'^$', RedirectView.as_view(url=reverse_lazy('api-root'), permanent=False), name='home'),
    # Django Admin, use {% url 'admin:index' %}
    url(settings.ADMIN_URL, admin.site.urls),
    # Your stuff: custom urls includes go here
    url(r'^authentication', views.obtain_auth_token, name='authentication'),
    url(r'^auth/', include('djoser.urls.authtoken'), name='auth'),
    url(r'^api/v1/', include(router.urls)),
    # Catch-all route serving the frontend SPA -- must remain last, since it
    # matches every path.
    url(r'^', FrontendAppView.as_view(), name='home'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    urlpatterns += [
        url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
        url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
        url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
        url(r'^500/$', default_views.server_error),
    ]
    if 'debug_toolbar' in settings.INSTALLED_APPS:
        import debug_toolbar
        # Prepended so the toolbar's routes win over the catch-all SPA route.
        urlpatterns = [
            url(r'^__debug__/', include(debug_toolbar.urls)),
        ] + urlpatterns
|
Hawk94/dust
|
config/urls.py
|
Python
|
mit
| 2,306
|
[
"VisIt"
] |
804194648ca9afd76e8f277cd6fc1a31aa97ec29ea4e33325c93d2c218abfa40
|
#!/usr/bin/env python
import sys, os, shutil
from socket import gethostname
from datetime import datetime
# if it doesn't import this is probably icestorm and we won't use it anyway
try:
from sqlite3 import dbapi2 as sqlite
except:
pass
################################################################################
############### Generating html and json data from database ####################
################################################################################
class HTMLGen:
    """Generates the per-app checkbox HTML page and per-test JSON timing data
    files from the rows stored in the ``timing`` sqlite table."""

    def __init__(self, app_name, con):
        """
        Arguments:
            app_name -- application whose test timings are rendered.
            con -- open sqlite connection containing a ``timing`` table.
        """
        self.app_name = app_name
        self.con = con
        self.cr = self.con.cursor()
        self.ex = self.cr.execute
        # used to find moose resources, assume we are in trunk right now
        self.trunk_dir = '.'
        # the generated html goes here
        self.base_dir = os.path.join(self.trunk_dir, 'html')
        if not os.path.exists(self.base_dir):
            os.mkdir(self.base_dir)

    # generates the html and json data for this app
    def generateHTML(self):
        """Write the checkbox html plus one JSON data file per test.

        Each JSON file has three parts: the timing listed by time, the timing
        listed by revision, and other info listed by revision."""
        self.ex('select distinct test_name from timing where app_name = ?', (self.app_name,))
        tests = [row[0] for row in self.cr.fetchall()]
        # generate the app.html file containing the checkboxes; this also
        # creates the per-app output directory used below
        self.generateAppHTML(tests)
        base = os.path.join(self.base_dir, self.app_name)
        for test in tests:
            json_text = JSON_TEMPLATE.replace('$LABEL$', self.app_name + '.' + test)
            # fill out revision vs timing
            self.ex('select revision, seconds, date, scale, load from timing where app_name = ? and test_name = ? order by date',
                    (self.app_name, test))
            results = self.cr.fetchall()
            data = ['["' + str(r[0]) + '", ' + str(r[1]) + ']' for r in results]
            json_text = json_text.replace('$REV_DATA$', '[ ' + ', '.join(data) + ' ]')
            info = [(self.app_name, test, str(r[0]), str(r[1]), str(datetime.fromtimestamp(r[2])), str(r[3]), str(r[4])) for r in results]
            info = ['["' + '","'.join(d) + '"]' for d in info]
            json_text = json_text.replace('$INFO$', '[ ' + ', '.join(info) + ' ]')
            # fill out date vs timing; use another select statement because
            # revisions and real dates may not exactly align
            self.ex('select date, seconds from timing where app_name = ? and test_name = ? order by date',
                    (self.app_name, test))
            results = self.cr.fetchall()
            data = ['[' + str(r[0] * 1000) + ', ' + str(r[1]) + ']' for r in results]
            json_text = json_text.replace('$TIME_DATA$', '[ ' + ', '.join(data) + ' ]')
            # fixed: use a context manager so the file is closed even on error
            with open(os.path.join(base, test + '.json'), 'w') as f:
                f.write(json_text)

    # generates the app.html file that contains the list of checkboxes
    def generateAppHTML(self, tests):
        """Write app.html containing one checkbox per test name."""
        checkboxes = [CHECKBOX_TEMPLATE.replace('$TEST$', test) for test in tests]
        html = '\n'.join(checkboxes) + CHECKBOX_END
        base = os.path.join(self.base_dir, self.app_name)
        if not os.path.exists(base):
            os.mkdir(base)
        with open(os.path.join(base, self.app_name + '.html'), 'w') as f:
            f.write(html)

# templates to generate html out of
JSON_TEMPLATE = """{
"byrev":
{
"label": "$LABEL$",
"data": $REV_DATA$
},
"bytime":
{
"label": "$LABEL$",
"data": $TIME_DATA$
},
"info": $INFO$
}"""
CHECKBOX_TEMPLATE = '<div class="test"><input class="check" type="checkbox" id="$TEST$"></input><label for="$TEST$">$TEST$</label></div>'
CHECKBOX_END = '\n<br clear="all"/>'
################################################################################
####################### Database utility functions #############################
################################################################################
def createDB(fname):
    """Create a new sqlite database at ``fname`` with an empty ``timing`` table.

    The database must either not exist or not already have a timing table."""
    # fixed: print() call syntax (works on both Python 2 and 3) and the
    # connection is now closed after committing.
    print('Creating empty database at ' + fname)
    con = sqlite.connect(fname)
    cr = con.cursor()
    cr.execute(CREATE_TABLE)
    con.commit()
    con.close()
def dumpDB(fname):
    """Print every row of the ``timing`` table in the database at ``fname``."""
    # fixed: print() call syntax (works on both Python 2 and 3) and the
    # connection is now closed when done.
    print('Dumping database at ' + fname)
    con = sqlite.connect(fname)
    cr = con.cursor()
    ex = cr.execute
    ex('select * from timing')
    rows = cr.fetchall()
    for row in rows:
        print(row)
    con.close()
# Schema for the `timing` table: one row per (app, test, revision) timing
# sample. `date` is a unix timestamp and `seconds` the measured runtime;
# `scale` and `load` capture the run's problem scale and machine load.
CREATE_TABLE = """create table timing
(
app_name text,
test_name text,
revision text,
date int,
seconds real,
scale int,
load real
);"""

# Command-line usage text, printed when the script is invoked with -h.
HELP_STRING = """Usage:
-h print this help message
-c create database with table timing in ~/timingDB/timing.sqlite
the database must either not exist or not have a timing table
-d dump the contents of table timing
[list of applications] using the data in the database, generate
json data for every application in the list. Assume db at
~/timingDB/timing.sqlite
"""
# Script entry point: parse the simple flag-style arguments and treat any
# remaining arguments as application names to generate json data for.
if __name__ == '__main__':
    home = os.environ['HOME']
    fname = os.path.join(home, 'timingDB/timing.sqlite')
    argv = sys.argv[1:]
    if '-h' in argv:
        argv.remove('-h')
        # fixed: print() call syntax (works on both Python 2 and 3)
        print(HELP_STRING)
        sys.exit(0)
    if '-c' in argv:
        createDB(fname)
        argv.remove('-c')
    if '-d' in argv:
        dumpDB(fname)
        argv.remove('-d')
    if len(argv) > 0:
        con = sqlite.connect(fname)
        for app in argv:
            print('generating json data for ' + app + '.')
            gen = HTMLGen(app, con)
            gen.generateHTML()
|
gleicher27/Tardigrade
|
moose/framework/scripts/timingHTML/timing_utils.py
|
Python
|
lgpl-2.1
| 5,329
|
[
"MOOSE"
] |
96019e869263782044a6776fb8c58e9ece71ddd80a6c2a66c95697639e428c7d
|
#!/usr/bin/env python
import vtk
def main():
    """Render cone glyphs, a speed isosurface and an outline for the carotid
    flow dataset named on the command line."""
    fileName = get_program_parameters()
    colors = vtk.vtkNamedColors()
    ren1 = vtk.vtkRenderer()
    renWin = vtk.vtkRenderWindow()
    renWin.AddRenderer(ren1)
    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(renWin)

    # Create the pipeline.
    #
    reader = vtk.vtkStructuredPointsReader()
    reader.SetFileName(fileName)
    # Keep only points with scalar value above 200, then thin them out so the
    # glyphs stay readable.
    threshold = vtk.vtkThresholdPoints()
    threshold.SetInputConnection(reader.GetOutputPort())
    threshold.ThresholdByUpper(200)
    mask = vtk.vtkMaskPoints()
    mask.SetInputConnection(threshold.GetOutputPort())
    mask.SetOnRatio(5)
    cone = vtk.vtkConeSource()
    cone.SetResolution(11)
    cone.SetHeight(1)
    cone.SetRadius(0.25)
    # One cone per remaining point, oriented/scaled by the vector data.
    cones = vtk.vtkGlyph3D()
    cones.SetInputConnection(mask.GetOutputPort())
    cones.SetSourceConnection(cone.GetOutputPort())
    cones.SetScaleFactor(0.4)
    cones.SetScaleModeToScaleByVector()
    # Blue-to-red lookup table over the glyphs' scalar range.
    lut = vtk.vtkLookupTable()
    lut.SetHueRange(.667, 0.0)
    lut.Build()
    scalarRange = [0] * 2
    cones.Update()
    scalarRange[0] = cones.GetOutput().GetPointData().GetScalars().GetRange()[0]
    scalarRange[1] = cones.GetOutput().GetPointData().GetScalars().GetRange()[1]
    print("range: ", scalarRange[0], ", ", scalarRange[1])
    vectorMapper = vtk.vtkPolyDataMapper()
    vectorMapper.SetInputConnection(cones.GetOutputPort())
    vectorMapper.SetScalarRange(scalarRange[0], scalarRange[1])
    vectorMapper.SetLookupTable(lut)
    vectorActor = vtk.vtkActor()
    vectorActor.SetMapper(vectorMapper)

    # Speed contours: a translucent wireframe isosurface at value 175.
    iso = vtk.vtkContourFilter()
    iso.SetInputConnection(reader.GetOutputPort())
    iso.SetValue(0, 175)
    isoMapper = vtk.vtkPolyDataMapper()
    isoMapper.SetInputConnection(iso.GetOutputPort())
    isoMapper.ScalarVisibilityOff()
    isoActor = vtk.vtkActor()
    isoActor.SetMapper(isoMapper)
    isoActor.GetProperty().SetRepresentationToWireframe()
    isoActor.GetProperty().SetOpacity(0.25)

    # Outline of the whole dataset's bounds.
    outline = vtk.vtkOutlineFilter()
    outline.SetInputConnection(reader.GetOutputPort())
    outlineMapper = vtk.vtkPolyDataMapper()
    outlineMapper.SetInputConnection(outline.GetOutputPort())
    outlineActor = vtk.vtkActor()
    outlineActor.SetMapper(outlineMapper)
    outlineActor.GetProperty().SetColor(colors.GetColor3d("Black"))

    # Add the actors to the renderer, set the background and size.
    #
    ren1.AddActor(outlineActor)
    ren1.AddActor(vectorActor)
    ren1.AddActor(isoActor)
    ren1.SetBackground(colors.GetColor3d("Wheat"))
    renWin.SetSize(640, 480)
    # Hand-tuned camera for this dataset.
    cam1 = vtk.vtkCamera()
    cam1.SetClippingRange(17.4043, 870.216)
    cam1.SetFocalPoint(136.71, 104.025, 23)
    cam1.SetPosition(204.747, 258.939, 63.7925)
    cam1.SetViewUp(-0.102647, -0.210897, 0.972104)
    cam1.Zoom(1.6)
    ren1.SetActiveCamera(cam1)

    # Render the image.
    #
    renWin.Render()
    iren.Start()
def get_program_parameters():
    """Parse the command line and return the carotid data file name."""
    import argparse
    description = 'Visualizing blood flow in human carotid arteries.'
    epilogue = '''
    Cone glyphs indicate flow direction and magnitude.
    '''
    arg_parser = argparse.ArgumentParser(
        description=description,
        epilog=epilogue,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    arg_parser.add_argument('filename', help='carotid.vtk.')
    return arg_parser.parse_args().filename
# Run the example only when executed as a script.
if __name__ == '__main__':
    main()
|
lorensen/VTKExamples
|
src/Python/VisualizationAlgorithms/CarotidFlowGlyphs.py
|
Python
|
apache-2.0
| 3,480
|
[
"VTK"
] |
2aea7ccdea3cccbb31f9fe4f0b755a20879be5719f14d97693af8b5175c80a31
|
import linear_env
import sim_env
from actor import Actor
from critic import Critic
from replay_buffer import ReplayBuffer
import numpy as np
import tensorflow as tf
import keras.backend as kbck
import json
import time
import argparse
import matplotlib.pylab as plt
import os.path
def ou(x, mu, theta, sigma):
    """One Euler step of an Ornstein-Uhlenbeck process, used as exploration noise.

    Returns ``theta * (mu - x)`` plus Gaussian noise scaled by ``sigma``.
    Generalized to accept arrays of any shape (the original drew noise with
    ``np.random.randn(np.shape(x)[0])`` and so only supported 1-D vectors);
    the noise now always matches the shape of ``x``.
    """
    return theta * (mu - x) + sigma * np.random.standard_normal(np.shape(x))
def simulate(control, swmm ,flows):
best_reward = -1*np.inf
BUFFER_SIZE = 100000
BATCH_SIZE = 120
GAMMA = 0.99
TAU = 0.01 #Target Network HyperParameters
LRA = 0.0001 #Learning rate for Actor
LRC = 0.001 #Lerning rate for Critic
action_dim = 8
state_dim = 10
max_steps = 6000
np.random.seed(100)
EXPLORE = 100000.
episode_count = 1000
done = False
step = 0
epsilon = 1
if swmm:
if control:
#Tensorflow GPU optimization
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
kbck.set_session(sess)
# Actor, critic and replay buffer creation
actor = Actor(sess, state_dim, action_dim, BATCH_SIZE, TAU, LRA,flows)
critic = Critic(sess, state_dim, action_dim, BATCH_SIZE, TAU, LRC)
buff = ReplayBuffer(BUFFER_SIZE)
# Get the linear environment
reward_hist = []
for i in range(episode_count):
print("Episode : " + str(i) + " Replay Buffer " + str(buff.count()))
inp_name = "swmm/modelo2.inp"
inp = os.path.dirname(os.path.abspath(__file__)) + os.path.sep + inp_name
vref = np.zeros((state_dim,))
env = sim_env.sim_env(inp,vref)
rainfile()
s_t = np.divide(env.reset(),env.vmax)
total_reward = 0.
for j in range(max_steps):
## Noise addition for exploration
## Ornstein-Uhlenbeck process
loss = 0
epsilon -= 1.0 / EXPLORE
a_t = np.zeros([1,action_dim])
noise_t = np.zeros([1,action_dim])
a_t_original = actor.munet.predict(s_t.reshape(1, s_t.shape[0]))
noise_t[0,:] = max(epsilon, 0) * ou(a_t_original[0,:], 0.5 , 1 , 1.5)
#noise_t[0,4:] = max(epsilon, 0) * ou(a_t_original[0,4:], 0.5 , 1 , 1.5)
a_t[0] = np.minimum(np.maximum(a_t_original[0] + noise_t[0],np.zeros(np.shape(a_t_original))),np.ones(np.shape(a_t_original)))
#Act over the system and get info of the next states
s_t1 , r_t, done = env.step(list(a_t[0]))
s_t1 = np.divide(s_t1,env.vmax)
#Add replay buffer
buff.add(s_t, a_t[0], r_t, s_t1, done)
#Do the batch update
batch = buff.getBatch(BATCH_SIZE)
states = np.asarray([e[0] for e in batch])
actions = np.asarray([e[1] for e in batch])
rewards = np.asarray([e[2] for e in batch])
next_states = np.asarray([e[3] for e in batch])
dones = np.asarray([e[4] for e in batch])
# Get estimated q-values of the pair (next_state,mu(next_state))
actions_next = actor.target_munet.predict(next_states)
target_q_values = critic.target_qnet.predict([next_states, actions_next])
y_t = np.zeros(np.shape(actions))
for k in range(len(batch)):
if dones[k]:
y_t[k] = rewards[k]
else:
y_t[k] = rewards[k] + GAMMA*target_q_values[k]
loss += critic.qnet.train_on_batch([states,actions], y_t)
a_for_grad = actor.munet.predict(states)
grads = critic.gradients(states, a_for_grad)
actor.train(states, grads)
actor.target_train()
critic.target_train()
total_reward = total_reward + GAMMA*r_t
s_t = s_t1
if j%100==0:
print("Episode", i, "Step", j, "Reward", r_t, "Loss", loss)
if done:
break
reward_hist.append(total_reward)
np.save("reward_history_flows_"+str(flows).lower()+".npy",np.array(reward_hist))
if i%20 == 0:
print("Saving the networks...")
actor.munet.save_weights("./actors/anetwork_flows_"+str(flows).lower()+"_it_"+str(i)+".h5", overwrite=True)
critic.qnet.save_weights("./critics/cnetwork_flows_"+str(flows).lower()+"_it_"+str(i)+".h5", overwrite=True)
if total_reward > best_reward:
print("Saving Best Actor...")
np.save("best_reward"+"_flows_"+str(flows)+".npy",np.array(total_reward))
actor.munet.save_weights("./actors/best_anetwork_flows_"+str(flows).lower()+".h5", overwrite=True)
critic.qnet.save_weights("./critics/best_cnetwork_flows_"+str(flows).lower()+".h5", overwrite=True)
best_reward = total_reward
print("TOTAL REWARD @ " + str(i) +"-th Episode : Reward " + str(total_reward))
print("Total Step: " + str(step))
print("")
print("Finish.")
else:
inp_name = "swmm/modelo2.inp"
inp = os.path.dirname(os.path.abspath(__file__)) + os.path.sep + inp_name
vref = np.zeros((state_dim,))
rainfile()
env = sim_env.sim_env(inp,vref)
resv = env.free_sim()
print(np.shape(resv))
vmax = env.vmax
vmax[0] += 0.002
resv_norm = np.divide(resv,np.matlib.repmat(env.vmax,np.shape(resv)[0],1))
x = np.linspace(0,1800,np.shape(resv)[0])
font_labels = 16
font_legends = 18
ticksize = 16
width = 2.5
f , axarr = plt.subplots(nrows=1, ncols=2,figsize=(14,8),sharex=True )
## Plot Volume Results
lines = axarr[0].plot(x,resv_norm[:,:5],linewidth=width)
axarr[0].legend(lines , list(map(lambda x: "v"+str(x+1),range(5))),prop ={'size':font_legends})
axarr[0].set_title("Volumes - Tanks 1 to 5",fontsize=font_labels)
axarr[0].set_xlabel("Time(s)",fontsize=font_labels)
axarr[0].set_ylabel("Volume(%vmax)",fontsize=font_labels)
axarr[0].tick_params(labelsize=ticksize)
lines = axarr[1].plot(x,resv_norm[:,5:],linewidth=width)
axarr[1].legend(lines , list(map(lambda x: "v"+str(x+1) if x+1!=10 else "vT",range(5,10))),prop ={'size':font_legends})
axarr[1].set_title("Volumes - Tanks 6 to 9 and Storm Tank",fontsize=font_labels)
axarr[1].set_xlabel("Time(s)",fontsize=font_labels)
#axarr[0,1].set_ylabel("Volume(%vmax)",fontsize=font_labels)
axarr[1].tick_params(labelsize=ticksize)
plt.tight_layout()
plt.show()
else:
# Constants for the linear environment
Hs = 1800
A1 = 0.0063 ; mu1 = 500; sigma1 = 150
A2 = 0.018; mu2 = 550; sigma2 = 150
dt = 1
x = np.arange(Hs)
d = np.zeros((2,Hs))
if control:
#Tensorflow GPU optimization
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
kbck.set_session(sess)
# Actor, critic and replay buffer creation
actor = Actor(sess, state_dim, action_dim, BATCH_SIZE, TAU, LRA,flows)
critic = Critic(sess, state_dim, action_dim, BATCH_SIZE, TAU, LRC)
buff = ReplayBuffer(BUFFER_SIZE)
# Get the linear environment
reward_hist = []
for i in range(episode_count):
print("Episode : " + str(i) + " Replay Buffer " + str(buff.count()))
A1 += 0.0004*np.random.rand()
mu1 += 50*np.random.rand()
sigma1 += 14*np.random.rand()
A2 += 0.00096*np.random.rand()
mu2 += 50*np.random.rand()
sigma2 += 14*np.random.rand()
d[0,:] = A1*np.exp((-1*(x-mu1)**2)/(2*sigma1**2))
d[1,:] = A2*np.exp((-1*(x-mu2)**2)/(2*sigma2**2))
vref = np.zeros((state_dim,))
env = linear_env.env(dt,d,vref)
s_t = np.divide(env.reset(),env.vmax)
total_reward = 0.
for j in range(max_steps):
## Noise addition for exploration
## Ornstein-Uhlenbeck process
loss = 0
epsilon -= 1.0 / EXPLORE
a_t = np.zeros([1,action_dim])
noise_t = np.zeros([1,action_dim])
a_t_original = actor.munet.predict(s_t.reshape(1, s_t.shape[0]))
noise_t[0,:] = max(epsilon, 0) * ou(a_t_original[0,:], 0.5 , 1 , 1.5)
#noise_t[0,4:] = max(epsilon, 0) * ou(a_t_original[0,4:], 0.5 , 1 , 1.5)
a_t[0] = a_t_original[0] + noise_t[0]
#Act over the system and get info of the next states
s_t1 , r_t, done, _ = env.step(a_t[0],flows=flows)
s_t1 = np.divide(s_t1,env.vmax)
#Add replay buffer
buff.add(s_t, a_t[0], r_t, s_t1, done)
#Do the batch update
batch = buff.getBatch(BATCH_SIZE)
states = np.asarray([e[0] for e in batch])
actions = np.asarray([e[1] for e in batch])
rewards = np.asarray([e[2] for e in batch])
next_states = np.asarray([e[3] for e in batch])
dones = np.asarray([e[4] for e in batch])
# Get estimated q-values of the pair (next_state,mu(next_state))
actions_next = actor.target_munet.predict(next_states)
target_q_values = critic.target_qnet.predict([next_states, actions_next])
y_t = np.zeros(np.shape(actions))
for k in range(len(batch)):
if dones[k]:
y_t[k] = rewards[k]
else:
y_t[k] = rewards[k] + GAMMA*target_q_values[k]
loss += critic.qnet.train_on_batch([states,actions], y_t)
a_for_grad = actor.munet.predict(states)
grads = critic.gradients(states, a_for_grad)
actor.train(states, grads)
actor.target_train()
critic.target_train()
total_reward = total_reward + GAMMA*r_t
s_t = s_t1
if j%100==0:
print("Episode", i, "Step", j, "Reward", r_t, "Loss", loss)
if done:
break
reward_hist.append(total_reward)
np.save("reward_history_flows_"+str(flows).lower()+".npy",np.array(reward_hist))
if i%20 == 0:
print("Saving the networks...")
actor.munet.save_weights("./actors/anetwork_flows_"+str(flows).lower()+"_it_"+str(i)+".h5", overwrite=True)
critic.qnet.save_weights("./critics/cnetwork_flows_"+str(flows).lower()+"_it_"+str(i)+".h5", overwrite=True)
if total_reward > best_reward:
print("Saving Best Actor...")
np.save("best_reward"+"_flows_"+str(flows)+".npy",np.array(total_reward))
actor.munet.save_weights("./actors/best_anetwork_flows_"+str(flows).lower()+".h5", overwrite=True)
critic.qnet.save_weights("./critics/best_cnetwork_flows_"+str(flows).lower()+".h5", overwrite=True)
best_reward = total_reward
print("TOTAL REWARD @ " + str(i) +"-th Episode : Reward " + str(total_reward))
print("Total Step: " + str(step))
print("")
print("Finish.")
else:
d[0,:] = A1*np.exp((-1*(x-mu1)**2)/(2*sigma1**2))
d[1,:] = A2*np.exp((-1*(x-mu2)**2)/(2*sigma2**2))
vref = np.zeros((state_dim,))
env = linear_env.env(dt,d,vref)
resv, resf, resu = env.free_sim()
f , axarr = plt.subplots(nrows=1, ncols=2,figsize = (14,8) )
resv_norm = np.divide(np.transpose(resv),np.matlib.repmat(env.vmax,Hs,1))
resu = np.transpose(np.asarray(resu))
width = 2.5
font_legends = 16
font_labels = 16
ticksize = 16
## Plot Volume Results
lines = axarr[0].plot(x,resv_norm[:,:5],linewidth=width)
axarr[0].legend(lines , list(map(lambda x: "v"+str(x+1),range(5))),prop ={'size':font_legends})
axarr[0].set_title("Volumes - Tanks 1 to 5",fontsize=font_labels)
axarr[0].set_xlabel("Time(s)",fontsize=font_labels)
axarr[0].set_ylabel("Volume(%vmax)",fontsize=font_labels)
axarr[0].tick_params(labelsize=ticksize)
lines = axarr[1].plot(x,resv_norm[:,5:],linewidth=width)
axarr[1].legend(lines , list(map(lambda x: "v"+str(x+1) if x+1!=10 else "vT",range(5,10))),prop ={'size':font_legends})
axarr[1].set_title("Volumes - Tanks 6 to 9 and Storm Tank",fontsize=font_labels)
axarr[1].set_xlabel("Time(s)",fontsize=font_labels)
#axarr[0,1].set_ylabel("Volume(%vmax)",fontsize=font_labels)
axarr[1].tick_params(labelsize=ticksize)
plt.tight_layout()
plt.show()
def rainfile():
    """Generate SWMM runoff input files from two Gaussian rain profiles.

    Writes ``swmm/runoff1.dat`` and ``swmm/runoff2.dat`` (the directory must
    already exist), one sample per minute over a 1800 s horizon, each line
    formatted as ``HH:MM value``.
    """
    import numpy as np

    # Gaussian rain profiles: (amplitude, mean time, std dev) for each node.
    A1 = 0.0063; mu1 = 500; sigma1 = 150
    A2 = 0.018; mu2 = 550; sigma2 = 150
    dt = 1
    Hs = 1800  # simulation horizon in seconds
    x = np.arange(0, Hs, dt)
    d = [[], []]
    d[0] = A1*np.exp((-(x-mu1)**2)/(2*sigma1**2))  # Node 1 - left
    d[1] = A2*np.exp((-(x-mu2)**2)/(2*sigma2**2))  # Node 2 - right

    def secs_to_hour(secs_convert):
        # Format a second count as zero-padded "HH:MM" (seconds are dropped,
        # matching SWMM's expected time-series resolution here).
        hour = secs_convert//3600
        mins = (secs_convert%3600)//60
        return '{h:02d}:{m:02d}'.format(h=hour, m=mins)

    secs_hour_vec = np.vectorize(secs_to_hour)
    for k in (1, 2):
        with open('swmm/runoff%d.dat' % k, 'w') as f:
            # Downsample to one line per minute (every 60th 1-second sample).
            for i, (t, val) in enumerate(zip(secs_hour_vec(x), d[k-1])):
                if i % 60 == 0:
                    f.write(t + " " + str(val) + "\n")
if __name__ == "__main__":
    # Command-line entry point: choose control vs. free dynamics, SWMM vs. the
    # linear model, and whether flows are part of the action space.
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--control", type=int, choices=[0, 1], help="Choose between control(1) or free dynamics(0)")
    parser.add_argument("-s", "--swmm", type=int, choices=[0, 1], help="Choose between a simulation with swmm(1) or not(0)")
    parser.add_argument("-f", "--flow", type=int, choices=[0, 1], help="Choose between a simulation with flows(1) or not(0)")
    args = parser.parse_args()
    if args.flow == 1 and args.swmm == 1:
        print("Conflicting option flow 1 and swmm 1")
    else:
        # Renamed from `t0`/`tf`: `tf` shadowed the TensorFlow module alias
        # used elsewhere in this script.
        t_start = time.process_time()
        simulate(control=args.control, swmm=args.swmm, flows=args.flow)
        t_end = time.process_time()
        print("Elapsed time: ", t_end - t_start)
|
deot95/Tesis
|
Proyecto de Grado Ingeniería Electrónica/Workspace/Comparison/Full SWMM/ddpg.py
|
Python
|
mit
| 16,394
|
[
"Gaussian"
] |
a4a83ba5d7d0b6de1278a947d4fef33f0543bc91ea61a7d4e9b320884043b20a
|
# organization/views.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from django.http import HttpResponseRedirect, JsonResponse
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.contrib.messages import get_messages
from django.shortcuts import redirect, render
from exception.models import handle_record_found_more_than_one_exception,\
handle_record_not_deleted_exception, handle_record_not_found_exception, handle_record_not_saved_exception
from election_office_measure.models import CandidateCampaign, CandidateCampaignList
from follow.models import FollowOrganizationManager
from .models import Organization
from position.models import PositionEntered, PositionEnteredManager, INFORMATION_ONLY, OPPOSE, \
STILL_DECIDING, SUPPORT
from voter.models import fetch_voter_id_from_voter_device_link
import wevote_functions.admin
from wevote_functions.models import convert_to_int, get_voter_device_id
# Stance choices offered in the position add/edit forms, paired with the
# labels displayed to admins. Constants come from position.models.
ORGANIZATION_STANCE_CHOICES = (
    (SUPPORT, 'We Support'),
    (OPPOSE, 'We Oppose'),
    (INFORMATION_ONLY, 'Information Only - No stance'),
    (STILL_DECIDING, 'We Are Still Deciding Our Stance'),
)
# Module-level logger for this views module.
logger = wevote_functions.admin.get_logger(__name__)
def organization_list_view(request):
    """Render the full list of organizations, ordered by name."""
    template_values = {
        'messages_on_stage': get_messages(request),
        'organization_list': Organization.objects.order_by('name'),
    }
    return render(request, 'organization/organization_list.html', template_values)
def organization_new_view(request):
    """Show a blank organization form (authenticated users only)."""
    # Anonymous visitors are bounced to the admin sign-in page.
    if not request.user.is_authenticated():
        return redirect('/admin')
    context = {'messages_on_stage': get_messages(request)}
    return render(request, 'organization/organization_edit.html', context)
def organization_edit_view(request, organization_id):
    """Show the edit form for one organization; fall back to a blank form
    when the id does not match an existing record."""
    # If person isn't signed in, we don't want to let them visit this page yet
    if not request.user.is_authenticated():
        return redirect('/admin')
    messages_on_stage = get_messages(request)
    organization_id = convert_to_int(organization_id)

    organization_on_stage = None
    try:
        organization_on_stage = Organization.objects.get(id=organization_id)
    except Organization.MultipleObjectsReturned as e:
        handle_record_found_more_than_one_exception(e, logger=logger)
    except Organization.DoesNotExist:
        # This is fine -- the template just renders an empty "create" form.
        pass

    template_values = {'messages_on_stage': messages_on_stage}
    if organization_on_stage is not None:
        template_values['organization'] = organization_on_stage
    return render(request, 'organization/organization_edit.html', template_values)
def organization_edit_process_view(request):
    """Persist the new/edit organization form.

    Updates the record when `organization_id` matches an existing row,
    otherwise creates a fresh Organization, then redirects to the list.
    """
    # If person isn't signed in, we don't want to let them visit this page yet
    if not request.user.is_authenticated():
        return redirect('/admin')

    organization_id = convert_to_int(request.POST['organization_id'])
    organization_name = request.POST['organization_name']

    # Look for an existing record to update.
    existing_organization = None
    try:
        matches = Organization.objects.filter(id=organization_id)
        if len(matches):
            existing_organization = matches[0]
    except Exception as e:
        handle_record_not_found_exception(e, logger=logger)

    try:
        if existing_organization is not None:
            existing_organization.name = organization_name
            existing_organization.save()
            messages.add_message(request, messages.INFO, 'Organization updated.')
        else:
            Organization(name=organization_name).save()
            messages.add_message(request, messages.INFO, 'New organization saved.')
    except Exception as e:
        handle_record_not_saved_exception(e, logger=logger)
        messages.add_message(request, messages.ERROR, 'Could not save organization.')

    return HttpResponseRedirect(reverse('organization:organization_list', args=()))
def organization_position_list_view(request, organization_id):
    """List all ballot positions taken by one organization.

    Publicly viewable (no authentication check). Redirects to the organization
    list with an error message when the organization cannot be found.

    :param request: Django HTTP request
    :param organization_id: Id of the organization whose positions to show
    :return: Rendered position list, or a redirect on missing organization
    """
    messages_on_stage = get_messages(request)
    organization_id = convert_to_int(organization_id)
    # election_id = 1 # TODO We will need to provide the election_id somehow, perhaps as a global variable?
    organization_on_stage_found = False
    try:
        organization_query = Organization.objects.filter(id=organization_id)
        if len(organization_query):
            organization_on_stage = organization_query[0]
            organization_on_stage_found = True
    except Exception as e:
        handle_record_not_found_exception(e, logger=logger)
        organization_on_stage_found = False
    if not organization_on_stage_found:
        messages.add_message(request, messages.ERROR,
                             'Could not find organization when trying to retrieve positions.')
        return HttpResponseRedirect(reverse('organization:organization_list', args=()))
    else:
        organization_position_list_found = False
        try:
            # Order by stance so SUPPORT/OPPOSE/etc. entries group together.
            organization_position_list = PositionEntered.objects.order_by('stance')
            organization_position_list = organization_position_list.filter(organization_id=organization_id)
            if len(organization_position_list):
                organization_position_list_found = True
        except Exception as e:
            handle_record_not_found_exception(e, logger=logger)
        if organization_position_list_found:
            template_values = {
                'messages_on_stage': messages_on_stage,
                'organization': organization_on_stage,
                'organization_position_list': organization_position_list,
            }
        else:
            # No positions yet: the template still needs the organization itself.
            template_values = {
                'messages_on_stage': messages_on_stage,
                'organization': organization_on_stage,
            }
        return render(request, 'organization/organization_position_list.html', template_values)
def organization_add_new_position_form_view(request, organization_id):
    """Show the form for adding a new position for this organization.

    :param request: Django HTTP request (must be from an authenticated user)
    :param organization_id: Id of the organization taking the position
    :return: Rendered edit form, or redirect when the organization is missing
    """
    # If person isn't signed in, we don't want to let them visit this page yet
    if not request.user.is_authenticated():
        return redirect('/admin')
    messages_on_stage = get_messages(request)
    organization_id = convert_to_int(organization_id)
    # NOTE(review): all_is_well is never set to False below, so the final
    # `if all_is_well` guard always passes -- confirm whether it can be removed.
    all_is_well = True
    organization_on_stage_found = False
    try:
        organization_on_stage = Organization.objects.get(id=organization_id)
        organization_on_stage_found = True
    except Organization.MultipleObjectsReturned as e:
        handle_record_found_more_than_one_exception(e, logger=logger)
    except Organization.DoesNotExist:
        # This is fine, create new
        pass
    if not organization_on_stage_found:
        messages.add_message(request, messages.INFO,
                             'Could not find organization when trying to create a new position.')
        return HttpResponseRedirect(reverse('organization:organization_position_list', args=([organization_id])))
    # Prepare a drop down of candidates competing in this election
    candidate_campaign_list = CandidateCampaignList()
    candidate_campaigns_for_this_election_list \
        = candidate_campaign_list.retrieve_candidate_campaigns_for_this_election_list()
    if all_is_well:
        template_values = {
            'candidate_campaigns_for_this_election_list': candidate_campaigns_for_this_election_list,
            'messages_on_stage': messages_on_stage,
            'organization': organization_on_stage,
            # 0 signals "no candidate pre-selected" to the shared edit template.
            'organization_position_candidate_campaign_id': 0,
            'possible_stances_list': ORGANIZATION_STANCE_CHOICES,
            'stance_selected': SUPPORT,  # Default stance
        }
    return render(request, 'organization/organization_position_edit.html', template_values)
def organization_delete_existing_position_process_form_view(request, organization_id, position_id):
    """Delete one of this organization's positions, then redirect back to the
    organization's position list.

    :param request: Django HTTP request (must be from an authenticated user)
    :param organization_id: Id of the organization owning the position
    :param position_id: Id of the PositionEntered record to delete
    :return: Redirect to the organization's position list
    """
    # If person isn't signed in, we don't want to let them visit this page yet
    if not request.user.is_authenticated():
        return redirect('/admin')
    organization_id = convert_to_int(organization_id)
    position_id = convert_to_int(position_id)
    # Get the existing position
    organization_position_on_stage_found = False
    if position_id > 0:
        organization_position_on_stage = PositionEntered()
        organization_position_on_stage_found = False
        position_entered_manager = PositionEnteredManager()
        results = position_entered_manager.retrieve_position_from_id(position_id)
        if results['position_found']:
            organization_position_on_stage_found = True
            organization_position_on_stage = results['position']
    if not organization_position_on_stage_found:
        messages.add_message(request, messages.INFO,
                             "Could not find this organization's position when trying to delete.")
        return HttpResponseRedirect(reverse('organization:organization_position_list', args=([organization_id])))
    try:
        organization_position_on_stage.delete()
    except Exception as e:
        handle_record_not_deleted_exception(e, logger=logger)
        messages.add_message(request, messages.ERROR,
                             'Could not delete position.')
        return HttpResponseRedirect(reverse('organization:organization_position_list', args=([organization_id])))
    messages.add_message(request, messages.INFO,
                         'Position deleted.')
    return HttpResponseRedirect(reverse('organization:organization_position_list', args=([organization_id])))
def organization_edit_existing_position_form_view(request, organization_id, position_id):
    """Show the edit form for an existing position.

    In edit, you can only change your stance and comments, not who or what
    the position is about.

    :param request: Django HTTP request (must be from an authenticated user)
    :param organization_id: Id of the organization owning the position
    :param position_id: Id of the PositionEntered record being edited
    :return: Rendered edit form, or redirect when org/position is missing
    """
    # If person isn't signed in, we don't want to let them visit this page yet
    if not request.user.is_authenticated():
        return redirect('/admin')
    messages_on_stage = get_messages(request)
    organization_id = convert_to_int(organization_id)
    position_id = convert_to_int(position_id)
    organization_on_stage_found = False
    try:
        organization_on_stage = Organization.objects.get(id=organization_id)
        organization_on_stage_found = True
    except Organization.MultipleObjectsReturned as e:
        handle_record_found_more_than_one_exception(e, logger=logger)
    except Organization.DoesNotExist:
        # This is fine, create new
        pass
    if not organization_on_stage_found:
        messages.add_message(request, messages.INFO,
                             'Could not find organization when trying to edit a position.')
        return HttpResponseRedirect(reverse('organization:organization_position_list', args=([organization_id])))
    # Get the existing position
    organization_position_on_stage = PositionEntered()
    organization_position_on_stage_found = False
    position_entered_manager = PositionEnteredManager()
    results = position_entered_manager.retrieve_position_from_id(position_id)
    if results['position_found']:
        organization_position_on_stage_found = True
        organization_position_on_stage = results['position']
    if not organization_position_on_stage_found:
        messages.add_message(request, messages.INFO,
                             'Could not find organization position when trying to edit.')
        return HttpResponseRedirect(reverse('organization:organization_position_list', args=([organization_id])))
    # Note: We have access to the candidate campaign through organization_position_on_stage.candidate_campaign
    if organization_position_on_stage_found:
        template_values = {
            'is_in_edit_mode': True,
            'messages_on_stage': messages_on_stage,
            'organization': organization_on_stage,
            'organization_position': organization_position_on_stage,
            'possible_stances_list': ORGANIZATION_STANCE_CHOICES,
            'stance_selected': organization_position_on_stage.stance,
        }
    return render(request, 'organization/organization_position_edit.html', template_values)
def organization_save_new_or_edit_existing_position_process_form_view(request):
    """Process the position add/edit form for an organization.

    Creates a new PositionEntered or updates an existing one, then redirects
    back to the organization's position list.

    :param request: Django HTTP request (must be from an authenticated user)
    :return: Redirect response
    """
    # If person isn't signed in, we don't want to let them visit this page yet
    if not request.user.is_authenticated():
        return redirect('/admin')
    organization_id = convert_to_int(request.POST['organization_id'])
    position_id = convert_to_int(request.POST['position_id'])
    candidate_campaign_id = convert_to_int(request.POST['candidate_campaign_id'])
    measure_campaign_id = convert_to_int(request.POST['measure_campaign_id'])
    stance = request.POST.get('stance', SUPPORT)  # Set a default if stance comes in empty
    statement_text = request.POST.get('statement_text', '')  # Set a default if statement_text comes in empty
    more_info_url = request.POST.get('more_info_url', '')

    # Make sure this is a valid organization before we try to save a position
    organization_on_stage_found = False
    try:
        organization_query = Organization.objects.filter(id=organization_id)
        if len(organization_query):
            # organization_on_stage = organization_query[0]
            organization_on_stage_found = True
    except Exception as e:
        # If we can't retrieve the organization, we cannot proceed
        handle_record_not_found_exception(e, logger=logger)
    if not organization_on_stage_found:
        messages.add_message(
            request, messages.ERROR,
            "Could not find the organization when trying to create or edit a new position.")
        return HttpResponseRedirect(reverse('organization:organization_list', args=()))

    # Now retrieve the CandidateCampaign or the MeasureCampaign so we can save it with the Position
    # We need either candidate_campaign_id or measure_campaign_id
    if candidate_campaign_id:
        # BUGFIX: initialize before the try block. Previously, when the lookup
        # raised DoesNotExist/MultipleObjectsReturned this name was never bound
        # and the `if not ...` check below raised NameError.
        candidate_campaign_on_stage_found = False
        try:
            candidate_campaign_on_stage = CandidateCampaign.objects.get(id=candidate_campaign_id)
            candidate_campaign_on_stage_found = True
        except CandidateCampaign.MultipleObjectsReturned as e:
            handle_record_found_more_than_one_exception(e, logger=logger)
        except CandidateCampaign.DoesNotExist as e:
            handle_record_not_found_exception(e, logger=logger)
        if not candidate_campaign_on_stage_found:
            messages.add_message(
                request, messages.ERROR,
                "Could not find Candidate's campaign when trying to create or edit a new position.")
            if position_id:
                return HttpResponseRedirect(
                    reverse('organization:organization_position_edit', args=([organization_id], [position_id]))
                )
            else:
                return HttpResponseRedirect(
                    reverse('organization:organization_position_new', args=([organization_id]))
                )
    elif measure_campaign_id:
        # NOTE(review): MeasureCampaign support is unfinished -- the save code
        # below still assumes a candidate campaign was found.
        logger.warn("measure_campaign_id FOUND. Look for MeasureCampaign here.")
    else:
        logger.warn("Neither candidate_campaign_id nor measure_campaign_id found")
        messages.add_message(
            request, messages.ERROR,
            "Unable to find either Candidate or Measure.")
        return HttpResponseRedirect(
            reverse('organization:organization_position_list', args=([organization_id]))
        )

    organization_position_on_stage_found = False
    logger.info("position_id: {position_id}".format(position_id=position_id))
    # Retrieve position from position_id if it exists already
    if position_id > 0:
        position_entered_manager = PositionEnteredManager()
        results = position_entered_manager.retrieve_position_from_id(position_id)
        if results['position_found']:
            organization_position_on_stage_found = True
            organization_position_on_stage = results['position']
    if not organization_position_on_stage_found:
        # If a position_id hasn't been passed in, then we are trying to create a new position.
        # Check to make sure a position for this org and candidate doesn't already exist
        position_entered_manager = PositionEnteredManager()
        results = position_entered_manager.retrieve_organization_candidate_campaign_position(
            organization_id, candidate_campaign_id)
        if results['MultipleObjectsReturned']:
            messages.add_message(
                request, messages.ERROR,
                "We found more than one existing positions for this candidate. Please delete all but one position.")
            return HttpResponseRedirect(
                reverse('organization:organization_position_list', args=([organization_id]))
            )
        elif results['position_found']:
            organization_position_on_stage_found = True
            organization_position_on_stage = results['position']

    # Now save existing, or create new
    try:
        if organization_position_on_stage_found:
            # Update the position
            organization_position_on_stage.stance = stance
            organization_position_on_stage.statement_text = statement_text
            organization_position_on_stage.more_info_url = more_info_url
            organization_position_on_stage.save()
            messages.add_message(
                request, messages.INFO,
                "Position on {candidate_name} updated.".format(
                    candidate_name=candidate_campaign_on_stage.candidate_name))
        else:
            # Create new
            organization_position_on_stage = PositionEntered(
                organization_id=organization_id,
                candidate_campaign_id=candidate_campaign_on_stage.id,
                stance=stance,
                statement_text=statement_text,
                more_info_url=more_info_url,
            )
            organization_position_on_stage.save()
            messages.add_message(
                request, messages.INFO,
                "New position on {candidate_name} saved.".format(
                    candidate_name=candidate_campaign_on_stage.candidate_name))
    except Exception as e:
        handle_record_not_saved_exception(e, logger=logger)
        logger.error("Problem saving PositionEntered for CandidateCampaign")
    return HttpResponseRedirect(reverse('organization:organization_position_list', args=([organization_id])))
def organization_follow_view(request, organization_id):
    """Mark the requesting voter as following this organization (JSON API)."""
    logger.debug("organization_follow_view {organization_id}".format(
        organization_id=organization_id
    ))
    # Resolve the voter from their device cookie, then flip the follow on.
    voter_device_id = get_voter_device_id(request)
    voter_id = fetch_voter_id_from_voter_device_link(voter_device_id)
    results = FollowOrganizationManager().toggle_on_voter_following_organization(
        voter_id, organization_id)
    outcome = "success" if results['success'] else "failure"
    return JsonResponse({0: outcome})
def organization_unfollow_view(request, organization_id):
    """Remove the requesting voter's follow of this organization (JSON API)."""
    logger.debug("organization_unfollow_view {organization_id}".format(
        organization_id=organization_id
    ))
    # Resolve the voter from their device cookie, then flip the follow off.
    voter_device_id = get_voter_device_id(request)
    voter_id = fetch_voter_id_from_voter_device_link(voter_device_id)
    results = FollowOrganizationManager().toggle_off_voter_following_organization(
        voter_id, organization_id)
    outcome = "success" if results['success'] else "failure"
    return JsonResponse({0: outcome})
|
wevoteeducation/WeVoteBase
|
organization/views.py
|
Python
|
mit
| 20,794
|
[
"VisIt"
] |
7ff45d3468545310d62db05ec2c4855e0e26b17d0e754ee41c4a1a8e19debcc6
|
"""
Shared code for builders.
For developers implementing a new builder,
you should inherit from :class:`Builder`.
"""
__author__ = 'Dan Gunter <dkgunter@lbl.gov>'
__date__ = '5/29/13'
## Imports
# system
from abc import ABCMeta, abstractmethod
import copy
import logging
import multiprocessing
import Queue
import traceback
# local
from matgendb.builders import schema, util
from matgendb import util as dbutil
import six
## Logging
# Shared module logger provided by the builders utility package.
_log = util.get_builder_log("core")
## Exceptions
class BuildError(Exception):
    """Raised when a builder cannot complete its run.

    The message names both the failing builder and the underlying reason.
    """

    def __init__(self, who, why):
        msg = "Builder {} failed: {}".format(who, why)
        # super() with explicit args for Python 2/3 compatibility.
        super(BuildError, self).__init__(msg)
## Versioning (experimental)
# Version of the builder database layout handled by this module.
DB_VERSION = 1
## Functions
def parse_fn_docstring(fn):
    """Get parameter types and descriptions from a function's docstring.

    Docstrings must use the reST field format::

        :param foo: What is foo
        :type foo: int

    (``:return:`` / ``:rtype:`` fields are tolerated but ignored.)

    :param fn: Function whose docstring will be parsed
    :type fn: function
    :return: A map of parameter names, each with keys 'type' and 'desc'.
             Empty if the function has no docstring.
    :rtype: dict
    :raise: ValueError if a ':type' field names a parameter with no
            preceding ':param' field.
    """
    doc = fn.__doc__
    if doc is None:
        # BUGFIX: previously raised AttributeError on undocumented functions.
        return {}
    params = {}
    for line in doc.split("\n"):
        line = line.strip()
        if line.startswith(":param"):
            _, name, desc = line.split(":", 2)
            name = name[6:].strip()  # drop the leading 'param '
            params[name] = {'desc': desc.strip()}
        elif line.startswith(":type"):
            _, name, desc = line.split(":", 2)
            name = name[5:].strip()  # drop the leading 'type '
            if name not in params:
                raise ValueError("'type' without 'param' for {}".format(name))
            params[name]['type'] = desc.strip()
    return params
## Classes
class Collections(object):
    """Interface to normalized names for collections.

    After initialization with a MongoDB database and optional parameters,
    you can access collections in `known_collections` as attributes; the
    underlying collection objects are fetched lazily on first access.
    """

    #: Collection names that are accessible as attributes
    #: of an instance of this class
    known_collections = [
        'tasks', 'materials', 'diffraction_patterns',
        'electrodes', 'conversion_electrodes', 'bandstructures', 'icsd',
        'phase_diagrams', 'brototypes', 'electronic_structure']

    # Supported range of collection-naming-scheme versions.
    MIN_VER = 1
    MAX_VER = 1

    def __init__(self, db, version=1, prefix=None, task_suffix=None):
        """Set collections from database.

        :param db: MongoDB database, but really anything that acts like a dict.
                   If None, it will be ignored.
        :type db: dict-like object
        :param version: Version of naming scheme for the collections
        :type version: int
        :param prefix: Prefix string to put before collection names, e.g. "dahn".
                       Full collection name will be <prefix>.<name>;
                       don't include '.' in the input.
        :type prefix: str
        :param task_suffix: Add this suffix to the tasks collection. Used for
                            merged collections for sandboxes.
        :type task_suffix: str
        :raise: ValueError if `version` is not known
        """
        if not self.MIN_VER <= version <= self.MAX_VER:
            raise ValueError("Bad version ({v:d}) not in range {v0} .. {v1} ".
                             format(v=version, v0=self.MIN_VER, v1=self.MAX_VER))
        self._names, self._coll = {}, {}
        if version == 1:
            for name in self.known_collections:
                full_name = "{}.{}".format(prefix, name) if prefix else name
                if name == 'tasks' and task_suffix is not None:
                    full_name = "{}.{}".format(full_name, task_suffix)
                self._names[name] = full_name
                self._coll[full_name] = None  # fetched lazily in __getattr__
        if db is None:
            # Placeholder mapping so the object is still usable without a DB.
            self._db = dict.fromkeys(self.known_collections, 1)
        else:
            self._db = db
        self._prefix = prefix

    def __getattr__(self, item):
        """Resolve a collection alias to its (lazily fetched) collection."""
        if item in self._names:
            coll_name = self._names[item]
            coll = self._coll[coll_name]
            if coll is None:
                self._coll[coll_name] = coll = self._db[coll_name]
            return coll
        # BUGFIX: raise AttributeError (not KeyError) for unknown attributes,
        # so hasattr()/getattr() and other attribute protocols work correctly.
        try:
            return self.__dict__[item]
        except KeyError:
            raise AttributeError(item)

    def get_collection_name(self, alias):
        """Return the full collection name for a short alias like 'tasks'."""
        return self._names[alias]

    @property
    def database(self):
        """Return the current database object.

        :return: Current database object
        """
        return self._db
def merge_tasks(core_collections, sandbox_collections, id_prefix, new_tasks, batch_size=100, wipe=False):
    """Merge core and sandbox task collections into a target collection in the sandbox.

    Core documents are copied verbatim; sandbox documents get their ``task_id``
    rewritten to ``<id_prefix>-<task_id>`` so the two id spaces cannot collide.

    :param core_collections: Core collection info
    :type core_collections: Collections
    :param sandbox_collections: Sandbox collection info
    :type sandbox_collections: Collections
    :param id_prefix: Prefix prepended to sandbox task ids
    :type id_prefix: str
    :param new_tasks: Name of the target (merged) collection in the sandbox DB
    :type new_tasks: str
    :param batch_size: Number of documents per bulk insert
    :type batch_size: int
    :param wipe: If True, empty the target (and 'counter') collection first
    :type wipe: bool
    """
    merged = copy.copy(sandbox_collections)
    # create/clear target collection
    target = merged.database[new_tasks]
    if wipe:
        _log.debug("merge_tasks.wipe.begin")
        target.remove()
        merged.database['counter'].remove()
        _log.debug("merge_tasks.wipe.end")

    def _copy_docs(source, transform=None):
        # Stream `source` into `target` in batches of `batch_size`.
        # (Replaces two copy-pasted batching loops in the original.)
        batch = []
        for doc in source.find():
            if transform is not None:
                transform(doc)
            batch.append(doc)
            if len(batch) == batch_size:
                target.insert(batch)
                batch = []
        if batch:
            target.insert(batch)

    def _prefix_id(doc):
        doc['task_id'] = id_prefix + '-' + str(doc['task_id'])

    # Core tasks first, then the sandbox tasks with rewritten ids.
    _copy_docs(core_collections.tasks)
    _copy_docs(sandbox_collections.tasks, _prefix_id)
class HasExamples(object):
    """Mix-in for builders whose output schema can be checked statically.

    Subclasses supply representative documents via :meth:`examples`; unit
    tests then call :meth:`validate_examples` to check those documents
    against the registered collection schemas. This gives lightweight schema
    checking without validating every inserted document at runtime. See the
    `pymatpro.db.builders.test.test_porous_builder` module for example usage.
    """

    def examples(self):
        """Return example document(s) for collection(s).

        This must be implemented in subclasses.

        :return: List of pairs (doc, collection_name)
        :rtype: list(dict,str)
        """
        raise NotImplementedError()

    def validate_examples(self, fail_fn):
        """Check the examples against the schema.

        :param fail_fn: Pass failure messages to this function
        :type fail_fn: function(str)
        """
        for collection, doc in self.examples():
            _log.debug("validating example in collection {}".format(collection))
            sch = schema.get_schema(collection)  # with more err. checking
            result = sch.validate(doc)
            outcome = "OK" if result is None else result
            _log.debug("validation result: {}".format(outcome))
            if result is not None:
                fail_fn("Failed to validate sample document: {}".format(result))
class Builder(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for all builders
To implement a new builder, inherit from this class and
define the :meth:`get_items` and :meth:`process_item` methods.
See the online documentation for details.
"""
    def __init__(self, ncores=1):
        """Create new builder for threaded or multiprocess execution.

        :param ncores: Desired number of threads or processes to run.
            The default of 1 selects sequential, in-process execution.
        :type ncores: int
        :raise: ValueError for bad 'config' arg
        """
        sequential = (ncores == 1)
        if sequential:
            # Sequential mode: a plain in-process queue is enough.
            self._seq = True
            self._queue = Queue.Queue()
        else:
            # Parallel mode: state shared between workers must go through a
            # multiprocessing manager (see shared_dict / shared_list).
            self._seq = False
            self._mgr = multiprocessing.Manager()
            # NOTE(review): ncores <= 0 silently falls back to 15 workers --
            # confirm that magic default is intended.
            self._ncores = ncores if ncores > 0 else 15
            self._queue = multiprocessing.Queue()
            self._run_parallel_fn = self._run_parallel_multiprocess
        self._status = BuilderStatus(ncores, self)
    # ----------------------------
    # Override these in subclasses
    # ----------------------------
    def get_parameters(self):
        """Return key/value pairs that will be passed to get_items().

        This is an alternative to the use of special docstrings as described
        in :meth:`get_items`.
        If the type of the argument is 'QueryEngine', then the driver program
        will add options for, and create, a matgendb.query_engine.QueryEngine
        instance. The value given for this argument will be interpreted as
        the MongoDB collection name.
        Some other basic Python types -- list, dict, int, float -- are
        automatically parsed. If an empty value is supplied for these, then
        None will be passed.

        :return: {'param_name': {'type': 'param_type', 'desc':'description'},
                  'param2_name': {'type': 'param2_type', 'desc':'descr2'}, ..},
                  or None to fall back to parsing the get_items() docstring.
        :rtype: dict or None
        """
        return None  # means: use docstring
    @abstractmethod
    def get_items(self):
        """Perform one-time setup at the top of a run, returning
        an iterator on items to use as input.

        If get_parameters() returns None (the default), the docstring of this
        function is parsed to discover the names and types of its parameters;
        otherwise get_parameters() supplies them directly.
        This docstring must use the ':' version of the restructured
        text style. For example::

            class MyBuilder(Builder):
                def get_items(self, source=None, target=None):
                    '''
                    :param source: The input porous materials collection
                    :type source: QueryEngine
                    :param target: The output materials collection
                    :type target: QueryEngine
                    '''

        More details on the parameters is in :meth:`get_items_parameters`.

        :return: iterator
        """
        # Placeholder result; concrete builders must override this method.
        return [{"Hello": 1}, {"World": 2}]
    @abstractmethod
    def process_item(self, item):
        """Implement the analysis for each item of work here.

        :param item: One item of work from the queue (i.e., one item from the
                     iterator that was returned by the `setup` method).
        :type item: object
        :return: Status code, 0 for OK
        :rtype: int
        """
        # Placeholder; concrete builders must override.
        return 0
    def finalize(self, had_errors):
        """Perform any cleanup actions after all items have been processed.

        Subclasses may choose not to implement this, in which case it is a
        no-op that reports success.

        :param had_errors: True if the run itself had errors.
        :type had_errors: bool
        :return: True if nothing went wrong, else False
        """
        return True
    # -----------------------------
    # Utility methods
    # -----------------------------
    def shared_dict(self):
        """Get dict that can be shared between parallel processes.

        :return: A plain dict in sequential mode, otherwise a manager-proxy
            dict usable across worker processes.
        :rtype: dict or multiprocessing proxy
        """
        if self._seq:
            return dict()
        else:
            return self._mgr.dict()
def shared_list(self):
"""Get list that can be shared between parallel processes.
"""
if self._seq:
return list()
else:
return self._mgr.list()
# -----------------------------
# Public/internal
# -----------------------------
def run(self, user_kw=None, build_kw=None):
"""Run the builder.
:param user_kw: keywords from user
:type user_kw: dict
:param build_kw: internal settings
:type build_kw: dict
:return: Number of items processed
:rtype: int
"""
user_kw = {} if user_kw is None else user_kw
build_kw = {} if build_kw is None else build_kw
n = self._build(self.get_items(**user_kw), **build_kw)
finalized = self.finalize(self._status.has_failures())
if not finalized:
_log.error("Finalization failed")
return n
def connect(self, config):
"""Connect to database with given configuration, which may be a dict or
a path to a pymatgen-db configuration.
"""
if isinstance(config, str):
conn = dbutil.get_database(config_file=config)
elif isinstance(config, dict):
conn = dbutil.get_database(settings=config)
else:
raise ValueError("Configuration, '{}', must be a path to "
"a configuration file or dict".format(config))
return conn
# -----------------------------
    def _build(self, items, chunk_size=10000):
        """Build the output, in chunks.

        Items are pushed onto the shared queue; every `chunk_size` items the
        queue is drained by the workers (in-process when sequential, via the
        parallel runner otherwise), stopping early on the first failure.

        :param items: iterator of work items (from get_items())
        :param chunk_size: number of items queued between worker runs
        :return: Number of items processed
        :rtype: int
        """
        _log.debug("_build, chunk_size={:d}".format(chunk_size))
        n, i = 0, 0
        for i, item in enumerate(items):
            if i == 0:
                _log.debug("_build, first item")
            if 0 == (i + 1) % chunk_size:
                if self._seq:
                    self._run(0)
                else:
                    self._run_parallel_fn()  # process the chunk
                if self._status.has_failures():
                    break
                # Record progress only after a chunk completes cleanly.
                n = i + 1
            self._queue.put(item)
        # process final chunk
        if self._seq:
            self._run(0)
        else:
            self._run_parallel_fn()
        if not self._status.has_failures():
            n = i + 1
        return n
    def _run_parallel_multiprocess(self):
        """Run processes from queue.

        Forks `self._ncores` worker processes that each drain the shared
        queue through ProcRunner.run, then joins them and maps each exit
        code onto the per-worker status.
        """
        _log.debug("run.parallel.multiprocess.start")
        processes = []
        # ProcRunner works around multiprocessing not accepting a bound
        # method as a target: stash the instance on the class before forking.
        ProcRunner.instance = self
        for i in range(self._ncores):
            self._status.running(i)
            proc = multiprocessing.Process(target=ProcRunner.run, args=(i,))
            proc.start()
            processes.append(proc)
        for i in range(self._ncores):
            processes[i].join()
            code = processes[i].exitcode
            # Exit code 0 means the worker finished cleanly.
            self._status.success(i) if 0 == code else self._status.fail(i)
        _log.debug("run.parallel.multiprocess.end states={}".format(self._status))
    def _run(self, index):
        """Run method for one thread or process

        Just pull an item off the queue and process it,
        until the queue is empty.

        :param index: Sequential index of this process or thread
        :type index: int
        """
        while 1:
            try:
                # The 2s timeout is how "queue drained" is detected: an
                # Empty exception ends this worker's loop normally.
                item = self._queue.get(timeout=2)
                self.process_item(item)
            except Queue.Empty:
                break
            except Exception as err:
                _log.error("In _run(): {}".format(err))
                if _log.isEnabledFor(logging.DEBUG):
                    _log.error(traceback.format_exc())
                # Mark the failure before re-raising so the parent sees it.
                self._status.fail(index)
                raise
        self._status.success(index)
def __str__(self):
return self.__class__.__name__
class BuilderStatus(object):
    """Track the per-worker state of a Builder run."""

    # Worker states.
    WAIT, RUNNING, SUCCESS, FAILURE = 0, 1, 2, -1
    # Human-readable names, for printing.
    _NAMES = {WAIT: "wait", RUNNING: "run", SUCCESS: "ok", FAILURE: "fail"}

    def __init__(self, num, builder):
        self._bld = builder
        # Backed by a process-shared list when the builder runs in parallel.
        self._states = self._bld.shared_list()
        self._states.extend([self.WAIT] * num)

    def running(self, i):
        """Set state of a single process or thread to 'running'.

        :param i: Index of process or thread.
        :return: None
        """
        self._set(i, self.RUNNING)

    def success(self, i):
        """Set state of a single process or thread to 'success'.

        :param i: Index of process or thread.
        :return: None
        """
        self._set(i, self.SUCCESS)

    def fail(self, i):
        """Set state of a single process or thread to 'failure'.

        :param i: Index of process or thread.
        :return: None
        """
        self._set(i, self.FAILURE)

    def has_failures(self):
        """Return True when any worker has recorded a failure."""
        return self.FAILURE in self._states

    def _set(self, index, value):
        self._states[index] = value

    def __getitem__(self, index):
        return self._states[index]

    def __str__(self):
        return ",".join(self._NAMES[state] for state in self._states)
class ProcRunner(object):
    """Shim around multiprocessing's limitation that a process target cannot
    be a bound method.

    The builder instance (self) is stored on the class before each process
    is forked; this classmethod then forwards to the instance's _run().
    """

    instance = None

    @classmethod
    def run(cls, index):
        cls.instance._run(index)
def alphadump(d, indent=2, depth=0):
    """Dump a dict to a str, with keys in alphabetical order.

    Nested dicts are rendered recursively, indented by `indent` spaces per
    nesting level.

    :param d: dictionary to dump
    :param indent: spaces of indentation per nesting level
    :param depth: current nesting level (used internally by the recursion)
    :return: string rendering of `d`
    """
    sep = '\n' + ' ' * depth * indent
    return ''.join(
        ("{}: {}{}".format(
            k,
            # Bug fix: propagate `indent` into the recursive call; previously
            # nested levels always fell back to the default of 2.
            alphadump(d[k], indent=indent, depth=depth + 1)
            if isinstance(d[k], dict)
            else str(d[k]),
            sep)
         for k in sorted(d.keys()))
    )
|
migueldiascosta/pymatgen-db
|
matgendb/builders/core.py
|
Python
|
mit
| 17,406
|
[
"pymatgen"
] |
c16b82b5abcb82391514553b93f3b73b231122792c679c8cb6b77fbba301443e
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# listhandling - [insert a few words of module description on this line]
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""List functions"""
from shared.fileio import pickle, unpickle
def add_item_to_pickled_list(path, item, logger):
    """Append `item` to the pickled list stored at `path`.

    Returns (True, '') on success, or (False, reason) when unpickling fails,
    the item is already present, or re-pickling fails.
    """
    output = ''
    items = unpickle(path, logger)
    # An empty list is valid; any other falsy value means unpickle failed.
    if items != [] and not items:
        output += 'Failure: could not unpickle current list'
        return (False, output)
    # Refuse duplicates.
    if item in items:
        output += '%s is already in the list' % item
        return (False, output)
    items.append(item)
    if not pickle(items, path, logger):
        output += 'pickle error'
        return (False, output)
    return (True, '')
def list_items_in_pickled_list(path, logger):
    """Return (True, items) for the pickled list at `path`.

    On unpickle failure returns (False, reason); an empty list is valid.
    """
    items = unpickle(path, logger)
    if items != [] and not items:
        return (False, 'Failure: could not unpickle list')
    return (True, items)
def remove_item_from_pickled_list(
    path,
    item,
    logger,
    allow_empty_list=True,
    ):
    """Remove `item` from the pickled list stored at `path`.

    :param allow_empty_list: when False, refuse to remove the last item
    :return: (True, '') on success, (False, reason) otherwise
    """
    list_ = unpickle(path, logger)
    output = ''
    if list_ == []:
        # OK, if the list is empty
        pass
    elif not list_:
        output += 'Failure: could not unpickle current list'
        return (False, output)
    # Check if the item is in the list
    item = item.strip()
    if item not in list_:
        output += '%s not found in list' % item
        return (False, output)
    if not allow_empty_list:
        if len(list_) <= 1:
            output += 'You cannot remove the last item'
            return (False, output)
    # ok, lets remove the item and pickle and save the new list
    try:
        list_.remove(item)
    except ValueError:
        # Narrowed from a bare `except:`; list.remove only raises ValueError,
        # and the membership check above makes this branch near-unreachable.
        output += \
            'Strange error, %s could not be removed, but it seems to be in the list'\
            % item
        return (False, output)
    if not pickle(list_, path, logger):
        output += 'Error pickling new owners file'
        return (False, output)
    return (True, output)
def is_item_in_pickled_list(path, item, logger):
    """Return True if `item` is present in the pickled list at `path`.

    Unpickle failures and empty lists both report False.
    """
    list_ = unpickle(path, logger)
    # `not list_` covers both a failed unpickle (falsy sentinel) and the
    # empty list; the original's extra len()==0 branch was dead code.
    if not list_:
        return False
    return item in list_
|
heromod/migrid
|
mig/shared/listhandling.py
|
Python
|
gpl-2.0
| 3,240
|
[
"Brian"
] |
18a07468237e5fd78afb9baac51a8be2d5d426b3481f166761bb38d6d60110ad
|
"""Module containing image transformation functions.
This module contains the function decorator
:func:`jicimagelib.transform.transformation` that can be used
to turn functions into image transformations.
Below is an example of how to create a transformation that inverts an image.
>>> import numpy as np
>>> @transformation
... def invert(image):
... "Return the inverted image."
... maximum = np.iinfo(image.dtype).max
... maximum_array = np.ones(image.shape, dtype=image.dtype) * maximum
... return maximum_array - image
...
The :mod:`jicimagelib.transform` module also contains a number of built-in
general purpose transformations that have already had the
:func:`jicimagelib.transform.transformation` function decorator applied to
them.
"""
from functools import wraps

import numpy as np
import PIL.Image
import scipy.ndimage.filters
import skimage.exposure
import skimage.filters
import skimage.morphology

from jicimagelib.io import AutoName, AutoWrite
from jicimagelib.image import Image
from jicimagelib.util.array import (
    normalise,
    reduce_stack,
    dtype_contract,
)
#############################################################################
# Function decorator for creating transforms.
#############################################################################
def transformation(func):
    """Function decorator to turn another function into a transformation.

    The wrapper preserves the ``history`` attribute of a
    :class:`jicimagelib.image.Image` argument, coerces the return value back
    to an Image, records the applied transform in the history, and (when
    ``AutoWrite.on``) saves the result to the path chosen by ``AutoName``.
    """
    @wraps(func)
    def func_as_transformation(*args, **kwargs):
        # When using transforms that return new ndarrays we lose the
        # jicimagelib.image.Image type and the history of the image.
        # One therefore needs to:
        # - Extract the history from the input jicimagelib.image.Image.
        # - Apply the transformation, which may return a numpy ndarray.
        # - Force the image to the jicimagelib.image.Image type.
        # - Re-attach the extracted history
        if hasattr(args[0], 'history'):
            # Working on jicimagelib.Image.
            history = args[0].history
        else:
            # Working on something without a history, e.g. a ndarray stack.
            history = []
        image = func(*args, **kwargs)
        image = Image.from_array(image, log_in_history=False)
        image.history = history
        image.history.append('Applied {} transform'.format(func.__name__))
        if AutoWrite.on:
            fpath = AutoName.name(func)
            try:
                if AutoWrite.auto_safe_dtype:
                    # Rescale to 0..255 so PIL can always save as 8-bit.
                    safe_range_im = 255 * normalise(image)
                    pil_im = PIL.Image.fromarray(safe_range_im.astype(np.uint8))
                else:
                    pil_im = PIL.Image.fromarray(image)
            except TypeError:
                # Give a more meaningful error message.
                raise(TypeError(
                    "Cannot handle this data type: {}".format(image.dtype)))
            pil_im.save(fpath)
        return image
    return func_as_transformation
#############################################################################
# General purpose transforms.
#############################################################################
@transformation
def max_intensity_projection(stack):
    """Return maximum intensity projection of a stack.

    :param stack: 3D array from which to project third dimension
    :returns: :class:`jicimagelib.image.Image`
    """
    # Per-pixel maximum across z-planes via the shared stack reducer.
    return reduce_stack(stack, max)
@transformation
def min_intensity_projection(stack):
    """Return minimum intensity projection of a stack.

    :param stack: 3D array from which to project third dimension
    :returns: :class:`jicimagelib.image.Image`
    """
    # Per-pixel minimum across z-planes via the shared stack reducer.
    return reduce_stack(stack, min)
@transformation
@dtype_contract(input_dtype=np.float, output_dtype=np.float)
def smooth_gaussian(image, sigma=1):
    """Returns Gaussian smoothed image.

    :param image: numpy array or :class:`jicimagelib.image.Image`
    :param sigma: standard deviation of the Gaussian kernel
    :returns: :class:`jicimagelib.image.Image`
    """
    # mode="nearest" replicates edge pixels rather than wrapping/zero-padding.
    return scipy.ndimage.filters.gaussian_filter(image, sigma=sigma, mode="nearest")
@transformation
@dtype_contract(output_dtype=np.float)
def equalize_adaptive_clahe(image, ntiles=8, clip_limit=0.01):
    """Return contrast limited adaptive histogram equalized image.

    The return value is normalised to the range 0 to 1.

    :param image: numpy array or :class:`jicimagelib.image.Image` of dtype float
    :param ntiles: number of tile regions
    :param clip_limit: clipping limit in range 0 to 1,
        higher values give more contrast
    """
    # Convert input for skimage.
    skimage_float_im = normalise(image)
    # NOTE(review): np.all() is True when every pixel is *non-zero*, which is
    # not the same as "no variation" -- presumably this relies on normalise()
    # mapping the minimum to 0 so a varied image always has some zeros.
    # Confirm against normalise()'s contract.
    if np.all(skimage_float_im):
        raise(RuntimeError("Cannot equalise when there is no variation."))
    # NOTE(review): ntiles_x/ntiles_y were replaced by `kernel_size` in newer
    # scikit-image releases -- verify the pinned skimage version.
    normalised = skimage.exposure.equalize_adapthist(skimage_float_im,
        ntiles_x=ntiles, ntiles_y=ntiles, clip_limit=clip_limit)
    assert np.max(normalised) == 1.0
    assert np.min(normalised) == 0.0
    return normalised
@transformation
@dtype_contract(output_dtype=np.bool)
def threshold_otsu(image, multiplier=1.0):
    """Return boolean image thresholded using Otsu's method.

    :param image: numpy array or :class:`jicimagelib.image.Image`
    :param multiplier: scale factor applied to the computed Otsu threshold
    :returns: boolean :class:`jicimagelib.image.Image`
    """
    # Requires `import skimage.filters` at module level; the file previously
    # imported only skimage.morphology and skimage.exposure, so this
    # attribute access could fail depending on skimage's internal imports.
    otsu_value = skimage.filters.threshold_otsu(image)
    return image > otsu_value * multiplier
@transformation
@dtype_contract(input_dtype=np.bool, output_dtype=np.bool)
def remove_small_objects(image, min_size=50):
    """Remove small objects from an boolean image.

    :param image: boolean numpy array or :class:`jicimagelib.image.Image`
    :param min_size: connected regions smaller than this many pixels are
        dropped
    :returns: boolean :class:`jicimagelib.image.Image`
    """
    return skimage.morphology.remove_small_objects(image, min_size=min_size)
|
JIC-CSB/jicimagelib
|
jicimagelib/transform.py
|
Python
|
mit
| 5,662
|
[
"Gaussian"
] |
957150d4d5b93c7baffb887b565c7bfd9ddf1cc367708cea1772e78bf271452d
|
class VisitorBase(object):
    """Dispatch nodes to ``visit_<lowercased class name>`` methods.

    ``visit(node)`` looks for a handler named after the node's class on
    self; when none exists it falls back to :meth:`generic_visit`.
    """

    def visit(self, node):
        handler_name = "visit_%s" % node.__class__.__name__.lower()
        if hasattr(self, handler_name):
            return getattr(self, handler_name)(node)
        return self.generic_visit(node)

    def generic_visit(self, node):
        # Subclasses must provide a fallback; the base refuses silently
        # ignoring unknown nodes.
        raise NotImplementedError(node)
class Visitor(VisitorBase):
    """Read-only depth-first traversal.

    Children are visited first; :meth:`generic_action` then runs on the
    node itself and its result is returned.
    """

    def generic_action(self, node):
        # Hook for subclasses; default leaves the node untouched.
        return node

    def generic_visit(self, node):
        for child_node in node.get_children():
            self.visit(child_node)
        return self.generic_action(node)
class Mutator(Visitor):
    """Traversal that replaces each child with the result of visiting it."""

    def generic_action(self, node):
        # Hook for subclasses; default keeps the node as-is.
        return node

    def generic_visit(self, node):
        new_children = [self.visit(child) for child in node.get_children()]
        node.set_children(new_children)
        return self.generic_action(node)
|
lisael/fastidious
|
fastidious/compiler/astutils.py
|
Python
|
gpl-3.0
| 823
|
[
"VisIt"
] |
371dec67180d87c211377d08909a2433152c63920243131012a06b9eb1390061
|
"""
DIRAC.WorkloadManagementSystem.PilotAgent package
"""
__RCSID__ = "$Id$"
|
fstagni/DIRAC
|
WorkloadManagementSystem/PilotAgent/__init__.py
|
Python
|
gpl-3.0
| 80
|
[
"DIRAC"
] |
77cfb031279458fc985f0e0a9140f823a6e1e4ed8ee80f6c06710dd61c4a83dc
|
# This file is part of Merlin.
# Merlin is the Copyright (C)2008,2009,2010 of Robin K. Hansen, Elliot Rosemarine, Andreas Jacobsen.
# Individual portions may be copyright by individual contributors, and
# are included in this collective work with permission of the copyright
# owners.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from sqlalchemy.orm import aliased
from sqlalchemy.sql import desc
from sqlalchemy.sql.functions import count
from Core.db import session
from Core.maps import Updates, Galaxy, Planet, Alliance, User, Intel, FleetScan
from Core.loadable import loadable, route, require_planet
from Core.config import Config
from Core.paconf import PA
class surprisesex(loadable):
    """Top alliances attacking the specified target"""
    # Accepted argument forms; see the three @route handlers below.
    usage = " [x:y[:z]|alliance|user]"
    access = "member"

    @route(loadable.coord)
    def planet_galaxy(self, message, user, params):
        # Coordinate form: group(5) (the z part) present -> single planet,
        # otherwise the whole galaxy is the target.
        # Planet
        if params.group(5) is not None:
            planet = Planet.load(*params.group(1,3,5))
            if planet is None:
                message.reply("No planet with coords %s:%s:%s found" % params.group(1,3,5))
            else:
                self.execute(message, planet=planet)
        # Galaxy
        else:
            galaxy = Galaxy.load(*params.group(1,3))
            if galaxy is None:
                message.reply("No galaxy with coords %s:%s" % params.group(1,3))
            else:
                self.execute(message, galaxy=galaxy)

    @route(r"(\S+)")
    def user_alliance(self, message, user, params):
        # Free-text form: try an alliance name first, then fall back to a
        # member lookup whose planet becomes the target.
        alliance = Alliance.load(params.group(1))
        if alliance is None:
            u = User.load(name=params.group(1), exact=False, access="member")
            if u is None:
                message.reply("No alliance or user matching '%s' found" % (params.group(1),))
            elif u.planet is None:
                message.reply("User %s has not entered their planet details" % (u.name,))
            else:
                planet = u.planet
                self.execute(message, planet=planet)
        else:
            self.execute(message, alliance=alliance)

    @route(r"")
    @require_planet
    def me(self, message, user, params):
        # No argument: report attackers on the caller's own planet.
        self.execute(message, planet=user.planet)

    def execute(self, message, planet=None, galaxy=None, alliance=None):
        # Count 'Attack' fleet scans grouped by the attacking alliance,
        # filtered to the requested target (planet, galaxy or alliance).
        # NOTE(review): `tick` is never used below -- confirm whether a
        # tick-based filter was intended.
        tick = Updates.current_tick()
        target = aliased(Planet)
        target_intel = aliased(Intel)
        owner = aliased(Planet)
        owner_intel = aliased(Intel)
        Q = session.query(Alliance.name, count())
        Q = Q.join((FleetScan.owner, owner))
        Q = Q.join((FleetScan.target, target))
        Q = Q.filter(FleetScan.mission == "Attack")
        # Outer joins so attackers without intel still count (name is None,
        # rendered as "Unknown" below).
        Q = Q.outerjoin((owner.intel, owner_intel))
        Q = Q.outerjoin((owner_intel.alliance, Alliance))
        if planet:
            Q = Q.filter(FleetScan.target == planet)
        if galaxy:
            Q = Q.filter(target.galaxy == galaxy)
        if alliance:
            Q = Q.join((target.intel, target_intel))
            Q = Q.filter(target_intel.alliance == alliance)
        Q = Q.group_by(Alliance.name)
        Q = Q.order_by(desc(count()))
        result = Q.all()
        if len(result) < 1:
            reply="No fleets found targetting"
            if planet:
                reply+=" coords %s:%s:%s"%(planet.x,planet.y,planet.z)
            if galaxy:
                reply+=" coords %s:%s"%(galaxy.x,galaxy.y)
            if alliance:
                reply+=" alliance %s"%(alliance.name,)
            message.reply(reply)
            return
        reply = "Top attackers on"
        if planet:
            reply+=" coords %s:%s:%s"%(planet.x,planet.y,planet.z)
        if galaxy:
            reply+=" coords %s:%s"%(galaxy.x,galaxy.y)
        if alliance:
            reply+=" alliance %s"%(alliance.name,)
        reply+=" are (total: %s) "%(sum([attacks for name, attacks in result]),)
        prev = []
        # Only the five busiest attacking alliances are shown.
        for name, attacks in result[:5]:
            prev.append("%s - %s"%(name or "Unknown",attacks))
        message.reply(reply+" | ".join(prev))
|
ellonweb/merlin
|
Hooks/victim/surprisesex.py
|
Python
|
gpl-2.0
| 4,771
|
[
"Galaxy"
] |
1a6c004f10eafac6979da520a2d9eb1db45a1ec4396ac8571609ed2890d6acf1
|
# Copyright 2003 Iddo Friedberg. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
import string
"""A parser for the NCBI blastpgp version 2.2.5 output format. Currently only supports
the '-m 9' option, (table w/ annotations).
Returns a BlastTableRec instance
"""
class BlastTableEntry:
    """One row of blastpgp '-m 9' tabular output.

    Parsed fields:
      qid/sid   -- query/subject identifiers, split on '|'
      pid       -- percent identity (float)
      ali_len   -- alignment length (int)
      mis       -- number of mismatches (int)
      gaps      -- number of gap openings (int)
      q_bounds  -- (query start, query end) ints
      s_bounds  -- (subject start, subject end) ints
      e_value   -- expectation value (float)
      bit_score -- bit score (float)
    """

    def __init__(self, in_rec):
        bt_fields = in_rec.split()
        self.qid = bt_fields[0].split('|')
        self.sid = bt_fields[1].split('|')
        # Builtin float()/int() replace string.atof/string.atoi, which are
        # long deprecated and removed in Python 3.
        self.pid = float(bt_fields[2])
        self.ali_len = int(bt_fields[3])
        self.mis = int(bt_fields[4])
        self.gaps = int(bt_fields[5])
        self.q_bounds = (int(bt_fields[6]), int(bt_fields[7]))
        self.s_bounds = (int(bt_fields[8]), int(bt_fields[9]))
        self.e_value = float(bt_fields[10])
        self.bit_score = float(bt_fields[11])
class BlastTableRec:
    """One parsed BLAST table: header metadata plus its data entries."""

    def __init__(self):
        # Header metadata, filled in by BlastTableReader's _parse_* methods.
        self.program = None
        self.version = None
        self.date = None
        self.iteration = None
        self.query = None
        self.database = None
        # Accumulated BlastTableEntry rows.
        self.entries = []

    def add_entry(self, entry):
        """Append one parsed table row to this record."""
        self.entries.append(entry)
# Maps a '#' header keyword to the suffix of the BlastTableReader
# `_parse_<suffix>` method that consumes that header line.
reader_keywords = {'BLASTP': 'version',
                   'Iteration': 'iteration',
                   'Query': 'query',
                   'Database': 'database',
                   'Fields': 'fields'}
class BlastTableReader:
    """Pull parser for blastpgp '-m 9' tabular output.

    Call next() repeatedly; each call returns a BlastTableRec (one table's
    '#' header metadata plus its data rows), or None at end of input.
    """

    def __init__(self, handle):
        self.handle = handle
        inline = self.handle.readline()
        # zip forward to start of record
        while inline and inline.find('BLASTP') == -1:
            inline = self.handle.readline()
        self._lookahead = inline
        self._n = 0
        self._in_header = 1

    def next(self):
        """Return the next BlastTableRec, or None when input is exhausted."""
        self.table_record = BlastTableRec()
        self._n += 1
        inline = self._lookahead
        if not inline:
            return None
        while inline:
            if inline[0] == '#':
                if self._in_header:
                    self._in_header = self._consume_header(inline)
                else:
                    # A '#' line after data rows starts the next record.
                    break
            else:
                self._consume_entry(inline)
                self._in_header = 0
            inline = self.handle.readline()
        self._lookahead = inline
        self._in_header = 1
        return self.table_record

    def _consume_entry(self, inline):
        # One whitespace-separated data row becomes one BlastTableEntry.
        current_entry = BlastTableEntry(inline)
        self.table_record.add_entry(current_entry)

    def _consume_header(self, inline):
        # Bug fix: `in_header` was previously unbound (UnboundLocalError)
        # when a '#' line matched no known keyword; default to staying in
        # header mode in that case.
        in_header = self._in_header
        for keyword in reader_keywords.keys():
            if inline.find(keyword) > -1:
                in_header = self._Parse('_parse_%s' % reader_keywords[keyword],
                                        inline)
                break
        return in_header

    def _parse_version(self, inline):
        program, version, date = inline.split()[1:]
        self.table_record.program = program
        self.table_record.version = version
        self.table_record.date = date
        return 1

    def _parse_iteration(self, inline):
        # int() replaces the deprecated string.atoi (removed in Python 3).
        self.table_record.iteration = int(inline.split()[2])
        return 1

    def _parse_query(self, inline):
        self.table_record.query = inline.split()[2:]
        return 1

    def _parse_database(self, inline):
        self.table_record.database = inline.split()[2]
        return 1

    def _parse_fields(self, inline):
        # The Fields line ends the header block.
        return 0

    def _Parse(self, method_name, inline):
        return getattr(self, method_name)(inline)
|
dbmi-pitt/DIKB-Micropublication
|
scripts/mp-scripts/Bio/Blast/ParseBlastTable.py
|
Python
|
apache-2.0
| 3,467
|
[
"Biopython"
] |
b5858b3c832152914095119c1fde8a1d1d94315c393cd5b68cf50ee1702da367
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides an interface to communicate with the device via the adb command.
Assumes adb binary is currently on system path.
"""
# pylint: disable-all
import collections
import datetime
import inspect
import logging
import os
import random
import re
import shlex
import signal
import subprocess
import sys
import tempfile
import time
import cmd_helper
import constants
import system_properties
from utils import host_utils
try:
from pylib import pexpect
except ImportError:
pexpect = None
sys.path.append(os.path.join(
constants.DIR_SOURCE_ROOT, 'third_party', 'android_testrunner'))
import adb_interface
import am_instrument_parser
import errors
from pylib.device import device_blacklist
from pylib.device import device_errors
# Pattern to search for the next whole line of pexpect output and capture it
# into a match group. We can't use ^ and $ for line start end with pexpect,
# see http://www.noah.org/python/pexpect/#doc for explanation why.
PEXPECT_LINE_RE = re.compile('\n([^\r]*)\r')

# Set the adb shell prompt to be a unique marker that will [hopefully] not
# appear at the start of any line of a command's output.
SHELL_PROMPT = '~+~PQ\x17RS~+~'

# Java properties file
LOCAL_PROPERTIES_PATH = constants.DEVICE_LOCAL_PROPERTIES_PATH

# Property in /data/local.prop that controls Java assertions.
JAVA_ASSERT_PROPERTY = 'dalvik.vm.enableassertions'

# Keycode "enum" suitable for passing to AndroidCommands.SendKey().
KEYCODE_HOME = 3
KEYCODE_BACK = 4
KEYCODE_DPAD_UP = 19
KEYCODE_DPAD_DOWN = 20
KEYCODE_DPAD_RIGHT = 22
KEYCODE_ENTER = 66
KEYCODE_MENU = 82

# On-device locations of the md5sum helper binary and the PIE loader shim.
MD5SUM_DEVICE_FOLDER = constants.TEST_EXECUTABLE_DIR + '/md5sum/'
MD5SUM_DEVICE_PATH = MD5SUM_DEVICE_FOLDER + 'md5sum_bin'
PIE_WRAPPER_PATH = constants.TEST_EXECUTABLE_DIR + '/run_pie'

# Per-device recipes for toggling USB charging; the witness file's presence
# tells us whether a recipe applies to the attached hardware.
CONTROL_USB_CHARGING_COMMANDS = [
  {
    # Nexus 4
    'witness_file': '/sys/module/pm8921_charger/parameters/disabled',
    'enable_command': 'echo 0 > /sys/module/pm8921_charger/parameters/disabled',
    'disable_command':
        'echo 1 > /sys/module/pm8921_charger/parameters/disabled',
  },
]
class DeviceTempFile(object):
  """Reserve a temporary file path on the device's external storage.

  Only an unused path is chosen here; nothing is created. When the object
  is closed (or the context exits) the path is removed on the device.
  """

  def __init__(self, android_commands, prefix='temp_file', suffix=''):
    self.android_commands = android_commands
    # Probe random candidate names until one is free on the device.
    # TODO(cjhopman): two callers could still collide if neither writes its
    # file immediately; this is expected to never happen.
    while True:
      nonce = random.randint(0, 1000000)
      self.name = '%s/%s-%d-%010d%s' % (
          android_commands.GetExternalStorage(),
          prefix, int(time.time()), nonce, suffix)
      if not android_commands.FileExistsOnDevice(self.name):
        break

  def __enter__(self):
    return self

  def __exit__(self, exc_type, exc_value, exc_traceback):
    self.close()

  def close(self):
    """Delete the reserved path on the device."""
    self.android_commands.RunShellCommand('rm ' + self.name)
def GetAVDs():
  """Returns a list of AVD names known to the local `android` tool."""
  listing = cmd_helper.GetCmdOutput(['android', 'list', 'avd'])
  return re.findall('^[ ]+Name: ([a-zA-Z0-9_:.-]+)', listing, re.MULTILINE)
def ResetBadDevices():
  """Clear the persistent blacklist of bad devices for the current build."""
  device_blacklist.ResetBlacklist()
def ExtendBadDevices(devices):
  """Add |devices| to the persistent bad-device blacklist.

  Devices on the blacklist are never returned by GetAttachedDevices.

  Args:
    devices: list of bad devices to be added to the bad devices file.
  """
  device_blacklist.ExtendBlacklist(devices)
def GetAttachedDevices(hardware=True, emulator=True, offline=False):
  """Return the serials of attached android devices and emulators.

  If ANDROID_SERIAL names a preferred device, it is moved to the front of
  the returned list. Blacklisted devices are never returned.

  Example `adb devices` output that gets parsed:
    * daemon not running. starting it now on port 5037 *
    * daemon started successfully *
    List of devices attached
    027c10494100b4d7        device
    emulator-5554   offline

  Args:
    hardware: Include attached actual devices that are online.
    emulator: Include emulators (i.e. AVD's) currently on host.
    offline: Include devices and emulators that are offline.

  Returns: List of devices.
  """
  adb_devices_output = cmd_helper.GetCmdOutput([constants.GetAdbPath(),
                                                'devices'])
  online_devices = re.findall('^([a-zA-Z0-9_:.-]+)\tdevice$',
                              adb_devices_output, re.MULTILINE)
  emulator_devices = re.findall('^(emulator-[0-9]+)\tdevice',
                                adb_devices_output, re.MULTILINE)
  offline_devices = re.findall('^([a-zA-Z0-9_:.-]+)\toffline$',
                               adb_devices_output, re.MULTILINE)
  # Select the requested online categories first.
  if hardware and emulator:
    devices = list(online_devices)
  elif hardware:
    devices = [d for d in online_devices if d not in emulator_devices]
  elif emulator:
    devices = list(emulator_devices)
  else:
    devices = []
  # Then optionally append offline entries.
  if offline:
    devices = devices + offline_devices
  # Drop anything on the blacklist.
  blacklist = device_blacklist.ReadBlacklist()
  if len(blacklist):
    logging.info('Avoiding bad devices %s', ' '.join(blacklist))
    devices = [d for d in devices if d not in blacklist]
  # Move the preferred device, if present, to the front.
  preferred_device = os.environ.get('ANDROID_SERIAL')
  if preferred_device in devices:
    devices.remove(preferred_device)
    devices.insert(0, preferred_device)
  return devices
def IsDeviceAttached(device):
  """Return true if the device is attached and online."""
  # Note: this shells out to `adb devices` on every call.
  return device in GetAttachedDevices()
def _GetFilesFromRecursiveLsOutput(path, ls_output, re_file, utc_offset=None):
  """Build a {name: (size, lastmod)} dict from `ls -lR` output.

  Python's os.walk isn't used because it doesn't work over adb shell.

  Args:
    path: The path that was listed.
    ls_output: Lines returned by an `ls -lR` command.
    re_file: Compiled regular expression whose named groups include at least
        "filename", "date", "time", "size" and optionally "timezone".
    utc_offset: A 5-character '+HHMM'/'-HHMM' string; when None, the value of
        the "timezone" group (if declared in re_file) is used instead.

  Returns:
    Dict mapping each file name (relative to |path|'s directory) to a tuple
    of (size in bytes, last-modification datetime in UTC).
  """
  re_directory = re.compile('^%s/(?P<dir>[^:]+):$' % re.escape(path))
  parent = os.path.dirname(path)
  current_dir = ''
  files = {}
  for line in ls_output:
    dir_match = re_directory.match(line)
    if dir_match:
      # Subsequent file lines belong to this subdirectory.
      current_dir = dir_match.group('dir')
      continue
    file_match = re_file.match(line)
    if not file_match:
      continue
    filename = os.path.join(current_dir, file_match.group('filename'))
    if filename.startswith(parent):
      filename = filename[len(parent) + 1:]
    lastmod = datetime.datetime.strptime(
        file_match.group('date') + ' ' + file_match.group('time')[:5],
        '%Y-%m-%d %H:%M')
    if not utc_offset and 'timezone' in re_file.groupindex:
      utc_offset = file_match.group('timezone')
    if isinstance(utc_offset, str) and len(utc_offset) == 5:
      # Convert the device-local timestamp to UTC.
      delta = datetime.timedelta(hours=int(utc_offset[1:3]),
                                 minutes=int(utc_offset[3:5]))
      if utc_offset[0:1] == '-':
        delta = -delta
      lastmod -= delta
    files[filename] = (int(file_match.group('size')), lastmod)
  return files
def _ParseMd5SumOutput(md5sum_output):
  """Turn raw md5sum output lines into (hash, path) tuples.

  Args:
    md5sum_output: output directly from md5sum binary.

  Returns:
    List of namedtuples with attributes |hash| and |path|, where |path| is
    the absolute path to the file with an Md5Sum of |hash|; malformed lines
    are skipped.
  """
  HashAndPath = collections.namedtuple('HashAndPath', ['hash', 'path'])
  pairs = (line.split(' ') for line in md5sum_output)
  return [HashAndPath._make(p) for p in pairs if len(p) == 2]
def _HasAdbPushSucceeded(command_output):
  """Return whether adb push succeeded, judged from its output."""
  # TODO(frankf): We should look at the return code instead of the command
  # output for many of the commands in this file.
  if not command_output:
    return True
  # Success looks like this: "3035 KB/s (12512056 bytes in 4.025s)"
  # Errors look like this: "failed to copy ... "
  last_line = command_output.splitlines()[-1]
  if re.search('^[0-9]', last_line):
    return True
  logging.critical('PUSH FAILED: ' + command_output)
  return False
def GetLogTimestamp(log_line, year):
  """Parse the logcat timestamp prefix of |log_line| into the given |year|.

  Returns a datetime, or None (after logging) when the prefix is malformed.
  """
  stamp = '%s-%s' % (year, log_line[:18])
  try:
    return datetime.datetime.strptime(stamp, '%Y-%m-%d %H:%M:%S.%f')
  except (ValueError, IndexError):
    logging.critical('Error reading timestamp from ' + log_line)
    return None
class AndroidCommands(object):
"""Helper class for communicating with Android device via adb."""
  def __init__(self, device=None):
    """Constructor.

    Args:
      device: If given, adb commands are only send to the device of this ID.
          Otherwise commands are sent to all attached devices.
    """
    # Make sure `adb` itself is reachable on PATH for android_testrunner.
    adb_dir = os.path.dirname(constants.GetAdbPath())
    if adb_dir and adb_dir not in os.environ['PATH'].split(os.pathsep):
      # Required by third_party/android_testrunner to call directly 'adb'.
      os.environ['PATH'] += os.pathsep + adb_dir
    self._adb = adb_interface.AdbInterface()
    if device:
      self._adb.SetTargetSerial(device)
    self._device = device
    # Logcat capture state.
    self._logcat = None
    self.logcat_process = None
    self._logcat_tmpoutfile = None
    # Push bookkeeping (files pushed, bytes considered vs. actually sent).
    self._pushed_files = []
    self._device_utc_offset = None
    self._potential_push_size = 0
    self._actual_push_size = 0
    # Lazily-resolved/cached device facts.
    self._external_storage = ''
    self._util_wrapper = ''
    self._system_properties = system_properties.SystemProperties(self.Adb())
    self._push_if_needed_cache = {}
    self._control_usb_charging_command = {
        'command': None,
        'cached': False,
    }
    self._protected_file_access_method_initialized = None
    self._privileged_command_runner = None
    self._pie_wrapper = None
  @property
  def system_properties(self):
    # Read-only accessor for the device's SystemProperties helper.
    return self._system_properties
def _LogShell(self, cmd):
"""Logs the adb shell command."""
if self._device:
device_repr = self._device[-4:]
else:
device_repr = '????'
logging.info('[%s]> %s', device_repr, cmd)
def Adb(self):
"""Returns our AdbInterface to avoid us wrapping all its methods."""
# TODO(tonyg): Goal should be to git rid of this method by making this API
# complete and alleviating the need.
return self._adb
def GetDevice(self):
"""Returns the device serial."""
return self._device
def IsOnline(self):
"""Checks whether the device is online.
Returns:
True if device is in 'device' mode, False otherwise.
"""
# TODO(aurimas): revert to using adb get-state when android L adb is fixed.
#out = self._adb.SendCommand('get-state')
#return out.strip() == 'device'
out = self._adb.SendCommand('devices')
for line in out.split('\n'):
if self._device in line and 'device' in line:
return True
return False
def IsRootEnabled(self):
"""Checks if root is enabled on the device."""
root_test_output = self.RunShellCommand('ls /root') or ['']
return not 'Permission denied' in root_test_output[0]
  def EnableAdbRoot(self):
    """Enables adb root on the device.

    Returns:
      True: if output from executing adb root was as expected.
      False: otherwise.
    """
    # 'adb root' is rejected outright on production (type=user) builds.
    if self.GetBuildType() == 'user':
      logging.warning("Can't enable root in production builds with type user")
      return False
    else:
      return_value = self._adb.EnableAdbRoot()
      # EnableAdbRoot inserts a call for wait-for-device only when adb logcat
      # output matches what is expected. Just to be safe add a call to
      # wait-for-device.
      self._adb.SendCommand('wait-for-device')
      return return_value
  def GetDeviceYear(self):
    """Returns the year information of the date on device (as a string)."""
    return self.RunShellCommand('date +%Y')[0]
def GetExternalStorage(self):
if not self._external_storage:
self._external_storage = self.RunShellCommand('echo $EXTERNAL_STORAGE')[0]
if not self._external_storage:
raise device_errors.CommandFailedError(
['shell', "'echo $EXTERNAL_STORAGE'"],
'Unable to find $EXTERNAL_STORAGE')
return self._external_storage
def WaitForDevicePm(self, timeout=120):
"""Blocks until the device's package manager is available.
To workaround http://b/5201039, we restart the shell and retry if the
package manager isn't back after 120 seconds.
Raises:
errors.WaitForResponseTimedOutError after max retries reached.
"""
last_err = None
retries = 3
while retries:
try:
self._adb.WaitForDevicePm(wait_time=timeout)
return # Success
except errors.WaitForResponseTimedOutError as e:
last_err = e
logging.warning('Restarting and retrying after timeout: %s', e)
retries -= 1
self.RestartShell()
raise last_err # Only reached after max retries, re-raise the last error.
def RestartShell(self):
"""Restarts the shell on the device. Does not block for it to return."""
self.RunShellCommand('stop')
self.RunShellCommand('start')
  def Reboot(self, full_reboot=True):
    """Reboots the device and waits for the package manager to return.

    Args:
      full_reboot: Whether to fully reboot the device or just restart the shell.
    """
    # TODO(torne): hive can't reboot the device either way without breaking the
    # connection; work out if we can handle this better
    if os.environ.get('USING_HIVE'):
      logging.warning('Ignoring reboot request as we are on hive')
      return
    if full_reboot or not self.IsRootEnabled():
      self._adb.SendCommand('reboot')
      # Property cache is stale after a reboot; recreate the wrapper.
      self._system_properties = system_properties.SystemProperties(self.Adb())
      timeout = 300
      retries = 1
      # Wait for the device to disappear.
      while retries < 10 and self.IsOnline():
        time.sleep(1)
        retries += 1
    else:
      self.RestartShell()
      timeout = 120
    # To run tests we need at least the package manager and the sd card (or
    # other external storage) to be ready.
    self.WaitForDevicePm(timeout)
    self.WaitForSdCardReady(timeout)
  def Shutdown(self):
    """Shuts down the device ('reboot -p' powers off rather than restarts)."""
    self._adb.SendCommand('reboot -p')
    self._system_properties = system_properties.SystemProperties(self.Adb())
def Uninstall(self, package):
"""Uninstalls the specified package from the device.
Args:
package: Name of the package to remove.
Returns:
A status string returned by adb uninstall
"""
uninstall_command = 'uninstall %s' % package
self._LogShell(uninstall_command)
return self._adb.SendCommand(uninstall_command, timeout_time=60)
def Install(self, package_file_path, reinstall=False):
"""Installs the specified package to the device.
Args:
package_file_path: Path to .apk file to install.
reinstall: Reinstall an existing apk, keeping the data.
Returns:
A status string returned by adb install
"""
assert os.path.isfile(package_file_path), ('<%s> is not file' %
package_file_path)
install_cmd = ['install']
if reinstall:
install_cmd.append('-r')
install_cmd.append(package_file_path)
install_cmd = ' '.join(install_cmd)
self._LogShell(install_cmd)
return self._adb.SendCommand(install_cmd,
timeout_time=2 * 60,
retry_count=0)
  def ManagedInstall(self, apk_path, keep_data=False, package_name=None,
                     reboots_on_timeout=2):
    """Installs specified package and reboots device on timeouts.

    If package_name is supplied, checks if the package is already installed and
    doesn't reinstall if the apk md5sums match.

    Args:
      apk_path: Path to .apk file to install.
      keep_data: Reinstalls instead of uninstalling first, preserving the
        application data.
      package_name: Package name (only needed if keep_data=False).
      reboots_on_timeout: number of time to reboot if package manager is frozen.
    """
    # Check if package is already installed and up to date.
    if package_name:
      installed_apk_path = self.GetApplicationPath(package_name)
      # ignore_filenames because the on-device apk name differs from the
      # local one; only content hashes are compared.
      if (installed_apk_path and
          not self.GetFilesChanged(apk_path, installed_apk_path,
                                   ignore_filenames=True)):
        logging.info('Skipped install: identical %s APK already installed' %
            package_name)
        return
    # Install.
    reboots_left = reboots_on_timeout
    while True:
      try:
        if not keep_data:
          assert package_name
          self.Uninstall(package_name)
        install_status = self.Install(apk_path, reinstall=keep_data)
        if 'Success' in install_status:
          return
        else:
          raise Exception('Install failure: %s' % install_status)
      except errors.WaitForResponseTimedOutError:
        print '@@@STEP_WARNINGS@@@'
        logging.info('Timeout on installing %s on device %s', apk_path,
                     self._device)
        if reboots_left <= 0:
          raise Exception('Install timed out')
        # Force a hard reboot on last attempt
        self.Reboot(full_reboot=(reboots_left == 1))
        reboots_left -= 1
def MakeSystemFolderWritable(self):
"""Remounts the /system folder rw."""
out = self._adb.SendCommand('remount')
if out.strip() != 'remount succeeded':
raise errors.MsgException('Remount failed: %s' % out)
  def RestartAdbdOnDevice(self):
    """Restarts the adbd daemon on the device by running a pushed script.

    Blocks on 'wait-for-device' until the restarted daemon reconnects.
    """
    logging.info('Restarting adbd on the device...')
    with DeviceTempFile(self, suffix=".sh") as temp_script_file:
      host_script_path = os.path.join(constants.DIR_SOURCE_ROOT,
                                      'build',
                                      'android',
                                      'pylib',
                                      'restart_adbd.sh')
      self._adb.Push(host_script_path, temp_script_file.name)
      # Source (rather than exec) the script so it runs in the shell itself.
      self.RunShellCommand('. %s' % temp_script_file.name)
      self._adb.SendCommand('wait-for-device')
def RestartAdbServer(self):
"""Restart the adb server."""
ret = self.KillAdbServer()
if ret != 0:
raise errors.MsgException('KillAdbServer: %d' % ret)
ret = self.StartAdbServer()
if ret != 0:
raise errors.MsgException('StartAdbServer: %d' % ret)
@staticmethod
def KillAdbServer():
"""Kill adb server."""
adb_cmd = [constants.GetAdbPath(), 'kill-server']
ret = cmd_helper.RunCmd(adb_cmd)
retry = 0
while retry < 3:
ret, _ = cmd_helper.GetCmdStatusAndOutput(['pgrep', 'adb'])
if ret != 0:
# pgrep didn't find adb, kill-server succeeded.
return 0
retry += 1
time.sleep(retry)
return ret
def StartAdbServer(self):
"""Start adb server."""
adb_cmd = ['taskset', '-c', '0', constants.GetAdbPath(), 'start-server']
ret, _ = cmd_helper.GetCmdStatusAndOutput(adb_cmd)
retry = 0
while retry < 3:
ret, _ = cmd_helper.GetCmdStatusAndOutput(['pgrep', 'adb'])
if ret == 0:
# pgrep found adb, start-server succeeded.
# Waiting for device to reconnect before returning success.
self._adb.SendCommand('wait-for-device')
return 0
retry += 1
time.sleep(retry)
return ret
  def WaitForSystemBootCompleted(self, wait_time):
    """Waits for targeted system's boot_completed flag to be set.

    Args:
      wait_time: time in seconds to wait

    Raises:
      WaitForResponseTimedOutError if wait_time elapses and flag still not
      set.
    """
    logging.info('Waiting for system boot completed...')
    self._adb.SendCommand('wait-for-device')
    # Now the device is there, but system not boot completed.
    # Query the sys.boot_completed flag with a basic command
    boot_completed = False
    attempts = 0
    wait_period = 5
    while not boot_completed and (attempts * wait_period) < wait_time:
      output = self.system_properties['sys.boot_completed']
      output = output.strip()
      if output == '1':
        boot_completed = True
      else:
        # If 'error: xxx' returned when querying the flag, it means
        # adb server lost the connection to the emulator, so restart the adb
        # server.
        if 'error:' in output:
          self.RestartAdbServer()
        time.sleep(wait_period)
        attempts += 1
    if not boot_completed:
      raise errors.WaitForResponseTimedOutError(
          'sys.boot_completed flag was not set after %s seconds' % wait_time)
def WaitForSdCardReady(self, timeout_time):
"""Wait for the SD card ready before pushing data into it."""
logging.info('Waiting for SD card ready...')
sdcard_ready = False
attempts = 0
wait_period = 5
external_storage = self.GetExternalStorage()
while not sdcard_ready and attempts * wait_period < timeout_time:
output = self.RunShellCommand('ls ' + external_storage)
if output:
sdcard_ready = True
else:
time.sleep(wait_period)
attempts += 1
if not sdcard_ready:
raise errors.WaitForResponseTimedOutError(
'SD card not ready after %s seconds' % timeout_time)
  def GetAndroidToolStatusAndOutput(self, command, lib_path=None, *args, **kw):
    """Runs a native Android binary, wrapping the command as necessary.

    This is a specialization of GetShellCommandStatusAndOutput, which is meant
    for running tools/android/ binaries and handle properly: (1) setting the
    lib path (for component=shared_library), (2) using the PIE wrapper on ICS.
    See crbug.com/373219 for more context.

    Args:
      command: String containing the command to send.
      lib_path: (optional) path to the folder containing the dependent libs.
      Same other arguments of GetCmdStatusAndOutput.
    """
    # The first time this command is run the device is inspected to check
    # whether a wrapper for running PIE executable is needed (only Android ICS)
    # or not. The results is cached, so the wrapper is pushed only once.
    if self._pie_wrapper is None:
      # None: did not check; '': did check and not needed; '/path': use /path.
      self._pie_wrapper = ''
      if self.GetBuildId().startswith('I'):  # Ixxxx = Android ICS.
        run_pie_dist_path = os.path.join(constants.GetOutDirectory(), 'run_pie')
        assert os.path.exists(run_pie_dist_path), 'Please build run_pie'
        # The PIE loader must be pushed manually (i.e. no PushIfNeeded) because
        # PushIfNeeded requires md5sum and md5sum requires the wrapper as well.
        command = 'push %s %s' % (run_pie_dist_path, PIE_WRAPPER_PATH)
        assert _HasAdbPushSucceeded(self._adb.SendCommand(command))
        self._pie_wrapper = PIE_WRAPPER_PATH
    if self._pie_wrapper:
      command = '%s %s' % (self._pie_wrapper, command)
    if lib_path:
      command = 'LD_LIBRARY_PATH=%s %s' % (lib_path, command)
    return self.GetShellCommandStatusAndOutput(command, *args, **kw)
# It is tempting to turn this function into a generator, however this is not
# possible without using a private (local) adb_shell instance (to ensure no
# other command interleaves usage of it), which would defeat the main aim of
# being able to reuse the adb shell instance across commands.
  def RunShellCommand(self, command, timeout_time=20, log_result=False):
    """Send a command to the adb shell and return the result.

    Args:
      command: String containing the shell command to send. Must not include
        the single quotes as we use them to escape the whole command.
      timeout_time: Number of seconds to wait for command to respond before
        retrying, used by AdbInterface.SendShellCommand.
      log_result: Boolean to indicate whether we should log the result of the
        shell command.

    Returns:
      list containing the lines of output received from running the command
    """
    self._LogShell(command)
    # The whole command is wrapped in single quotes below; an embedded quote
    # would break that escaping, hence the warning.
    if "'" in command:
      logging.warning(command + " contains ' quotes")
    result = self._adb.SendShellCommand(
        "'%s'" % command, timeout_time).splitlines()
    # TODO(b.kelemen): we should really be able to drop the stderr of the
    # command or raise an exception based on what the caller wants.
    result = [ l for l in result if not l.startswith('WARNING') ]
    if ['error: device not found'] == result:
      raise errors.DeviceUnresponsiveError('device not found')
    if log_result:
      self._LogShell('\n'.join(result))
    return result
  def GetShellCommandStatusAndOutput(self, command, timeout_time=20,
                                     log_result=False):
    """See RunShellCommand() above.

    Returns:
      The tuple (exit code, list of output lines).
    """
    # adb shell does not propagate exit codes, so append the status (prefixed
    # with '%') to the output and recover it from the last line.
    lines = self.RunShellCommand(
        command + '; echo %$?', timeout_time, log_result)
    last_line = lines[-1]
    status_pos = last_line.rfind('%')
    assert status_pos >= 0
    status = int(last_line[status_pos + 1:])
    # If '%' starts the line, the status was echoed on its own line: drop it.
    # Otherwise the command's last output line had no trailing newline and the
    # status got appended to it, so strip only the status suffix.
    if status_pos == 0:
      lines = lines[:-1]
    else:
      lines = lines[:-1] + [last_line[:status_pos]]
    return (status, lines)
def KillAll(self, process, signum=9, with_su=False):
"""Android version of killall, connected via adb.
Args:
process: name of the process to kill off.
signum: signal to use, 9 (SIGKILL) by default.
with_su: wether or not to use su to kill the processes.
Returns:
the number of processes killed
"""
pids = self.ExtractPid(process)
if pids:
cmd = 'kill -%d %s' % (signum, ' '.join(pids))
if with_su:
self.RunShellCommandWithSU(cmd)
else:
self.RunShellCommand(cmd)
return len(pids)
def KillAllBlocking(self, process, timeout_sec, signum=9, with_su=False):
"""Blocking version of killall, connected via adb.
This waits until no process matching the corresponding name appears in ps'
output anymore.
Args:
process: name of the process to kill off
timeout_sec: the timeout in seconds
signum: same as |KillAll|
with_su: same as |KillAll|
Returns:
the number of processes killed
"""
processes_killed = self.KillAll(process, signum=signum, with_su=with_su)
if processes_killed:
elapsed = 0
wait_period = 0.1
# Note that this doesn't take into account the time spent in ExtractPid().
while self.ExtractPid(process) and elapsed < timeout_sec:
time.sleep(wait_period)
elapsed += wait_period
if elapsed >= timeout_sec:
return processes_killed - self.ExtractPid(process)
return processes_killed
@staticmethod
def _GetActivityCommand(package, activity, wait_for_completion, action,
category, data, extras, trace_file_name, force_stop,
flags):
"""Creates command to start |package|'s activity on the device.
Args - as for StartActivity
Returns:
the command to run on the target to start the activity
"""
cmd = 'am start -a %s' % action
if force_stop:
cmd += ' -S'
if wait_for_completion:
cmd += ' -W'
if category:
cmd += ' -c %s' % category
if package and activity:
cmd += ' -n %s/%s' % (package, activity)
if data:
cmd += ' -d "%s"' % data
if extras:
for key in extras:
value = extras[key]
if isinstance(value, str):
cmd += ' --es'
elif isinstance(value, bool):
cmd += ' --ez'
elif isinstance(value, int):
cmd += ' --ei'
else:
raise NotImplementedError(
'Need to teach StartActivity how to pass %s extras' % type(value))
cmd += ' %s %s' % (key, value)
if trace_file_name:
cmd += ' --start-profiler ' + trace_file_name
if flags:
cmd += ' -f %s' % flags
return cmd
def StartActivity(self, package, activity, wait_for_completion=False,
action='android.intent.action.VIEW',
category=None, data=None,
extras=None, trace_file_name=None,
force_stop=False, flags=None):
"""Starts |package|'s activity on the device.
Args:
package: Name of package to start (e.g. 'com.google.android.apps.chrome').
activity: Name of activity (e.g. '.Main' or
'com.google.android.apps.chrome.Main').
wait_for_completion: wait for the activity to finish launching (-W flag).
action: string (e.g. "android.intent.action.MAIN"). Default is VIEW.
category: string (e.g. "android.intent.category.HOME")
data: Data string to pass to activity (e.g. 'http://www.example.com/').
extras: Dict of extras to pass to activity. Values are significant.
trace_file_name: If used, turns on and saves the trace to this file name.
force_stop: force stop the target app before starting the activity (-S
flag).
Returns:
The output of the underlying command as a list of lines.
"""
cmd = self._GetActivityCommand(package, activity, wait_for_completion,
action, category, data, extras,
trace_file_name, force_stop, flags)
return self.RunShellCommand(cmd)
  def StartActivityTimed(self, package, activity, wait_for_completion=False,
                         action='android.intent.action.VIEW',
                         category=None, data=None,
                         extras=None, trace_file_name=None,
                         force_stop=False, flags=None):
    """Starts |package|'s activity on the device, returning the start time

    Args - as for StartActivity

    Returns:
      A tuple containing:
      - the output of the underlying command as a list of lines, and
      - a timestamp string for the time at which the activity started
    """
    cmd = self._GetActivityCommand(package, activity, wait_for_completion,
                                   action, category, data, extras,
                                   trace_file_name, force_stop, flags)
    self.StartMonitoringLogcat()
    # Emit a marker into logcat right before the activity starts, then wait
    # for it to show up so its logcat timestamp can be extracted.
    out = self.RunShellCommand('log starting activity; ' + cmd)
    activity_started_re = re.compile('.*starting activity.*')
    m = self.WaitForLogMatch(activity_started_re, None)
    assert m
    start_line = m.group(0)
    return (out, GetLogTimestamp(start_line, self.GetDeviceYear()))
  def StartCrashUploadService(self, package):
    """Starts the minidump-upload service for |package| and waits briefly.

    NOTE(review): the fixed 15s sleep presumably gives the service time to
    process pending minidumps -- confirm against callers.
    """
    # TODO(frankf): We really need a python wrapper around Intent
    # to be shared with StartActivity/BroadcastIntent.
    cmd = (
        'am startservice -a %s.crash.ACTION_FIND_ALL -n '
        '%s/%s.crash.MinidumpUploadService' %
        (constants.PACKAGE_INFO['chrome'].package,
         package,
         constants.PACKAGE_INFO['chrome'].package))
    am_output = self.RunShellCommandWithSU(cmd)
    assert am_output and 'Starting' in am_output[-1], (
        'Service failed to start: %s' % am_output)
    time.sleep(15)
def BroadcastIntent(self, package, intent, *args):
"""Send a broadcast intent.
Args:
package: Name of package containing the intent.
intent: Name of the intent.
args: Optional extra arguments for the intent.
"""
cmd = 'am broadcast -a %s.%s %s' % (package, intent, ' '.join(args))
self.RunShellCommand(cmd)
def GoHome(self):
"""Tell the device to return to the home screen. Blocks until completion."""
self.RunShellCommand('am start -W '
'-a android.intent.action.MAIN -c android.intent.category.HOME')
def CloseApplication(self, package):
"""Attempt to close down the application, using increasing violence.
Args:
package: Name of the process to kill off, e.g.
com.google.android.apps.chrome
"""
self.RunShellCommand('am force-stop ' + package)
def GetApplicationPath(self, package):
"""Get the installed apk path on the device for the given package.
Args:
package: Name of the package.
Returns:
Path to the apk on the device if it exists, None otherwise.
"""
pm_path_output = self.RunShellCommand('pm path ' + package)
# The path output contains anything if and only if the package
# exists.
if pm_path_output:
# pm_path_output is of the form: "package:/path/to/foo.apk"
return pm_path_output[0].split(':')[1]
else:
return None
def ClearApplicationState(self, package):
"""Closes and clears all state for the given |package|."""
# Check that the package exists before clearing it. Necessary because
# calling pm clear on a package that doesn't exist may never return.
pm_path_output = self.RunShellCommand('pm path ' + package)
# The path output only contains anything if and only if the package exists.
if pm_path_output:
self.RunShellCommand('pm clear ' + package)
  def SendKeyEvent(self, keycode):
    """Sends keycode to the device.

    Args:
      keycode: Numeric keycode to send (see "enum" at top of file).
    """
    self.RunShellCommand('input keyevent %d' % keycode)
  def _RunMd5Sum(self, host_path, device_path):
    """Gets the md5sum of a host path and device path.

    Args:
      host_path: Path (file or directory) on the host.
      device_path: Path on the device.

    Returns:
      A tuple containing lists of the host and device md5sum results as
      created by _ParseMd5SumOutput().
    """
    md5sum_dist_path = os.path.join(constants.GetOutDirectory(),
                                    'md5sum_dist')
    assert os.path.exists(md5sum_dist_path), 'Please build md5sum.'
    # Re-push the md5sum binary only when its mtime changed since the last
    # push (same mtime-based skip cache as PushIfNeeded).
    md5sum_dist_mtime = os.stat(md5sum_dist_path).st_mtime
    if (md5sum_dist_path not in self._push_if_needed_cache or
        self._push_if_needed_cache[md5sum_dist_path] != md5sum_dist_mtime):
      command = 'push %s %s' % (md5sum_dist_path, MD5SUM_DEVICE_FOLDER)
      assert _HasAdbPushSucceeded(self._adb.SendCommand(command))
      self._push_if_needed_cache[md5sum_dist_path] = md5sum_dist_mtime
    (_, md5_device_output) = self.GetAndroidToolStatusAndOutput(
        self._util_wrapper + ' ' + MD5SUM_DEVICE_PATH + ' ' + device_path,
        lib_path=MD5SUM_DEVICE_FOLDER,
        timeout_time=2 * 60)
    device_hash_tuples = _ParseMd5SumOutput(md5_device_output)
    assert os.path.exists(host_path), 'Local path not found %s' % host_path
    md5sum_output = cmd_helper.GetCmdOutput(
        [os.path.join(constants.GetOutDirectory(), 'md5sum_bin_host'),
         host_path])
    host_hash_tuples = _ParseMd5SumOutput(md5sum_output.splitlines())
    return (host_hash_tuples, device_hash_tuples)
  def GetFilesChanged(self, host_path, device_path, ignore_filenames=False):
    """Compares the md5sum of a host path against a device path.

    Note: Ignores extra files on the device.

    Args:
      host_path: Path (file or directory) on the host.
      device_path: Path on the device.
      ignore_filenames: If True only the file contents are considered when
        checking whether a file has changed, otherwise the relative path
        must also match.

    Returns:
      A list of tuples of the form (host_path, device_path) for files whose
      md5sums do not match.
    """
    # Md5Sum resolves symbolic links in path names so the calculation of
    # relative path names from its output will need the real path names of the
    # base directories. Having calculated these they are used throughout the
    # function since this makes us less subject to any future changes to Md5Sum.
    real_host_path = os.path.realpath(host_path)
    real_device_path = self.RunShellCommand('realpath "%s"' % device_path)[0]
    host_hash_tuples, device_hash_tuples = self._RunMd5Sum(
        real_host_path, real_device_path)
    if len(host_hash_tuples) > len(device_hash_tuples):
      logging.info('%s files do not exist on the device' %
                   (len(host_hash_tuples) - len(device_hash_tuples)))
    # Translate host md5sum entries into (relative path, hash) pairs.
    host_rel = [(os.path.relpath(os.path.normpath(t.path), real_host_path),
                 t.hash)
                for t in host_hash_tuples]
    if os.path.isdir(real_host_path):
      def RelToRealPaths(rel_path):
        return (os.path.join(real_host_path, rel_path),
                os.path.join(real_device_path, rel_path))
    else:
      # Only a single file: the relative path is degenerate.
      assert len(host_rel) == 1
      def RelToRealPaths(_):
        return (real_host_path, real_device_path)
    if ignore_filenames:
      # If we are ignoring file names, then we want to push any file for which
      # a file with an equivalent MD5 sum does not exist on the device.
      device_hashes = set([h.hash for h in device_hash_tuples])
      ShouldPush = lambda p, h: h not in device_hashes
    else:
      # Otherwise, we want to push any file on the host for which a file with
      # an equivalent MD5 sum does not exist at the same relative path on the
      # device.
      device_rel = dict([(os.path.relpath(os.path.normpath(t.path),
                                          real_device_path),
                          t.hash)
                         for t in device_hash_tuples])
      ShouldPush = lambda p, h: p not in device_rel or h != device_rel[p]
    return [RelToRealPaths(path) for path, host_hash in host_rel
            if ShouldPush(path, host_hash)]
  def PushIfNeeded(self, host_path, device_path):
    """Pushes |host_path| to |device_path|.

    Works for files and directories. This method skips copying any paths in
    |test_data_paths| that already exist on the device with the same hash.

    All pushed files can be removed by calling RemovePushedFiles().
    """
    MAX_INDIVIDUAL_PUSHES = 50
    if not os.path.exists(host_path):
      raise device_errors.CommandFailedError(
          'Local path not found %s' % host_path, device=str(self))
    # See if the file on the host changed since the last push (if any) and
    # return early if it didn't. Note that this shortcut assumes that the tests
    # on the device don't modify the files.
    if not os.path.isdir(host_path):
      if host_path in self._push_if_needed_cache:
        host_path_mtime = self._push_if_needed_cache[host_path]
        if host_path_mtime == os.stat(host_path).st_mtime:
          return
    size = host_utils.GetRecursiveDiskUsage(host_path)
    self._pushed_files.append(device_path)
    self._potential_push_size += size
    if os.path.isdir(host_path):
      self.RunShellCommand('mkdir -p "%s"' % device_path)
    changed_files = self.GetFilesChanged(host_path, device_path)
    logging.info('Found %d files that need to be pushed to %s',
                 len(changed_files), device_path)
    if not changed_files:
      return
    def Push(host, device):
      # NOTE: We can't use adb_interface.Push() because it hardcodes a timeout
      # of 60 seconds which isn't sufficient for a lot of users of this method.
      push_command = 'push %s %s' % (host, device)
      self._LogShell(push_command)
      # Retry push with increasing backoff if the device is busy.
      retry = 0
      while True:
        output = self._adb.SendCommand(push_command, timeout_time=30 * 60)
        if _HasAdbPushSucceeded(output):
          # Record the pushed mtime so the early-return shortcut above can
          # skip this file next time.
          if not os.path.isdir(host_path):
            self._push_if_needed_cache[host] = os.stat(host).st_mtime
          return
        if retry < 3:
          retry += 1
          wait_time = 5 * retry
          logging.error('Push failed, retrying in %d seconds: %s' %
                        (wait_time, output))
          time.sleep(wait_time)
        else:
          raise Exception('Push failed: %s' % output)
    diff_size = 0
    if len(changed_files) <= MAX_INDIVIDUAL_PUSHES:
      diff_size = sum(host_utils.GetRecursiveDiskUsage(f[0])
                      for f in changed_files)
    # TODO(craigdh): Replace this educated guess with a heuristic that
    # approximates the push time for each method.
    if len(changed_files) > MAX_INDIVIDUAL_PUSHES or diff_size > 0.5 * size:
      # Pushing the whole tree at once is cheaper than many small pushes.
      self._actual_push_size += size
      Push(host_path, device_path)
    else:
      for f in changed_files:
        Push(f[0], f[1])
      self._actual_push_size += diff_size
  def GetPushSizeInfo(self):
    """Get total size of pushes to the device done via PushIfNeeded()

    Returns:
      A tuple:
        1. Total size of push requests to PushIfNeeded (MB)
        2. Total size that was actually pushed (MB)
    """
    return (self._potential_push_size, self._actual_push_size)
  def GetFileContents(self, filename, log_result=False):
    """Gets contents from the file specified by |filename| as output lines.

    Read errors are suppressed on the device side (2>/dev/null), so a missing
    or unreadable file yields an empty list rather than an error.
    """
    return self.RunShellCommand('cat "%s" 2>/dev/null' % filename,
                                log_result=log_result)
def SetFileContents(self, filename, contents):
"""Writes |contents| to the file specified by |filename|."""
with tempfile.NamedTemporaryFile() as f:
f.write(contents)
f.flush()
self._adb.Push(f.name, filename)
def RunShellCommandWithSU(self, command, timeout_time=20, log_result=False):
return self.RunShellCommand('su -c %s' % command, timeout_time, log_result)
def CanAccessProtectedFileContents(self):
"""Returns True if Get/SetProtectedFileContents would work via "su" or adb
shell running as root.
Devices running user builds don't have adb root, but may provide "su" which
can be used for accessing protected files.
"""
return (self._GetProtectedFileCommandRunner() != None)
  def _GetProtectedFileCommandRunner(self):
    """Finds the best method to access protected files on the device.

    Returns:
      1. None when privileged files cannot be accessed on the device.
      2. Otherwise: A function taking a single parameter: a string with command
        line arguments. Running that function executes the command with
        the appropriate method.
    """
    # The probe result is cached; only the first call does device work.
    if self._protected_file_access_method_initialized:
      return self._privileged_command_runner
    self._privileged_command_runner = None
    self._protected_file_access_method_initialized = True
    # Try plain adb shell first, then 'su -c'.
    for cmd in [self.RunShellCommand, self.RunShellCommandWithSU]:
      # Get contents of the auxv vector for the init(8) process from a small
      # binary file that always exists on linux and is always read-protected.
      contents = cmd('cat /proc/1/auxv')
      # The leading 4 or 8-bytes of auxv vector is a_type. There are not many
      # reserved a_type values, hence byte 2 must always be '\0' for a realistic
      # auxv. See /usr/include/elf.h.
      if len(contents) > 0 and (contents[0][2] == '\0'):
        self._privileged_command_runner = cmd
        break
    return self._privileged_command_runner
def GetProtectedFileContents(self, filename):
"""Gets contents from the protected file specified by |filename|.
This is potentially less efficient than GetFileContents.
"""
command = 'cat "%s" 2> /dev/null' % filename
command_runner = self._GetProtectedFileCommandRunner()
if command_runner:
return command_runner(command)
else:
logging.warning('Could not access protected file: %s' % filename)
return []
  def SetProtectedFileContents(self, filename, contents):
    """Writes |contents| to the protected file specified by |filename|.

    This is less efficient than SetFileContents.
    """
    # The contents and a copy script are staged in unprotected temp files and
    # the script is then run with whatever privileged runner is available.
    with DeviceTempFile(self) as temp_file:
      with DeviceTempFile(self, suffix=".sh") as temp_script:
        # Put the contents in a temporary file
        self.SetFileContents(temp_file.name, contents)
        # Create a script to copy the file contents to its final destination
        self.SetFileContents(temp_script.name,
                             'cat %s > %s' % (temp_file.name, filename))
        command = 'sh %s' % temp_script.name
        command_runner = self._GetProtectedFileCommandRunner()
        if command_runner:
          return command_runner(command)
        else:
          # Best-effort: failure is logged, not raised.
          logging.warning(
              'Could not set contents of protected file: %s' % filename)
def RemovePushedFiles(self):
"""Removes all files pushed with PushIfNeeded() from the device."""
for p in self._pushed_files:
self.RunShellCommand('rm -r %s' % p, timeout_time=2 * 60)
def ListPathContents(self, path):
"""Lists files in all subdirectories of |path|.
Args:
path: The path to list.
Returns:
A dict of {"name": (size, lastmod), ...}.
"""
# Example output:
# /foo/bar:
# -rw-r----- user group 102 2011-05-12 12:29:54.131623387 +0100 baz.txt
re_file = re.compile('^-(?P<perms>[^\s]+)\s+'
'(?P<user>[^\s]+)\s+'
'(?P<group>[^\s]+)\s+'
'(?P<size>[^\s]+)\s+'
'(?P<date>[^\s]+)\s+'
'(?P<time>[^\s]+)\s+'
'(?P<filename>[^\s]+)$')
return _GetFilesFromRecursiveLsOutput(
path, self.RunShellCommand('ls -lR %s' % path), re_file,
self.GetUtcOffset())
  def GetUtcOffset(self):
    """Returns the device's UTC offset string (e.g. '+0100'), cached."""
    if not self._device_utc_offset:
      self._device_utc_offset = self.RunShellCommand('date +%z')[0]
    return self._device_utc_offset
def SetJavaAssertsEnabled(self, enable):
"""Sets or removes the device java assertions property.
Args:
enable: If True the property will be set.
Returns:
True if the file was modified (reboot is required for it to take effect).
"""
# First ensure the desired property is persisted.
temp_props_file = tempfile.NamedTemporaryFile()
properties = ''
if self._adb.Pull(LOCAL_PROPERTIES_PATH, temp_props_file.name):
with open(temp_props_file.name) as f:
properties = f.read()
re_search = re.compile(r'^\s*' + re.escape(JAVA_ASSERT_PROPERTY) +
r'\s*=\s*all\s*$', re.MULTILINE)
if enable != bool(re.search(re_search, properties)):
re_replace = re.compile(r'^\s*' + re.escape(JAVA_ASSERT_PROPERTY) +
r'\s*=\s*\w+\s*$', re.MULTILINE)
properties = re.sub(re_replace, '', properties)
if enable:
properties += '\n%s=all\n' % JAVA_ASSERT_PROPERTY
file(temp_props_file.name, 'w').write(properties)
self._adb.Push(temp_props_file.name, LOCAL_PROPERTIES_PATH)
# Next, check the current runtime value is what we need, and
# if not, set it and report that a reboot is required.
was_set = 'all' in self.system_properties[JAVA_ASSERT_PROPERTY]
if was_set == enable:
return False
self.system_properties[JAVA_ASSERT_PROPERTY] = enable and 'all' or ''
return True
def GetBuildId(self):
"""Returns the build ID of the system (e.g. JRM79C)."""
build_id = self.system_properties['ro.build.id']
assert build_id
return build_id
def GetBuildType(self):
"""Returns the build type of the system (e.g. eng)."""
build_type = self.system_properties['ro.build.type']
assert build_type
return build_type
def GetBuildProduct(self):
"""Returns the build product of the device (e.g. maguro)."""
build_product = self.system_properties['ro.build.product']
assert build_product
return build_product
def GetProductName(self):
"""Returns the product name of the device (e.g. takju)."""
name = self.system_properties['ro.product.name']
assert name
return name
def GetBuildFingerprint(self):
"""Returns the build fingerprint of the device."""
build_fingerprint = self.system_properties['ro.build.fingerprint']
assert build_fingerprint
return build_fingerprint
def GetDescription(self):
"""Returns the description of the system.
For example, "yakju-userdebug 4.1 JRN54F 364167 dev-keys".
"""
description = self.system_properties['ro.build.description']
assert description
return description
def GetProductModel(self):
"""Returns the name of the product model (e.g. "Galaxy Nexus") """
model = self.system_properties['ro.product.model']
assert model
return model
def GetWifiIP(self):
"""Returns the wifi IP on the device."""
wifi_ip = self.system_properties['dhcp.wlan0.ipaddress']
# Do not assert here. Devices (e.g. emulators) may not have a WifiIP.
return wifi_ip
def GetSubscriberInfo(self):
  """Returns the device subscriber info (e.g. GSM and device ID) as string."""
  # Do not assert here. Devices (e.g. Nakasi on K) may not have iphonesubinfo.
  lines = self.RunShellCommand('dumpsys iphonesubinfo')
  return '\n'.join(lines)
def GetBatteryInfo(self):
  """Returns a {str: str} dict of battery info (e.g. status, level, etc)."""
  lines = self.RunShellCommand('dumpsys battery')
  assert lines
  # The first line of 'dumpsys battery' output is skipped; each remaining
  # line is a 'key: value' pair.
  pairs = (entry.partition(': ') for entry in lines[1:])
  return dict((key.strip(), value.strip()) for key, _, value in pairs)
def GetSetupWizardStatus(self):
  """Returns the status of the device setup wizard (e.g. DISABLED)."""
  # On some devices, the status is empty if not otherwise set. In such cases
  # the caller should expect an empty string to be returned.
  return self.system_properties['ro.setupwizard.mode']
def StartMonitoringLogcat(self, clear=True, logfile=None, filters=None):
  """Starts monitoring the output of logcat, for use with WaitForLogMatch.

  Args:
    clear: If True the existing logcat output will be cleared, to avoiding
      matching historical output lurking in the log.
    logfile: Optional writable file object; all logcat output is mirrored
      into it (with pty '\r\r\n' line endings normalized to '\n').
    filters: A list of logcat filters to be used.
  """
  if clear:
    self.RunShellCommand('logcat -c')
  args = []
  # Forward any adb device-selection flags (e.g. '-s <serial>').
  if self._adb._target_arg:
    args += shlex.split(self._adb._target_arg)
  args += ['logcat', '-v', 'threadtime']
  if filters:
    args.extend(filters)
  else:
    args.append('*:v')
  if logfile:
    # Wrap so pexpect's pseudo-tty '\r\r\n' endings become plain '\n'.
    logfile = NewLineNormalizer(logfile)
  # Spawn logcat and synchronize with it. Retry up to 4 times: the sync
  # marker may be lost if logcat starts slowly; on failure close the spawn
  # (force=True kills the child) and try again.
  for _ in range(4):
    self._logcat = pexpect.spawn(constants.GetAdbPath(), args, timeout=10,
                                 logfile=logfile)
    if not clear or self.SyncLogCat():
      break
    self._logcat.close(force=True)
  else:
    # for/else: all 4 attempts failed to synchronize.
    logging.critical('Error reading from logcat: ' + str(self._logcat.match))
    sys.exit(1)
def SyncLogCat(self):
  """Synchronize with logcat.

  Synchronize with the monitored logcat so that WaitForLogMatch will only
  consider new message that are received after this point in time.

  Returns:
    True if the synchronization succeeded.
  """
  assert self._logcat
  # Write a unique marker into the log, then wait until the monitored
  # logcat echoes it back. expect() returns the index of the matched
  # pattern: 0 is the marker; 1 (EOF) and 2 (TIMEOUT) mean sync failed.
  tag = 'logcat_sync_%s' % time.time()
  self.RunShellCommand('log ' + tag)
  return self._logcat.expect([tag, pexpect.EOF, pexpect.TIMEOUT]) == 0
def GetMonitoredLogCat(self):
  """Returns an "adb logcat" command as created by pexpected.spawn."""
  if self._logcat:
    return self._logcat
  # Lazily start monitoring, preserving any history already in the log.
  self.StartMonitoringLogcat(clear=False)
  return self._logcat
def WaitForLogMatch(self, success_re, error_re, clear=False, timeout=10):
  """Blocks until a matching line is logged or a timeout occurs.

  Args:
    success_re: A compiled re to search each line for.
    error_re: A compiled re which, if found, terminates the search for
      |success_re|. If None is given, no error condition will be detected.
    clear: If True the existing logcat output will be cleared, defaults to
      false.
    timeout: Timeout in seconds to wait for a log match.

  Raises:
    pexpect.TIMEOUT after |timeout| seconds without a match for |success_re|
    or |error_re|.

  Returns:
    The re match object if |success_re| is matched first or None if |error_re|
    is matched first.
  """
  logging.info('<<< Waiting for logcat:' + str(success_re.pattern))
  t0 = time.time()
  while True:
    if not self._logcat:
      self.StartMonitoringLogcat(clear)
    try:
      while True:
        # Note this will block for upto the timeout _per log line_, so we need
        # to calculate the overall timeout remaining since t0.
        time_remaining = t0 + timeout - time.time()
        if time_remaining < 0:
          raise pexpect.TIMEOUT(self._logcat)
        self._logcat.expect(PEXPECT_LINE_RE, timeout=time_remaining)
        line = self._logcat.match.group(1)
        if error_re:
          error_match = error_re.search(line)
          if error_match:
            # Error pattern wins: stop searching and signal failure via None.
            return None
        success_match = success_re.search(line)
        if success_match:
          return success_match
        logging.info('<<< Skipped Logcat Line:' + str(line))
    except pexpect.TIMEOUT:
      # Re-raise with a friendlier message than pexpect's default.
      raise pexpect.TIMEOUT(
          'Timeout (%ds) exceeded waiting for pattern "%s" (tip: use -vv '
          'to debug)' %
          (timeout, success_re.pattern))
    except pexpect.EOF:
      # It seems that sometimes logcat can end unexpectedly. This seems
      # to happen during Chrome startup after a reboot followed by a cache
      # clean. I don't understand why this happens, but this code deals with
      # getting EOF in logcat.
      logging.critical('Found EOF in adb logcat. Restarting...')
      # Rerun spawn with original arguments. Note that self._logcat.args[0] is
      # the path of adb, so we don't want it in the arguments.
      # The outer while-loop then resumes reading from the fresh spawn.
      self._logcat = pexpect.spawn(constants.GetAdbPath(),
                                   self._logcat.args[1:],
                                   timeout=self._logcat.timeout,
                                   logfile=self._logcat.logfile)
def StartRecordingLogcat(self, clear=True, filters=None):
  """Starts recording logcat output to eventually be saved as a string.

  This call should come before some series of tests are run, with either
  StopRecordingLogcat or SearchLogcatRecord following the tests.

  Args:
    clear: True if existing log output should be cleared.
    filters: A list of logcat filters to be used.
  """
  if not filters:
    filters = ['*:v']
  if clear:
    self._adb.SendCommand('logcat -c')
  logcat_command = 'adb %s logcat -v threadtime %s' % (self._adb._target_arg,
                                                       ' '.join(filters))
  # Unbuffered temp file so GetCurrentRecordedLogcat can read output as it
  # arrives. NOTE(review): 'bufsize' is the Python 2 spelling of this
  # argument ('buffering' in Python 3).
  self._logcat_tmpoutfile = tempfile.NamedTemporaryFile(bufsize=0)
  # shell=True: logcat_command is a single string, not an argv list.
  self.logcat_process = subprocess.Popen(logcat_command, shell=True,
                                         stdout=self._logcat_tmpoutfile)
def GetCurrentRecordedLogcat(self):
  """Return the current content of the logcat being recorded.

  Call this after StartRecordingLogcat() and before StopRecordingLogcat().
  This can be useful to perform timed polling/parsing.

  Returns:
    Current logcat output as a single string, or None if
    StopRecordingLogcat() was already called.
  """
  recording = self._logcat_tmpoutfile
  if not recording:
    return None
  # Re-open by name so the subprocess's write position is unaffected.
  with open(recording.name) as snapshot:
    return snapshot.read()
def StopRecordingLogcat(self):
  """Stops an existing logcat recording subprocess and returns output.

  Returns:
    The logcat output as a string or an empty string if logcat was not
    being recorded at the time.
  """
  process = self.logcat_process
  if not process:
    return ''
  # Read the temp file rather than communicate() with the process: the
  # pipe may break and yield incomplete output. poll() can legitimately
  # return 0, so compare against None explicitly.
  if process.poll() is None:
    process.kill()
    process.wait()
  self.logcat_process = None
  recording = self._logcat_tmpoutfile
  recording.seek(0)
  output = recording.read()
  recording.close()
  self._logcat_tmpoutfile = None
  return output
@staticmethod
def SearchLogcatRecord(record, message, thread_id=None, proc_id=None,
log_level=None, component=None):
"""Searches the specified logcat output and returns results.
This method searches through the logcat output specified by record for a
certain message, narrowing results by matching them against any other
specified criteria. It returns all matching lines as described below.
Args:
record: A string generated by Start/StopRecordingLogcat to search.
message: An output string to search for.
thread_id: The thread id that is the origin of the message.
proc_id: The process that is the origin of the message.
log_level: The log level of the message.
component: The name of the component that would create the message.
Returns:
A list of dictionaries represeting matching entries, each containing keys
thread_id, proc_id, log_level, component, and message.
"""
if thread_id:
thread_id = str(thread_id)
if proc_id:
proc_id = str(proc_id)
results = []
reg = re.compile('(\d+)\s+(\d+)\s+([A-Z])\s+([A-Za-z]+)\s*:(.*)$',
re.MULTILINE)
log_list = reg.findall(record)
for (tid, pid, log_lev, comp, msg) in log_list:
if ((not thread_id or thread_id == tid) and
(not proc_id or proc_id == pid) and
(not log_level or log_level == log_lev) and
(not component or component == comp) and msg.find(message) > -1):
match = dict({'thread_id': tid, 'proc_id': pid,
'log_level': log_lev, 'component': comp,
'message': msg})
results.append(match)
return results
def ExtractPid(self, process_name):
  """Extracts Process Ids for a given process name from Android Shell.

  Args:
    process_name: name of the process on the device.

  Returns:
    List of all the process ids (as strings) that match the given name.
    If the name of a process exactly matches the given name, the pid of
    that process will be inserted to the front of the pid list.
  """
  pids = []
  for line in self.RunShellCommand('ps', log_result=False):
    columns = line.split()
    # Need at least two columns: the PID (column 1) and name (last column).
    if len(columns) < 2 or process_name not in columns[-1]:
      continue
    if process_name == columns[-1]:
      # Exact match goes to the front of the list.
      pids.insert(0, columns[1])
    else:
      pids.append(columns[1])
  return pids
def GetIoStats(self):
  """Gets cumulative disk IO stats since boot (for all processes).

  Returns:
    Dict of {num_reads, num_writes, read_ms, write_ms} or None if there
    was an error.
  """
  # /proc/diskstats columns (0-based): 2 is the device name, followed by
  # the kernel I/O counters. Fix: newer kernels append extra discard/flush
  # counters, which made the previous fixed-width namedtuple unpacking
  # (IoStats._make over all fields) raise TypeError; index only the fields
  # we actually report.
  for line in self.GetFileContents('/proc/diskstats', log_result=False):
    fields = line.split()
    if fields[2] != 'mmcblk0':
      continue
    counters = [int(f) for f in fields[3:]]
    return {
        'num_reads': counters[0],   # reads issued
        'num_writes': counters[4],  # writes completed
        'read_ms': counters[3],     # ms spent reading
        'write_ms': counters[7],    # ms spent writing
    }
  logging.warning('Could not find disk IO stats.')
  return None
def GetMemoryUsageForPid(self, pid):
  """Returns the memory usage for given pid.

  Args:
    pid: The pid number of the specific process running on device.

  Returns:
    Dict of {metric:usage_kb}, for the process which has specified pid.
    The metric keys which may be included are: Size, Rss, Pss, Shared_Clean,
    Shared_Dirty, Private_Clean, Private_Dirty, VmHWM.
  """
  showmap = self.RunShellCommand('showmap %d' % pid)
  if not showmap or not showmap[-1].endswith('TOTAL'):
    logging.warning('Invalid output for showmap %s', str(showmap))
    return {}
  items = showmap[-1].split()
  if len(items) != 9:
    logging.warning('Invalid TOTAL for showmap %s', str(items))
    return {}
  # The TOTAL line columns map 1:1 onto these metrics (last column is the
  # literal 'TOTAL' and is ignored by zip).
  metrics = ('Size', 'Rss', 'Pss', 'Shared_Clean', 'Shared_Dirty',
             'Private_Clean', 'Private_Dirty')
  usage_dict = collections.defaultdict(int)
  for metric, value in zip(metrics, items):
    usage_dict[metric] = int(value.strip())
  # Peak resident size comes from /proc/<pid>/status: 'VmHWM: <n> kB'.
  peak_value_kb = 0
  for line in self.GetProtectedFileContents('/proc/%s/status' % pid):
    if line.startswith('VmHWM:'):
      peak_value_kb = int(line.split(':')[1].strip().split(' ')[0])
      break
  usage_dict['VmHWM'] = peak_value_kb
  if not peak_value_kb:
    logging.warning('Could not find memory peak value for pid ' + str(pid))
  return usage_dict
def ProcessesUsingDevicePort(self, device_port):
  """Lists processes using the specified device port on loopback interface.

  Args:
    device_port: Port on device we want to check.

  Returns:
    A list of (pid, process_name) tuples using the specified port.
  """
  # Loopback address as it appears in /proc/net/tcp: little-endian hex IP
  # (7F000001 -> 0100007F) followed by the port in upper-case hex.
  tcp_address = '0100007F:%04X' % device_port
  pids = []
  for tcp_line in self.RunShellCommand('cat /proc/net/tcp', log_result=False):
    tcp_fields = tcp_line.split()
    # Column 1 is the TCP port, and Column 9 is the inode of the socket.
    if tcp_fields[1] != tcp_address:
      continue
    socket_name = 'socket:[%s]' % tcp_fields[9]
    for lsof_line in self.RunShellCommand('lsof', log_result=False):
      lsof_fields = lsof_line.split()
      # Ignore the line if it has less than nine columns in it, which may
      # be the case when a process stops while lsof is executing.
      if len(lsof_fields) <= 8:
        continue
      # Column 0 is the executable name, column 1 the pid, column 8 the
      # inode in use.
      if lsof_fields[8] == socket_name:
        pids.append((int(lsof_fields[1]), lsof_fields[0]))
        break
  logging.info('PidsUsingDevicePort: %s', pids)
  return pids
def FileExistsOnDevice(self, file_name):
  """Checks whether the given file exists on the device.

  Args:
    file_name: Full path of file to check.

  Returns:
    True if the file exists, False otherwise.
  """
  assert '"' not in file_name, 'file_name cannot contain double quotes'
  try:
    output = self._adb.SendShellCommand(
        '\'test -e "%s"; echo $?\'' % (file_name))
    if 'test: not found' in output:
      # Old images without the 'test' binary: fall back to 'ls'.
      output = self._adb.SendShellCommand(
          '\'ls "%s" >/dev/null 2>&1; echo $?\'' % (file_name))
    return int(output) == 0
  except ValueError:
    if IsDeviceAttached(self._device):
      raise errors.DeviceUnresponsiveError('Device may be offline.')
    return False
def IsFileWritableOnDevice(self, file_name):
  """Checks whether the given file (or directory) is writable on the device.

  Args:
    file_name: Full path of file/directory to check.

  Returns:
    True if writable, False otherwise.
  """
  assert '"' not in file_name, 'file_name cannot contain double quotes'
  try:
    output = self._adb.SendShellCommand(
        '\'test -w "%s"; echo $?\'' % (file_name))
    if 'test: not found' in output:
      # No 'ls'-based fallback can check writability, so give up.
      raise errors.AbortError('"test" binary not found. OS too old.')
    return int(output) == 0
  except ValueError:
    if IsDeviceAttached(self._device):
      raise errors.DeviceUnresponsiveError('Device may be offline.')
    return False
@staticmethod
def GetTimestamp():
return time.strftime('%Y-%m-%d-%H%M%S', time.localtime())
@staticmethod
def EnsureHostDirectory(host_file):
host_dir = os.path.dirname(os.path.abspath(host_file))
if not os.path.exists(host_dir):
os.makedirs(host_dir)
def TakeScreenshot(self, host_file=None):
  """Saves a screenshot image to |host_file| on the host.

  Args:
    host_file: Absolute path to the image file to store on the host or None to
               use an autogenerated file name.

  Returns:
    Resulting host file name of the screenshot.
  """
  if not host_file:
    host_file = 'screenshot-%s.png' % self.GetTimestamp()
  host_file = os.path.abspath(host_file)
  self.EnsureHostDirectory(host_file)
  device_file = '%s/screenshot.png' % self.GetExternalStorage()
  # Capture on the device, pull to the host, then clean up the device copy.
  self.RunShellCommand(
      '/system/bin/screencap -p %s' % device_file)
  self.PullFileFromDevice(device_file, host_file)
  self.RunShellCommand('rm -f "%s"' % device_file)
  return host_file
def PullFileFromDevice(self, device_file, host_file):
  """Download |device_file| on the device from to |host_file| on the host.

  Args:
    device_file: Absolute path to the file to retrieve from the device.
    host_file: Absolute path to the file to store on the host.

  Raises:
    device_errors.AdbCommandFailedError: if the pull fails.
  """
  pulled = self._adb.Pull(device_file, host_file)
  if not pulled:
    raise device_errors.AdbCommandFailedError(
        ['pull', device_file, host_file], 'Failed to pull file from device.')
  assert os.path.exists(host_file)
def SetUtilWrapper(self, util_wrapper):
  """Sets a wrapper prefix to be used when running a locally-built
  binary on the device (ex.: md5sum_bin).
  """
  # Stored for later use by commands that invoke on-device helper binaries.
  self._util_wrapper = util_wrapper
def RunUIAutomatorTest(self, test, test_package, timeout):
  """Runs a single uiautomator test.

  Args:
    test: Test class/method.
    test_package: Name of the test jar.
    timeout: Timeout time in seconds.

  Raises:
    errors.InstrumentationError: if no test results could be parsed from
      the uiautomator output.

  Returns:
    An instance of am_instrument_parser.TestResult object.
  """
  cmd = 'uiautomator runtest %s -e class %s' % (test_package, test)
  self._LogShell(cmd)
  output = self._adb.SendShellCommand(cmd, timeout_time=timeout)
  # uiautomator doesn't fully conform to the instrumenation test runner
  # convention and doesn't terminate with INSTRUMENTATION_CODE.
  # Just assume the first result is valid.
  (test_results, _) = am_instrument_parser.ParseAmInstrumentOutput(output)
  if not test_results:
    raise errors.InstrumentationError(
        'no test results... device setup correctly?')
  return test_results[0]
def DismissCrashDialogIfNeeded(self):
  """Dismiss the error/ANR dialog if present.

  Returns: Name of the crashed package if a dialog is focused,
           None otherwise.
  """
  # Matches the focused-window line for an 'Application Error' or
  # 'Application Not Responding' dialog; group 2 is the package name.
  crash_window_re = re.compile(
      r'\s*mCurrentFocus.*Application (Error|Not Responding): (\S+)}')

  def _FindFocusedWindow():
    for line in self.RunShellCommand('dumpsys window windows'):
      found = re.match(crash_window_re, line)
      if found:
        return found
    return None

  focused = _FindFocusedWindow()
  if not focused:
    return None
  package = focused.group(2)
  logging.warning('Trying to dismiss %s dialog for %s' % focused.groups())
  # Navigate to the rightmost button and press it.
  self.SendKeyEvent(KEYCODE_DPAD_RIGHT)
  self.SendKeyEvent(KEYCODE_DPAD_RIGHT)
  self.SendKeyEvent(KEYCODE_ENTER)
  still_focused = _FindFocusedWindow()
  if still_focused:
    logging.error('Still showing a %s dialog for %s' % still_focused.groups())
  return package
def EfficientDeviceDirectoryCopy(self, source, dest):
  """ Copy a directory efficiently on the device

  Uses a shell script running on the target to copy new and changed files the
  source directory to the destination directory and remove added files. This
  is in some cases much faster than cp -r.

  Args:
    source: absolute path of source directory
    dest: absolute path of destination directory
  """
  logging.info('In EfficientDeviceDirectoryCopy %s %s', source, dest)
  # The helper script is pushed to a device-side temp file that is removed
  # automatically when the context manager exits.
  with DeviceTempFile(self, suffix=".sh") as temp_script_file:
    host_script_path = os.path.join(constants.DIR_SOURCE_ROOT,
                                    'build',
                                    'android',
                                    'pylib',
                                    'efficient_android_directory_copy.sh')
    self._adb.Push(host_script_path, temp_script_file.name)
    out = self.RunShellCommand(
        'sh %s %s %s' % (temp_script_file.name, source, dest),
        timeout_time=120)
    # Echo the script output, tagged with the last 4 chars of the serial
    # for readability when several devices run concurrently.
    if self._device:
      device_repr = self._device[-4:]
    else:
      device_repr = '????'
    for line in out:
      logging.info('[%s]> %s', device_repr, line)
def _GetControlUsbChargingCommand(self):
if self._control_usb_charging_command['cached']:
return self._control_usb_charging_command['command']
self._control_usb_charging_command['cached'] = True
if not self.IsRootEnabled():
return None
for command in CONTROL_USB_CHARGING_COMMANDS:
# Assert command is valid.
assert 'disable_command' in command
assert 'enable_command' in command
assert 'witness_file' in command
witness_file = command['witness_file']
if self.FileExistsOnDevice(witness_file):
self._control_usb_charging_command['command'] = command
return command
return None
def CanControlUsbCharging(self):
  """Returns True if a USB-charging control method exists for this device."""
  command = self._GetControlUsbChargingCommand()
  return command is not None
def DisableUsbCharging(self, timeout=10):
  """Turns off USB charging, polling until the device reports not charging.

  Args:
    timeout: Seconds to keep retrying before giving up.

  Raises:
    Exception: if no USB-charging control method is available.
    pexpect.TIMEOUT: if charging is still on after |timeout| seconds.
  """
  command = self._GetControlUsbChargingCommand()
  if not command:
    raise Exception('Unable to act on usb charging.')
  disable_command = command['disable_command']
  t0 = time.time()
  # Do not loop directly on self.IsDeviceCharging to cut the number of calls
  # to the device.
  while True:
    if t0 + timeout - time.time() < 0:
      # Fixed copy/paste bug: the message previously said "enable".
      raise pexpect.TIMEOUT('Unable to disable USB charging in time.')
    self.RunShellCommand(disable_command)
    if not self.IsDeviceCharging():
      break
def EnableUsbCharging(self, timeout=10):
  """Turns USB charging back on, polling until the device reports charging.

  Args:
    timeout: Seconds to keep retrying before giving up.
  """
  command = self._GetControlUsbChargingCommand()
  if not command:
    raise Exception('Unable to act on usb charging.')
  enable_command = command['enable_command']
  start = time.time()
  # Do not loop directly on self.IsDeviceCharging to cut the number of calls
  # to the device.
  while True:
    if start + timeout - time.time() < 0:
      raise pexpect.TIMEOUT('Unable to enable USB charging in time.')
    self.RunShellCommand(enable_command)
    if self.IsDeviceCharging():
      break
def IsDeviceCharging(self):
  """Returns True if any power source in 'dumpsys battery' reports charging.

  Returns:
    True if a 'powered:' line reports 'true', False otherwise.
  """
  for line in self.RunShellCommand('dumpsys battery'):
    if 'powered: ' in line:
      if line.split('powered: ')[1] == 'true':
        return True
  # Fix: previously fell off the end and returned None; return an explicit
  # (and still falsy) False for a consistent boolean contract.
  return False
class NewLineNormalizer(object):
  """A file-like object to normalize EOLs to '\n'.

  Pexpect runs adb within a pseudo-tty device (see
  http://www.noah.org/wiki/pexpect), so any '\n' printed by adb is written
  as '\r\n' to the logfile. Since adb already uses '\r\n' to terminate
  lines, the log ends up having '\r\r\n' at the end of each line. This
  filter replaces the above with a single '\n' in the data stream.
  """

  def __init__(self, output):
    # The underlying writable stream that receives the normalized text.
    self._output = output

  def write(self, data):
    # Collapse the pty-doubled line ending into a single newline.
    self._output.write(data.replace('\r\r\n', '\n'))

  def flush(self):
    self._output.flush()
|
ondra-novak/chromium.src
|
build/android/pylib/android_commands.py
|
Python
|
bsd-3-clause
| 72,615
|
[
"Galaxy"
] |
378fcb99f9337ca332cd3b9b3e7b3b9418d1f6705a1059a4740cb6d56dd04b8b
|
#!/usr/bin/env python
"""
sacfs_flexcode.py - Database of SAGRN FLEX Addresses (for pagers)
Copyright 2010 - 2015 Michael Farrell <http://micolous.id.au/>
This file is automatically generated by a script (urgmsg_export_codes.py).
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
CODES = {
1907737: 'CFS Yorketown Response',
1907765: 'CFS Mawson ops support Info',
1907872: 'CFS R1 area',
1907893: 'CFS Northern York Group Info',
1907896: 'CFS Kadina Info',
1908852: 'CFS Headquarters Info',
1908855: 'CFS Air Crew and Bomber Response',
1908858: 'CFS Bombers Info',
1908861: 'CFS Region 1 Maildrop',
1908862: 'CFS Mt Lofty Tower Info',
1908864: 'CFS R1 Operations Support Response',
1908866: 'CFS R1 HQ Response',
1908867: 'CFS R1 HQ Info',
1908869: 'CFS East Torrens Group Info',
1908870: 'CFS East Torrens Group Officers Response',
1908871: 'CFS Athelstone Response',
1908880: 'CFS Athelstone Info',
1908883: 'CFS Cherryville Info',
1908884: 'CFS Greenhill Info',
1908885: 'CFS Montacute Info',
1908886: 'CFS Norton Summit Info',
1908887: 'CFS Piccadilly Info',
1908888: 'CFS Summertown Info',
1908891: 'CFS Mundoo Group Officers Response',
1908893: 'CFS Goolwa Response',
1908894: 'CFS Middleton Response',
1908895: 'CFS Mount Compass Response',
1908898: 'CFS Goolwa Info',
1908899: 'CFS Middleton Info',
1908901: 'CFS Port Elliot Info',
1908902: 'CFS Heysen Group Officers Info',
1908903: 'CFS Heysen Group Info',
1908904: 'CFS Heysen Group Officers Response',
1908905: 'CFS Brukunga Response',
1908907: 'CFS Hahndorf Response',
1908909: 'CFS Macclesfield Response',
1908910: 'CFS Meadows Response',
1908911: 'CFS Mount Barker Response',
1908912: 'CFS Nairne Response',
1908914: 'CFS Echunga Info',
1908915: 'CFS Hahndorf Info',
1908916: 'CFS Littlehampton Info',
1908917: 'CFS Macclesfield Info',
1908919: 'CFS Mount Barker Info',
1908920: 'CFS Nairne Info',
1908943: 'CFS Kingscote Info',
1908949: 'CFS Kyeema Group Info',
1908950: 'CFS Kyeema Group Officers Response',
1908951: 'CFS Aldinga Response',
1908955: 'CFS Willunga Response',
1908958: 'CFS Aldinga Beach Info',
1908959: 'CFS Range / Hope Forest Info',
1908960: 'CFS Sellicks Info',
1908961: 'CFS Willunga Info',
1908962: 'CFS Yundi Info',
1908963: 'CFS Yundi Info',
1908966: 'CFS Mawson Group Info',
1908967: 'CFS Mawson Group Officers Response',
1908969: 'CFS Clarendon Response',
1908971: 'CFS Kangarilla Response',
1908973: 'CFS Morphett Vale Response',
1908975: 'CFS Blewitt Springs Info',
1908976: 'CFS Clarendon Info',
1908977: 'CFS Happy Valley Info',
1908978: 'CFS Kangarilla Info',
1908979: 'CFS McLaren Flat Info',
1908981: 'CFS Seaford Info',
1908989: 'CFS Lofty Group Officers Info',
1908990: 'CFS Lofty Group Info',
1908991: 'CFS Lofty Group Officers Response',
1908992: 'CFS Aldgate Response',
1908993: 'CFS Bradbury Repsonse',
1908994: 'CFS Bridgewater Response',
1908995: 'CFS Burnside Response',
1908996: 'CFS Ironbank Response',
1908998: 'CFS Stirling Response',
1908999: 'CFS Upper Sturt Response',
1909000: 'CFS Aldgate Info',
1909001: 'CFS Bradbury Info',
1909003: 'CFS Burnside Info',
1909004: 'CFS Ironbank Info',
1909005: 'CFS Mylor Info',
1909006: 'CFS Stirling Info',
1909018: 'CFS Onkaparinga Group Officers Response',
1909019: 'CFS Lenswood / Forest Range Response',
1909020: 'CFS Lobethal Response',
1909021: 'CFS Oakbank / Balhannah Response',
1909022: 'CFS Woodside Response',
1909024: 'CFS Lobethal Info',
1909025: 'CFS Oakbank / Balhannah Info',
1909028: 'CFS Strathalbyn Group Info',
1909029: 'CFS Strathalbyn Group Officers Response',
1909030: 'CFS Ashbourne Response',
1909031: 'CFS Blackfellows Creek Response',
1909033: 'CFS Langhorne Creek Response',
1909034: 'CFS Milang Response',
1909035: 'CFS Strathalbyn Response',
1909036: 'CFS Woodchester Response',
1909042: 'CFS Strathalbyn Info',
1909043: 'CFS Woodchester Info',
1909045: 'CFS Sturt Group Info',
1909046: 'CFS Sturt Group Officers Response',
1909047: 'CFS Belair Response',
1909048: 'CFS Blackwood Response',
1909049: 'CFS Cherry Gardens Response',
1909050: 'CFS Coromandel Valley Response',
1909051: 'CFS Eden Hills Response',
1909053: 'CFS Blackwood Info',
1909054: 'CFS Cherry Gardens Info',
1909056: 'CFS Eden Hills Info',
1909067: 'CFS Victor Group Officers Response',
1909068: 'CFS Hindmarsh Valley Response',
1909071: 'CFS Hindmarsh Valley Info',
1909073: 'CFS Waitpinga Info',
1909075: 'CFS Sthrn Fleurieu Group Info',
1909076: 'CFS Sthrn Fleurieu Gp Officer Response',
1909080: 'CFS Myponga Response',
1909083: 'CFS Yankalilla Response & SES Response',
1909084: 'CFS Cape Jervis Info',
1909086: 'CFS Inman Valley Info',
1909087: 'CFS Myponga Info',
1909088: 'CFS Parawa Info',
1909090: 'CFS Yankalilla Info',
1909092: 'CFS R2 - FIREBAN M/D',
1909095: 'CFS Region 2 Ops Info',
1909096: 'CFS R2 HQ Response',
1909097: 'CFS R2 HQ Info',
1909098: 'CFS Angaston Gp Officer Info',
1909099: 'CFS Angaston Group Info',
1909100: 'CFS Angaston Group Officers Response',
1909101: 'CFS Angaston Response',
1909102: 'CFS Nuriootpa Response',
1909103: 'CFS Tanunda Response',
1909104: 'CFS Truro Repsonse',
1909105: 'CFS Angaston Info',
1909106: 'CFS Nuriootpa Info',
1909107: 'CFS Tanunda Info',
1909108: 'CFS Truro Info',
1909109: 'CFS Barossa Officers Info',
1909111: 'CFS Barossa Group Officers Response',
1909112: 'CFS Concordia Response',
1909114: 'CFS Lyndoch Response',
1909116: 'CFS Mount Pleasant Response',
1909118: 'CFS Williamstown Response',
1909119: 'CFS Concordia Info',
1909121: 'CFS Lyndoch Info',
1909124: 'CFS Williamstown Info',
1909125: 'CFS Williamstown Info',
1909127: 'CFS STC Info',
1909128: 'CFS Gumeracha Group Officers Info',
1909129: 'CFS Gumeracha Group Info',
1909130: 'CFS Gumeracha Group Officers Response',
1909131: 'CFS Birdwood Response',
1909132: 'CFS Cudlee Creek Response',
1909133: 'CFS Forreston Response',
1909134: 'CFS Gumeracha Response',
1909135: 'CFS Hermitage Response',
1909136: 'CFS Kersbrook Response',
1909138: 'CFS Mount Torrens Response',
1909139: 'CFS Paracombe Response',
1909140: 'CFS Birdwood Info',
1909142: 'CFS Forreston Info',
1909144: 'CFS Hermitage Info',
1909145: 'CFS Kersbrook Info',
1909147: 'CFS Mt Torrens Info',
1909148: 'CFS Paracombe Info',
1909150: 'CFS Horrocks Group Info',
1909156: 'CFS Clare Info',
1909162: 'CFS Light Group Officers Response',
1909163: 'CFS Dublin Response',
1909164: 'CFS Freeling Response',
1909165: 'CFS Gawler River Response',
1909166: 'CFS Greenock Response',
1909168: 'CFS Mallala Response',
1909170: 'CFS Roseworthy Response',
1909171: 'CFS Sheaoak Log Response',
1909178: 'CFS Kapunda Info',
1909187: 'CFS Para Group Officers Response',
1909188: 'CFS Dalkeith Response',
1909189: 'CFS One Tree Hill Response',
1909190: 'CFS Salisbury Response',
1909191: 'CFS Tea Tree Gully Response',
1909192: 'CFS Virginia Repsonse',
1909193: 'CFS Dalkeith Info',
1909194: 'CFS One Tree Hill Info',
1909195: 'CFS Salisbury Info',
1909197: 'CFS Virginia Info',
1909202: 'CFS Wakefield Plains Gp Response',
1909210: 'CFS Port Wakefield Response',
1909213: 'CFS Balaklava Info',
1909214: 'CFS Hamley Bridge Info',
1909217: 'CFS Owen Info',
1909219: 'CFS R5 WX Maildrop',
1909224: 'CFS R5 HQ Response',
1909225: 'CFS R5 HQ Info',
1909227: 'CFS Lacepede Group Info',
1909243: 'CFS Lucindale Group Officers Response',
1909244: 'CFS Avenue Range Repsonse',
1909245: 'CFS Biscuit Flat Response',
1909246: 'CFS Callendale Response',
1909247: 'CFS Lucindale Response',
1909248: 'CFS Spence Response',
1909249: 'CFS Stewarts Range Response',
1909250: 'CFS Woolumbool Repsone',
1909259: 'CFS Gambier Group Info',
1909260: 'CFS Gambier Group Officers Response',
1909262: 'CFS Compton Response',
1909265: 'CFS Moorak Repsonse',
1909269: 'CFS Yahl Response',
1909279: 'CFS Yahl Info',
1909283: 'CFS Naracoorte Group Officers Response',
1909287: 'CFS Hynam Response',
1909290: 'CFS Naracoorte Response',
1909295: 'CFS Hynam Info',
1909298: 'CFS Naracoorte Info',
1909319: 'CFS Robe Group Repsonse',
1909320: 'CFS Bray Response',
1909321: 'CFS Greenways Response',
1909322: 'CFS Mount Benson Response',
1909323: 'CFS Robe Response',
1909344: 'CFS Bangham Info',
1909345: 'CFS Bordertown Info',
1909346: 'CFS Keith Info',
1909347: 'CFS Kongal Info',
1909348: 'CFS Laffer Info',
1909349: 'CFS McCallum Info',
1909350: 'CFS Mundulla Info',
1909351: 'CFS Padthaway Info',
1909352: 'CFS Senior Info',
1909353: 'CFS Sherwood Info',
1909354: 'CFS Western Flat Info',
1909355: 'CFS Willalooka Info',
1909356: 'CFS Wolseley Info',
1909357: 'CFS Wattle Range Group Officers Info',
1909358: 'CFS Wattle Range Info',
1909359: 'CFS Wattle Range Group Officers Response',
1909361: 'CFS Beachport Response',
1909363: 'CFS Coonawarra Response',
1909368: 'CFS Millicent Reponse',
1909370: 'CFS Mount Burr Response',
1909388: 'CFS Millicent Info',
1909399: 'CFS Wattle Range Ops Info?',
1909433: 'CFS Gilbert Group Info',
1909434: 'CFS Gilbert Group Response',
1909436: 'CFS Eudunda Response',
1909439: 'CFS Neales Flat Response',
1909444: 'CFS Tarlee Response',
1909448: 'CFS Eudunda Info',
1909800: 'CFS State Air Desk',
1915879: 'CFS Waitpinga Info',
1919096: 'CFS Northern Yorke Peninsula Gp Officers Response',
1919097: 'CFS Alford Response',
1919098: 'CFS Bute Response',
1919100: 'CFS Paskeville Response',
1919109: 'CFS Southern Yorke Group Officers respons',
1919113: 'CFS Edithburgh Response',
1919114: 'CFS Minlaton Repsonse',
1919115: 'CFS Marion Bay Response',
1919117: 'CFS Stansbury Response',
1919118: 'CFS Warooka Response',
1919129: 'CFS Yorke Valley Gp Response',
1919130: 'CFS Yorke Valley Gp Info',
1919131: 'CFS Ardrossan Response',
1919132: 'CFS Maitland Response',
1919133: 'CFS Port Clinton Response',
1919134: 'CFS Port Victoria Response',
1919135: 'CFS Balgowan Response',
1919136: 'CFS Weetulta / Nalyappa Response',
1919145: 'CFS Black Rock Group Officers Response',
1919149: 'CFS Peterborough Response',
1919158: 'CFS Blyth-Snowtown Group Officers respons',
1919163: 'CFS Snowtown Response',
1919170: 'CFS Bundaleer Group Info',
1919171: 'CFS Bundaleer Group Officers Response',
1919172: 'CFS Caltowie Response',
1919174: 'CFS Gladstone Response',
1919176: 'CFS Jamestown Response',
1919177: 'CFS Laura Response',
1919178: 'CFS Narridy Response',
1919185: 'CFS Laura Info',
1919203: 'CFS Flinders Group Officers Response',
1919204: 'CFS Hawker Response',
1919212: 'CFS Stirling North Info',
1919222: 'CFS Spalding Info',
1919223: 'CFS Terowie Info',
1919224: 'CFS Whyte Yarcowie Info',
1919232: 'CFS Woomera Response',
1919240: 'CFS Melrose Group Officers Response',
1919241: 'CFS Appila Response',
1919242: 'CFS Booleroo Response',
1919244: 'CFS Melrose Response',
1919247: 'CFS Wirrabarra Response',
1919254: 'CFS Wirrabarra Info',
1919264: 'CFS Spencer Group Officers Response',
1919265: 'CFS Crystal Brook Response',
1919268: 'CFS Napperby Response',
1919269: 'CFS Port Broughton Response',
1919270: 'CFS Redhill Response',
1919274: 'CFS Koolunga Info',
1919278: 'CFS Redhill Info',
1919288: 'CFS R4 HQ Response',
1919289: 'CFS R4 HQ Info',
1919293: 'CFS Marla Response',
1919303: 'CFS Chaffey Group Officers Response',
1919304: 'CFS Barmera Response',
1919310: 'CFS Paringa Response',
1919314: 'CFS Browns Well Info',
1919318: 'CFS Moorook Info',
1919320: 'CFS Taplan Info',
1919321: 'CFS Wunkar Info',
1919325: 'CFS Colebatch Response',
1919326: 'CFS Coomandook Response',
1919327: 'CFS Coonalpyn Response',
1919329: 'CFS Ki Ki Response',
1919347: 'CFS Coorong Group Officers Response',
1919349: 'CFS Field Response',
1919350: 'CFS Meningie Response',
1919351: 'CFS Narrung Response',
1919352: 'CFS Salt Creek R3 Response',
1919353: 'CFS Tailem Bend Response',
1919356: 'CFS Meningie Info',
1919362: 'CFS Mallee Group Officers Response',
1919371: 'CFS Pinnaroo Response',
1919377: 'CFS Karoonda Info',
1919385: 'CFS Mid Murray Group Officers Response',
1919386: 'CFS Blanchetown Response',
1919387: 'CFS Cadell Response',
1919389: 'CFS Morgan Response',
1919390: 'CFS Waikerie Response',
1919396: 'CFS Swanport Gp Officers Info',
1919398: 'CFS Swanport Group Officers Response',
1919399: 'CFS Callington Response',
1919402: 'CFS Monarto Response',
1919405: 'CFS Callington Info',
1919410: 'CFS Mypolonga Info',
1919413: 'CFS Ridley Group Officer Response',
1919415: 'CFS Keyneton Response',
1919418: 'CFS Sedan Response',
1919430: 'CFS Region 3 Info ?',
1919433: 'CFS R3 HQ Response',
1919434: 'CFS R3 HQ Info',
1919437: 'CFS Ceduna Group Officers Response',
1919438: 'CFS Ceduna Response',
1919449: 'CFS Cleve Group Info',
1919450: 'CFS Cleve Group Officers Response',
1919451: 'CFS Arno Bay Response',
1919452: 'CFS Cleve Response',
1919471: 'CFS Elliston Group Officers Response',
1919472: 'CFS Elliston Response',
1919473: 'CFS Lock Response',
1919488: 'CFS Kimba Group Officers Response',
1919490: 'CFS Kimba Response',
1919500: 'CFS Cootra Response',
1919501: 'CFS Minnipa Response',
1919502: 'CFS Mount Damper Response',
1919503: 'CFS Warramboo Response',
1919504: 'CFS Wudinna Response',
1919509: 'CFS Wudinna Info',
1919511: 'CFS Lower Eyre Peninsula Gp Info',
1919512: 'CFS Lower Eyre Peninsula Gp Officers re',
1919515: 'CFS Coulta Response',
1919520: 'CFS Lincoln Response',
1919534: 'CFS Lincoln Info',
1919546: 'CFS Poochera Response',
1919547: 'CFS Streaky Bay Response',
1919552: 'CFS Streaky Bay Info',
1919574: 'CFS Region 6 Info',
1927244: 'CFS Leigh Creek Response',
191557: 'CFS Brooker Response',
1800068: 'CFS Belair Water Tower Siren',
1900131: 'CFS HQ On Call Pager',
1900132: 'CFS On call 8',
1900257: 'CFS Cherryville Info',
1907398: 'CFS Callington Response',
1907556: 'CFS Monarto Response',
1907737: 'CFS Yorketown Response',
1907738: 'CFS Yorketown Info',
1907764: 'CFS Mawson CFS Group Operations Support Response',
1907765: 'CFS Mawson CFS Group Operations Support Info',
1907893: 'CFS Northern Yorke Peninsula Group Info',
1907895: 'CFS Kadina Response',
1907896: 'CFS Kadina Info',
1907956: 'CFS Region 4 Info',
1907997: 'CFS McLaren Vale CDSI Failure',
1908852: 'CFS Headquarters Sig Inc (Day)',
1908855: 'CFS Air Crew And Bombers Response',
1908858: 'CFS Air Crew And Bombers Info',
1908860: 'CFS Region 1 Weather Maildrop',
1908861: 'CFS Region 1 Fire Ban Info',
1908862: 'CFS Mount Lofty Fire Tower Info',
1908863: 'CFS Region 1 Info',
1908864: 'CFS Region 1 Operations Support',
1908865: 'CFS Region 1 Operations Info',
1908866: 'CFS Region 1 Headquarters Response',
1908867: 'CFS Region 1 Headquarters Info',
1908868: 'CFS East Torrens Group Officers Info',
1908869: 'CFS East Torrens Group Info',
1908870: 'CFS East Torrens Group Officers Response',
1908871: 'CFS Athelstone Response',
1908872: 'CFS Basket Range Response',
1908873: 'CFS Carey Gully Response',
1908874: 'CFS Cherryville Response',
1908875: 'CFS Greenhill Response',
1908876: 'CFS Montacute Response',
1908877: 'CFS Norton Summit Response',
1908878: 'CFS Piccadilly Response',
1908879: 'CFS Summertown and District Response',
1908880: 'CFS Athelstone Info',
1908881: 'CFS Basket Range Info',
1908882: 'CFS Carey Gully Info',
1908883: 'CFS Cherryville Info',
1908884: 'CFS Greenhill Info',
1908885: 'CFS Montacute Info',
1908886: 'CFS Norton Summit Info',
1908887: 'CFS Piccadilly Info',
1908888: 'CFS Summertown and District Info',
1908889: 'CFS Mundoo Group Officers Info',
1908890: 'CFS Mundoo Group Info',
1908891: 'CFS Mundoo Group Officers Response',
1908892: 'CFS Currency Creek Response',
1908893: 'CFS Goolwa Response',
1908894: 'CFS Middleton Response',
1908895: 'CFS Mount Compass Response',
1908896: 'CFS Port Elliot Response',
1908897: 'CFS Currency Creek Info',
1908898: 'CFS Goolwa Info',
1908899: 'CFS Middleton Info',
1908900: 'CFS Mount Compass Info',
1908901: 'CFS Port Elliot Info',
1908902: 'CFS Heysen Group Officers Info',
1908903: 'CFS Heysen Group Info',
1908904: 'CFS Heysen Group Officers Response',
1908905: 'CFS Brukunga Response',
1908906: 'CFS Echunga Response',
1908907: 'CFS Hahndorf Response',
1908908: 'CFS Littlehampton Response',
1908909: 'CFS Macclesfield Response',
1908910: 'CFS Meadows Response',
1908911: 'CFS Mount Barker Response',
1908912: 'CFS Nairne Response',
1908913: 'CFS Brukunga Info',
1908914: 'CFS Echunga Info',
1908915: 'CFS Handorf Info',
1908916: 'CFS Littlehampton Info',
1908917: 'CFS Macclesfield Info',
1908918: 'CFS Meadows Info',
1908919: 'CFS Mount Barker Info',
1908920: 'CFS Nairne Info',
1908929: 'CFS Kangaroo Island Group Officers Info',
1908930: 'CFS Kangaroo Island Group Info',
1908931: 'CFS Kangaroo Island Group Officers Response',
1908932: 'CFS American River Response',
1908933: 'CFS Duncan Gosse Response',
1908934: 'CFS Haines Mcgillivray Response',
1908935: 'CFS Kingscote Response',
1908936: 'CFS Parndana Response',
1908937: 'CFS Penneshaw Response',
1908938: 'CFS Western Districts Response',
1908939: 'CFS Wisanger Response',
1908940: 'CFS American River Info',
1908942: 'CFS Haines MacGillivray Info',
1908943: 'CFS Kingscote Info',
1908944: 'CFS Parndana Info',
1908945: 'CFS Penneshaw Info',
1908946: 'CFS Western District Info',
1908947: 'CFS Wisanger Info',
1908948: 'CFS Kyeema Group Officers Info',
1908949: 'CFS Kyeema Group Info',
1908950: 'CFS Kyeema Group Officers Response',
1908951: 'CFS Aldinga Beach Response',
1908952: 'CFS Mclaren Vale Response',
1908953: 'CFS Range / Hope Forest Response',
1908954: 'CFS Sellicks Response',
1908955: 'CFS Willunga Response',
1908956: 'CFS Yundi Response',
1908958: 'CFS Aldinga Beach Info',
1908959: 'CFS Mclaren Vale Info',
1908960: 'CFS Range / Hope Forest Info',
1908961: 'CFS Sellicks Info',
1908962: 'CFS Willunga Info',
1908963: 'CFS Yundi Info',
1908965: 'CFS Mawson Group Officers Info',
1908966: 'CFS Mawson Group Info',
1908967: 'CFS Mawson Group Officers Response',
1908968: 'CFS Blewitt Springs Response',
1908969: 'CFS Clarendon Response',
1908970: 'CFS Happy Valley Response',
1908971: 'CFS Kangarilla Response',
1908972: 'CFS Mclaren Flat Response',
1908973: 'CFS Morphett Vale Response',
1908974: 'CFS Seaford Response',
1908975: 'CFS Blewitt Springs Info',
1908976: 'CFS Clarendon Info',
1908977: 'CFS Happy Valley Info',
1908978: 'CFS Kangarilla Info',
1908979: 'CFS Mclaren Flat Info',
1908980: 'CFS Morphett Vale Info',
1908981: 'CFS Seaford Info',
1908984: 'CFS Happy Valley Officers Info',
1908985: 'CFS Kangarilla Officers Info',
1908986: 'CFS Mclaren Flat Officers Info',
1908987: 'CFS Morphett Vale Officers Info',
1908988: 'CFS Seaford Officers Info',
1908989: 'CFS Mount Lofty Group Officers Info',
1908990: 'CFS Mount Lofty Group Info',
1908991: 'CFS Mount Lofty Group Officers Response',
1908992: 'CFS Aldgate Response',
1908993: 'CFS Bradbury Response',
1908994: 'CFS Bridgewater Response',
1908995: 'CFS Burnside Response',
1908996: 'CFS Ironbank Response',
1908997: 'CFS Mylor Response',
1908998: 'CFS Stirling Response',
1908999: 'CFS Upper Sturt Response',
1909000: 'CFS Aldgate Info',
1909001: 'CFS Bradbury Info',
1909002: 'CFS Bridgewater Info',
1909003: 'CFS Burnside Info',
1909004: 'CFS Ironbank Info',
1909005: 'CFS Mylor Info',
1909006: 'CFS Stirling Info',
1909007: 'CFS Upper Sturt Info',
1909014: 'CFS Stirling Officers Info',
1909016: 'CFS Onkaparinga Group officers Info',
1909017: 'CFS Onkaparinga Group Info',
1909018: 'CFS Onkaparinga Group Officers Response',
1909019: 'CFS Lenswood / Forest Range Response',
1909020: 'CFS Lobethal Response',
1909021: 'CFS Oakbank / Balhannah Response',
1909022: 'CFS Woodside Response',
1909024: 'CFS Lobethal Info',
1909025: 'CFS Oakbank / Balhanna Info',
1909026: 'CFS Woodside Info',
1909027: 'CFS Strathalbyn Group Officers Info',
1909028: 'CFS Strathalbyn Group Info',
1909029: 'CFS Strathalbyn Group Officers Response',
1909030: 'CFS Ashbourne Response',
1909031: 'CFS Blackfellows Creek Response',
1909032: 'CFS Clayton Response',
1909033: 'CFS Langhorne Creek Response',
1909034: 'CFS Milang Response',
1909035: 'CFS Strathalbyn Response',
1909036: 'CFS Woodchester Response',
1909037: 'CFS Ashbourne Info',
1909038: 'CFS Blackfellows Creek Info',
1909039: 'CFS Clayton Info',
1909040: 'CFS Langhorne Creek Info',
1909041: 'CFS Milang Info',
1909042: 'CFS Strathalbyn Info',
1909043: 'CFS Woodchester Info',
1909045: 'CFS Sturt Group Info',
1909046: 'CFS Sturt Group Officers Response',
1909047: 'CFS Belair Response',
1909048: 'CFS Blackwood Response',
1909049: 'CFS Cherry Gardens Response',
1909050: 'CFS Coromandel Valley Response',
1909051: 'CFS Eden Hills Response',
1909052: 'CFS Belair Info',
1909053: 'CFS Blackwood Info',
1909054: 'CFS Cherry Gardens Info',
1909055: 'CFS Coromandel Valley Info',
1909056: 'CFS Eden Hills Info',
1909063: 'CFS Sturt Group Operations Response',
1909064: 'CFS Sturt Group Operations Info',
1909065: 'CFS Victor Harbor Group Officers Info',
1909066: 'CFS Victor Harbor Group Info',
1909067: 'CFS Victor Harbor Group Officers Response',
1909068: 'CFS Hindmarsh Valley Response',
1909069: 'CFS Lower Inman Valley Response',
1909070: 'CFS Waitpinga Response',
1909071: 'CFS Hindmarsh Valley Info',
1909072: 'CFS Lower Inman Valley Info',
1909073: 'CFS Waitpinga Info',
1909074: 'CFS Southern Fleurieu Group Officers Info',
1909075: 'CFS Southern Fleurieu Group Info',
1909076: 'CFS Southern Fleurieu Group Officers Response',
1909077: 'CFS Cape Jervis Response',
1909078: 'CFS Hay Flat Response',
1909079: 'CFS Inman Valley Response',
1909080: 'CFS Myponga Response',
1909081: 'CFS Parawa Response',
1909082: 'CFS Rapid Bay Response',
1909083: 'CFS Yankallila and District Response',
1909084: 'CFS Cape Jervis Info',
1909085: 'CFS Hay Flat Info',
1909086: 'CFS Inman Valley Info',
1909087: 'CFS Myponga Info',
1909088: 'CFS Parawa Info',
1909089: 'CFS Rapid Bay Info',
1909090: 'CFS Yankalilla Info',
1909091: 'CFS Region 2 Weather Maildrop',
1909092: 'CFS Region 2 Fire Ban Info',
1909093: 'CFS Region 2 Info',
1909094: 'CFS Region 2 Operations Support Response',
1909095: 'CFS Region 2 Operations Support Info',
1909096: 'CFS Region 2 Headquarters Response',
1909097: 'CFS Region 2 Headquarters Info',
1909098: 'CFS Northern Barossa Group Officers Info',
1909099: 'CFS Northern Barossa Group Info',
1909100: 'CFS Northern Barossa Group Officers Response',
1909101: 'CFS Angaston Response',
1909102: 'CFS Nuriootpa Response',
1909103: 'CFS Tanunda Response',
1909104: 'CFS Truro Response',
1909105: 'CFS Angaston Info',
1909106: 'CFS Nuriootpa Info',
1909107: 'CFS Tanunda Info',
1909108: 'CFS Truro Info',
1909109: 'CFS Barossa Group Officers Info',
1909110: 'CFS Barossa Group Info',
1909111: 'CFS Barossa Group Officers Response',
1909112: 'CFS Concordia Response',
1909113: 'CFS Eden Valley Response',
1909114: 'CFS Lyndoch Response',
1909116: 'CFS Mount Pleasant Response',
1909117: 'CFS Springton Response',
1909118: 'CFS Williamstown Response',
1909119: 'CFS Concordia Info',
1909120: 'CFS Eden Valley Info',
1909121: 'CFS Lyndoch Info',
1909123: 'CFS Mount Pleasant Info',
1909124: 'CFS Springton Info',
1909125: 'CFS Williamstown Info',
1909127: 'CFS State Training Centre Info',
1909128: 'CFS Gumeracha Group Officers Info',
1909129: 'CFS Gumeracha Group Info',
1909130: 'CFS Gumeracha Group Officers Response',
1909131: 'CFS Birdwood Response',
1909132: 'CFS Cudlee Creek Response',
1909133: 'CFS Forreston Response',
1909134: 'CFS Gumeracha Response',
1909135: 'CFS Hermitage Response',
1909136: 'CFS Kersbrook Response',
1909138: 'CFS Mount Torrens Response',
1909139: 'CFS Paracombe Response',
1909140: 'CFS Birdwood Info',
1909141: 'CFS Cudlee Creek Info',
1909142: 'CFS Forreston Info',
1909143: 'CFS Gumeracha Info',
1909144: 'CFS Hermitage Info',
1909145: 'CFS Kersbrook Info',
1909147: 'CFS Mount Torrens Info',
1909148: 'CFS Paracombe Info',
1909149: 'CFS Horrocks Group Officers Info',
1909150: 'CFS Horrocks Group Info',
1909151: 'CFS Horrocks Group Officers Response',
1909152: 'CFS Clare Response',
1909153: 'CFS Mintaro Response',
1909154: 'CFS Sevenhill Response',
1909155: 'CFS Watervale Response',
1909156: 'CFS Clare Info',
1909157: 'CFS Mintaro Info',
1909158: 'CFS Sevenhill Info',
1909159: 'CFS Watervale Info',
1909160: 'CFS Light Group Officers Info',
1909161: 'CFS Light Group Info',
1909162: 'CFS Light Group Officers Response',
1909163: 'CFS Dublin Response',
1909164: 'CFS Freeling Response',
1909165: 'CFS Gawler River Response',
1909166: 'CFS Greenock Response',
1909167: 'CFS Kapunda Response',
1909168: 'CFS Mallala District Response',
1909169: 'CFS Mudla Wirra Response',
1909170: 'CFS Roseworthy Response',
1909171: 'CFS Sheaoak Log Response',
1909172: 'CFS Two Wells Response',
1909173: 'CFS Woolsheds / Wasleys Response',
1909174: 'CFS Dublin Info',
1909175: 'CFS Freeling Info',
1909176: 'CFS Gawler River Info',
1909177: 'CFS Greenock Info',
1909178: 'CFS Kapunda Info',
1909179: 'CFS Mallala District Info',
1909180: 'CFS Mudla Wirra Info',
1909181: 'CFS Roseworthy Info',
1909182: 'CFS Sheaoak Log Info',
1909183: 'CFS Two Wells Info',
1909184: 'CFS Woolsheds / Wasleys Info',
1909185: 'CFS Para Group Officers Info',
1909186: 'CFS Para Group Info',
1909187: 'CFS Para Group Officers Response',
1909188: 'CFS Dalkeith Response',
1909189: 'CFS One Tree Hill Response',
1909190: 'CFS Salisbury Response',
1909191: 'CFS Tea Tree Gully Response',
1909192: 'CFS Virginia Response',
1909193: 'CFS Dalkeith Info',
1909194: 'CFS One Tree Hill Info',
1909195: 'CFS Salisbury Info',
1909196: 'CFS Tea Tree Gully Info',
1909197: 'CFS Virginia Info',
1909201: 'CFS Wakefield Plains Group Info',
1909202: 'CFS Wakefield Plains Group Officers Response',
1909203: 'CFS Alma Response',
1909204: 'CFS Avon Response',
1909205: 'CFS Balaklava Response',
1909206: 'CFS Hamley Bridge Response',
1909207: 'CFS Mount Templeton Response',
1909208: 'CFS Nantawarra Response',
1909209: 'CFS Owen Response',
1909210: 'CFS Port Wakefield Response',
1909211: 'CFS Alma Info',
1909212: 'CFS Avon info',
1909213: 'CFS Balaklava Info',
1909214: 'CFS Hamley Bridge info',
1909215: 'CFS Mount Templeton Info',
1909216: 'CFS Nantawarra info',
1909217: 'CFS Owen Info',
1909218: 'CFS Port Wakefield Info',
1909219: 'CFS Region 5 Weather Maildrop',
1909220: 'CFS Region 5 Fire Ban Info',
1909221: 'CFS Region 5 Info',
1909222: 'CFS Region 5 Operations Support',
1909223: 'CFS Region 5 Operations',
1909224: 'CFS Region 5 Headquarters Response',
1909225: 'CFS Region 5 Headquarters Info',
1909226: 'CFS Lacepede Group Officers Info',
1909227: 'CFS Lacepede Group Info',
1909228: 'CFS Lacepede Group Officers Response',
1909229: 'CFS Keilira Response',
1909230: 'CFS Kingston Response',
1909231: 'CFS Marcollat Response',
1909232: 'CFS Reedy Creek Response',
1909233: 'CFS Taratap Response',
1909234: 'CFS Tilley Swamp Response',
1909235: 'CFS Keilira Info',
1909236: 'CFS Kingston Info',
1909237: 'CFS Marcollat Info',
1909238: 'CFS Reedy Creek Info',
1909239: 'CFS Taratap Info',
1909240: 'CFS Tilley Swamp Info',
1909241: 'CFS Lucindale Group Officers Info',
1909242: 'CFS Lucindale Group Info',
1909243: 'CFS Lucindale Group Officers Response',
1909244: 'CFS Avenue Range Response',
1909245: 'CFS Biscuit Flat Response',
1909246: 'CFS Callendale Response',
1909247: 'CFS Lucindale Response',
1909248: 'CFS Spence Response',
1909249: 'CFS Stewarts Range Response',
1909250: 'CFS Woolumbool Response',
1909251: 'CFS Avenue Range Info',
1909252: 'CFS Biscuit Flat Info',
1909253: 'CFS Callendale Info',
1909254: 'CFS Lucindale Info',
1909255: 'CFS Spence Info',
1909256: 'CFS Stewarts Range Info',
1909257: 'CFS Woolumbool Info',
1909258: 'CFS Mount Gambier District Group Officers Info',
1909259: 'CFS Mount Gambier District Group Info',
1909260: 'CFS Mount Gambier District Group Officers Response',
1909261: 'CFS Auspine Response',
1909262: 'CFS Compton Response',
1909263: 'CFS Green Triangle Forestry Response',
1909264: 'CFS Mil Lel Response',
1909265: 'CFS Moorak Response',
1909266: 'CFS Pleasant Park Response',
1909267: 'CFS Tarpeena Response',
1909268: 'CFS Wandilo Response',
1909269: 'CFS Yahl Response',
1909270: 'CFS Mount Gambier District Operations Response',
1909272: 'CFS Compton Info',
1909274: 'CFS Mil Lel Info',
1909275: 'CFS Moorak Info',
1909276: 'CFS Pleasant Park Info',
1909277: 'CFS Tarpeena Info',
1909278: 'CFS Wandilo Info',
1909279: 'CFS Yahl Info',
1909280: 'CFS Mount Gambier District Operations Info',
1909281: 'CFS Naracoorte Group Officers Info',
1909282: 'CFS Naracoorte Group Info',
1909283: 'CFS Naracoorte Group Officers Response',
1909284: 'CFS Cadgee Response',
1909285: 'CFS Bool Lagoon Response',
1909286: 'CFS Frances Response',
1909287: 'CFS Hynam Response',
1909288: 'CFS Kybybolite Response',
1909289: 'CFS Lochaber Response',
1909290: 'CFS Naracoorte Response',
1909291: 'CFS Joanna Response',
1909293: 'CFS Bool Lagoon Info',
1909294: 'CFS Frances Info',
1909295: 'CFS Hynam Info',
1909296: 'CFS Kybybolite Info',
1909297: 'CFS Lochaber Info',
1909298: 'CFS Naracoorte Info',
1909299: 'CFS Joanna Info',
1909300: 'CFS Kingsley Group Officers Info',
1909301: 'CFS Kingsley Group Info',
1909302: 'CFS Kingsley Group Officers Response',
1909303: 'CFS Allendale East Response',
1909304: 'CFS Blackfellows Caves Response',
1909305: 'CFS Donovans Response',
1909306: 'CFS Kongorong Response',
1909307: 'CFS Mount Schank Response',
1909308: 'CFS Port Macdonnell Response',
1909310: 'CFS Allendale East Info',
1909311: 'CFS Blackfellows Caves Info',
1909312: 'CFS Donovans Info',
1909313: 'CFS Kongorong Info',
1909314: 'CFS Mount Schank Info',
1909315: 'CFS Port MacDonnell Info',
1909317: 'CFS Robe Group Officers Info',
1909318: 'CFS Robe Group Info',
1909319: 'CFS Robe Group Officers Response',
1909320: 'CFS Bray Response',
1909321: 'CFS Greenways Response',
1909322: 'CFS Mount Benson Response',
1909323: 'CFS Robe Response',
1909324: 'CFS Bray Info',
1909325: 'CFS Greenways Info',
1909326: 'CFS Mount Benson Info',
1909327: 'CFS Robe Info',
1909328: 'CFS Tatiara Group Officers Info',
1909329: 'CFS Tatiara Group Info',
1909330: 'CFS Tatiara Group Officers Response',
1909331: 'CFS Bangham Response',
1909332: 'CFS Bordertown Response',
1909333: 'CFS Keith Response',
1909334: 'CFS Kongal Response',
1909335: 'CFS Laffer Response',
1909336: 'CFS Mccallum Response',
1909337: 'CFS Mundulla Response',
1909338: 'CFS Padthaway Response',
1909339: 'CFS Senior Response',
1909340: 'CFS Sherwood Response',
1909341: 'CFS Western Flat Response',
1909342: 'CFS Willalooka Response',
1909343: 'CFS Wolseley Response',
1909344: 'CFS Bangham Info',
1909345: 'CFS Bordertown Info',
1909346: 'CFS Keith Info',
1909347: 'CFS Kongal Info',
1909348: 'CFS Laffer Info',
1909349: 'CFS McCallum Info',
1909350: 'CFS Mundulla Info',
1909351: 'CFS Padthaway Info',
1909352: 'CFS Senior Info',
1909353: 'CFS Sherwood Info',
1909354: 'CFS Western Flat Info',
1909355: 'CFS Willalooka Info',
1909356: 'CFS Wolseley Info',
1909357: 'CFS Wattle Range Group Officers Info',
1909358: 'CFS Wattle Range Group Info',
1909359: 'CFS Wattle Range Group Officers Response',
1909360: 'CFS Argyle Response',
1909361: 'CFS Beachport Response',
1909362: 'CFS Comaum Response',
1909363: 'CFS Coonawarra Response',
1909364: 'CFS Furner Response',
1909365: 'CFS Glencoe Response',
1909366: 'CFS Kalangadoo Response',
1909367: 'CFS Maaoupe Response',
1909368: 'CFS Millicent Response',
1909369: 'CFS Monbulla Response',
1909370: 'CFS Mount Burr Response',
1909371: 'CFS Mount Mcintyre Response',
1909372: 'CFS Nangwarry Response',
1909373: 'CFS Penola Response',
1909374: 'CFS Rendelsham Response',
1909375: 'CFS Southend Response',
1909376: 'CFS Tantanoola Response',
1909377: 'CFS Thornlea Response',
1909378: 'CFS Mount Graham Response',
1909379: 'CFS Wattle Range Group Operations Response',
1909380: 'CFS Argyle Info',
1909381: 'CFS Beachport Info',
1909382: 'CFS Comaum Info',
1909383: 'CFS Coonalpyn Info',
1909384: 'CFS Furner Info',
1909385: 'CFS Glencoe Info',
1909386: 'CFS Kalangadoo Info',
1909387: 'CFS Maaoupe Info',
1909388: 'CFS Millicent Info',
1909389: 'CFS Monbulla Info',
1909390: 'CFS Mount Burr Info',
1909391: 'CFS Mount Mcintyre Info',
1909392: 'CFS Nangwarry Info',
1909393: 'CFS Penola Info',
1909394: 'CFS Rendelsham Info',
1909395: 'CFS Southend Info',
1909396: 'CFS Tantanoola Info',
1909397: 'CFS Thornlea Info',
1909398: 'CFS Mount Graham Info',
1909399: 'CFS Wattle Range Group Operations Info',
1909432: 'CFS Gilbert Group Officers Info',
1909433: 'CFS Gilbert Group Info',
1909434: 'CFS Gilbert Group Officers Response',
1909435: 'CFS Auburn Response',
1909436: 'CFS Eudunda Response',
1909437: 'CFS Manoora Response',
1909438: 'CFS Marrabel Response',
1909439: 'CFS Neales Flat Response',
1909440: 'CFS Rhynie Response',
1909441: 'CFS Riverton Response',
1909442: 'CFS Robertstown Response',
1909443: 'CFS Saddleworth Response',
1909444: 'CFS Tarlee Response',
1909445: 'CFS Tothill Response',
1909446: 'CFS Waterloo Response',
1909447: 'CFS Auburn Info',
1909448: 'CFS Eudunda Info',
1909449: 'CFS Manoora Info',
1909450: 'CFS Marrabel Info',
1909451: 'CFS Neals Flat Info',
1909452: 'CFS Rhynie Info',
1909453: 'CFS Riverton Info',
1909454: 'CFS Robertstown Info',
1909455: 'CFS Saddleworth Info',
1909456: 'CFS Tarlee Info',
1909457: 'CFS Tothill Info',
1909458: 'CFS Waterloo Info',
1909515: 'CFS Bunbury Response',
1909518: 'CFS Coombe Response',
1909558: 'CFS On call 3',
1909787: 'CFS SOC Yvette Dowling',
1909800: 'CFS State Airdesk',
1909819: 'CFS Region 1 Duty Officer',
1910162: 'CFS Goolwa 24P',
1910221: 'CFS Region 1 Duty Officer',
1910955: 'CFS Mawson Deputy Group Officer 2',
1911131: 'CFS Morphett Vale Pager',
1911156: 'CFS Mawson Ops Pager 35',
1911204: 'CFS Turnout Response Display',
1911693: 'CFS On call 5',
1912320: 'CFS Region 2 Duty Officer',
1912326: 'CFS Region 2 Staff Info',
1912670: 'CFS On call 9',
1912739: 'CFS Forreston Siren 3 Phase Power Failure',
1912740: 'CFS Forreston Siren 3 Phase Power Failure',
1912852: 'CFS On call 2',
1912968: 'CFS On Call Air Operations',
1913743: 'CFS Region 6 Headquarters Info',
1914447: 'CFS Mount Gambier District Group Operations Response',
1914942: 'CFS On call 6',
1915088: 'CFS On call 7',
1919004: 'CFS On call 4',
1919095: 'CFS Northern Yorke Peninsula Group Officers Info',
1919096: 'CFS Northern Yorke Peninsula Group Officers Response',
1919097: 'CFS Alford Response',
1919098: 'CFS Bute Response',
1919099: 'CFS Cunliffe Response',
1919100: 'CFS Paskeville Response',
1919101: 'CFS South Hummocks Response',
1919103: 'CFS Bute Info',
1919107: 'CFS Southern Yorke Group Officers Info',
1919108: 'CFS Southern Yorke Group Info',
1919109: 'CFS Southern Yorke Group Officers Response',
1919110: 'CFS Brentwood Response',
1919111: 'CFS Corny Point Response',
1919112: 'CFS Curramulka Response',
1919113: 'CFS Edithburgh Response',
1919114: 'CFS Minlaton Response',
1919115: 'CFS Marion Bay Response',
1919116: 'CFS Port Vincent Response',
1919117: 'CFS Stansbury Response',
1919118: 'CFS Warooka Response',
1919119: 'CFS Brentwood Info',
1919120: 'CFS Corny Point Info',
1919121: 'CFS Curramulka Info',
1919122: 'CFS Edithburgh Info',
1919123: 'CFS Minlaton Info',
1919125: 'CFS Port Vincent Info',
1919127: 'CFS Yorke Valley Group Officers Info',
1919128: 'CFS Yorke Valley Group Info',
1919129: 'CFS Yorke Valley Group Officers Response',
1919131: 'CFS Ardrossan Response',
1919132: 'CFS Maitland Response',
1919133: 'CFS Port Clinton Response',
1919134: 'CFS Port Victoria Response',
1919135: 'CFS Balgowan Response',
1919136: 'CFS Weetulta / Nalyappa Response',
1919137: 'CFS Ardrossan Info',
1919138: 'CFS Maitland Info',
1919139: 'CFS Port Clinton Info',
1919140: 'CFS Port Victoria Info',
1919141: 'CFS Balgowan Info',
1919142: 'CFS Weetulta Info',
1919143: 'CFS Black Rock Group Officers Info',
1919144: 'CFS Black Rock Group Info',
1919145: 'CFS Black Rock Group Officers Response',
1919146: 'CFS Carrieton Response',
1919147: 'CFS Orroroo Response',
1919148: 'CFS Pekina Response',
1919149: 'CFS Peterborough Response',
1919150: 'CFS Yongala Response',
1919151: 'CFS Carrieton Info',
1919152: 'CFS Orroroo Info',
1919153: 'CFS Pekina Info',
1919154: 'CFS Peterborough Info',
1919155: 'CFS Yongala Info',
1919156: 'CFS Blyth / Snowtown Group Officers Info',
1919157: 'CFS Blyth / Snowtown Group Info',
1919158: 'CFS Blyth / Snowtown Group Officers Response',
1919159: 'CFS Blyth Response',
1919160: 'CFS Brinkworth Response',
1919161: 'CFS Hoyleton Response',
1919162: 'CFS Lochiel Response',
1919163: 'CFS Snowtown Response',
1919164: 'CFS Blyth Info',
1919165: 'CFS Brinkworth Info',
1919166: 'CFS Hoyleton Info',
1919167: 'CFS Lochiel Info',
1919168: 'CFS Snowtown Info',
1919169: 'CFS Bundaleer Group Officers Info',
1919170: 'CFS Bundaleer Group Info',
1919171: 'CFS Bundaleer Group Officers Response',
1919172: 'CFS Caltowie Response',
1919173: 'CFS Georgetown Response',
1919174: 'CFS Gladstone Response',
1919175: 'CFS Gulnare Response',
1919176: 'CFS Jamestown Response',
1919177: 'CFS Laura Response',
1919178: 'CFS Narridy Response',
1919179: 'CFS Yacka Response',
1919180: 'CFS Caltowie Info',
1919181: 'CFS Georgetown Info',
1919182: 'CFS Gladstone Info',
1919183: 'CFS Gulnare Info',
1919184: 'CFS Jamestown Info',
1919185: 'CFS Laura Info',
1919186: 'CFS Narridy Info',
1919187: 'CFS Yacka Info',
1919188: 'CFS Burra Group Officers Info',
1919189: 'CFS Burra Group Info',
1919190: 'CFS Burra Group Officers Response',
1919191: 'CFS Baldina Response',
1919192: 'CFS Booborowie Response',
1919193: 'CFS Burra Response',
1919194: 'CFS Farrell Flat Response',
1919195: 'CFS Mount Bryan Response',
1919196: 'CFS Baldina Info',
1919197: 'CFS Booborowie Info',
1919198: 'CFS Burra Info',
1919199: 'CFS Farrell Flat Info',
1919200: 'CFS Mount Bryan Info',
1919201: 'CFS Flinders Group Officers Info',
1919202: 'CFS Flinders Group Info',
1919203: 'CFS Flinders Group Officers Response',
1919204: 'CFS Hawker Response',
1919205: 'CFS Iron Knob Response',
1919206: 'CFS Quorn and District Response',
1919207: 'CFS Stirling North Response',
1919208: 'CFS Wilpena Response',
1919209: 'CFS Hawker Info',
1919210: 'CFS Iron Knob Info',
1919211: 'CFS Quorn and District Info',
1919212: 'CFS Stirling North Info',
1919213: 'CFS Wilpena Info',
1919214: 'CFS Hallett Group Officers Info',
1919215: 'CFS Hallett Group Info',
1919216: 'CFS Hallett Group Officers Response',
1919217: 'CFS Hallett Response',
1919218: 'CFS Spalding Response',
1919219: 'CFS Terowie Response',
1919220: 'CFS Whyte Yarcowie Response',
1919221: 'CFS Hallett Info',
1919222: 'CFS Spalding Info',
1919223: 'CFS Terowie Info',
1919224: 'CFS Whyte Yarcowie Info',
1919228: 'CFS Andamooka Response',
1919230: 'CFS Marree Response',
1919231: 'CFS Roxby Downs Response',
1919232: 'CFS Woomera Response',
1919238: 'CFS Mount Remarkable Group Officers Info',
1919239: 'CFS Mount Remarkable Group Info',
1919240: 'CFS Mount Remarkable Group Officers Response',
1919241: 'CFS Appila Response',
1919242: 'CFS Booleroo Response',
1919244: 'CFS Melrose Response',
1919245: 'CFS Port Germein Response',
1919246: 'CFS Wilmington Response',
1919247: 'CFS Wirrabara Response',
1919248: 'CFS Appila Info',
1919249: 'CFS Booleroo Info',
1919251: 'CFS Melrose Info',
1919252: 'CFS Port Germein Info',
1919253: 'CFS Wilmington Info',
1919254: 'CFS Wirrabara Info',
1919258: 'CFS Cockburn Response',
1919259: 'CFS Yunta Response',
1919262: 'CFS Spencer Group Officers Info',
1919263: 'CFS Spencer Group Info',
1919264: 'CFS Spencer Group Officers Response',
1919265: 'CFS Crystal Brook Response',
1919266: 'CFS Koolunga Response',
1919267: 'CFS Mundoora Response',
1919268: 'CFS Napperby Response',
1919269: 'CFS Port Broughton Response',
1919270: 'CFS Redhill Response',
1919271: 'CFS Wandearah Response',
1919272: 'CFS Wards Hill Response',
1919273: 'CFS Warnertown Response',
1919274: 'CFS Crystal Brook Info',
1919275: 'CFS Mundoora Info',
1919276: 'CFS Napperby Info',
1919277: 'CFS Port Broughton Info2',
1919278: 'CFS Port Broughton Info1',
1919279: 'CFS Wandearah Info',
1919284: 'CFS Region 4 Fire Ban Info',
1919286: 'CFS Region 4 Operations Brigade',
1919288: 'CFS Region 4 Headquarters Response',
1919289: 'CFS Region 4 Headquarters Info',
1919292: 'CFS Coober Pedy Response',
1919293: 'CFS Marla Response',
1919294: 'CFS Mintabie Response',
1919295: 'CFS Oodnadatta Response',
1919296: 'CFS Coober Pedy Info',
1919299: 'CFS Glendambo Response',
1919301: 'CFS Chaffey Group Officers Info',
1919302: 'CFS Chaffey Group Info',
1919303: 'CFS Chaffey Group Officers Response',
1919304: 'CFS Barmera Response',
1919305: 'CFS Browns Well Response',
1919306: 'CFS Glossop Response',
1919307: 'CFS Lyrup Response',
1919308: 'CFS Monash Response',
1919309: 'CFS Moorook Response',
1919310: 'CFS Paringa Response',
1919311: 'CFS Taplan Response',
1919312: 'CFS Wunkar Response',
1919313: 'CFS Barmera Info',
1919314: 'CFS Brown Wells Info',
1919315: 'CFS Glossop Info',
1919316: 'CFS Lyrup Info',
1919317: 'CFS Monash Info',
1919318: 'CFS Moorook Info',
1919319: 'CFS Paringa Info',
1919320: 'CFS Taplan Info',
1919321: 'CFS Wunkar Info',
1919323: 'CFS Coorong Group Info',
1919325: 'CFS Colebatch Response',
1919326: 'CFS Coomandook Response',
1919327: 'CFS Coonalpyn Response',
1919328: 'CFS Jabuk Response',
1919329: 'CFS Ki Ki Response',
1919330: 'CFS Netherton Response',
1919331: 'CFS Peake Response',
1919332: 'CFS Sherlock / Moorlands Response',
1919333: 'CFS Tintinara Response',
1919347: 'CFS Coorong Group Officers Response',
1919348: 'CFS Cooke Plains Response',
1919349: 'CFS Field Response',
1919350: 'CFS Meningie Response',
1919351: 'CFS Narrung Response',
1919352: 'CFS Salt Creek Response',
1919353: 'CFS Tailem Bend Response',
1919356: 'CFS Meningie Info',
1919359: 'CFS Tailem Bend Info',
1919362: 'CFS Mallee Group Officers Response',
1919363: 'CFS Bowhill Response',
1919364: 'CFS Galga Response',
1919365: 'CFS Geranium Response',
1919366: 'CFS Halidon and District Response',
1919367: 'CFS Karoonda Response',
1919368: 'CFS Kulkami / Marama Response',
1919369: 'CFS Lameroo Response',
1919370: 'CFS Parilla Response',
1919371: 'CFS Pinnaroo Response',
1919372: 'CFS Wynarka Response',
1919377: 'CFS Karoonda Info',
1919385: 'CFS Mid Murray Group Officers Response',
1919386: 'CFS Blanchetown Response',
1919387: 'CFS Cadell Response',
1919388: 'CFS Eastern Districts Response',
1919389: 'CFS Morgan Response',
1919390: 'CFS Waikerie Response',
1919392: 'CFS Cadell Info',
1919395: 'CFS Waikerie Info',
1919397: 'CFS Swanport Info',
1919398: 'CFS Swanport Group Officers Response',
1919399: 'CFS Callington Response',
1919400: 'CFS Ettrick Response',
1919401: 'CFS Jervois Response',
1919402: 'CFS Monarto Response',
1919403: 'CFS Murray Bridge Response',
1919404: 'CFS Mypolonga Response',
1919405: 'CFS Callington Info',
1919406: 'CFS Ettrick Info',
1919407: 'CFS Jervois Info',
1919408: 'CFS Monarto Info',
1919409: 'CFS Murray Bridge Info',
1919410: 'CFS Mypolonga Info',
1919412: 'CFS Ridley Group Info',
1919413: 'CFS Ridley Group Officers Response',
1919414: 'CFS Cambrai Response',
1919415: 'CFS Keyneton Response',
1919416: 'CFS Mannum Response',
1919417: 'CFS Palmer Response',
1919418: 'CFS Sedan Response',
1919419: 'CFS Swan Reach Response',
1919420: 'CFS Walker Flat Response',
1919422: 'CFS Keyneton Info',
1919423: 'CFS Mannum Info',
1919425: 'CFS Sedan Info',
1919427: 'CFS Walker Flat Info',
1919428: 'CFS Region 3 Weather Maildrop',
1919429: 'CFS Region 3 Fire Ban Info',
1919430: 'CFS Region 3 Info',
1919431: 'CFS Region 3 Operations Brigade',
1919433: 'CFS Region 3 Headquarters Response',
1919434: 'CFS Region 3 Headquarters Info',
1919437: 'CFS Western Eyre Group Officers Response',
1919438: 'CFS Ceduna Response',
1919439: 'CFS Eucla Response',
1919440: 'CFS Far West Response',
1919441: 'CFS Nunjikompita Response',
1919442: 'CFS Smoky Bay Response',
1919443: 'CFS Ceduna Info',
1919450: 'CFS Eastern Eyre Group Officers Response',
1919451: 'CFS Arno Bay Response',
1919452: 'CFS Cleve Response',
1919453: 'CFS Cowell Response',
1919454: 'CFS Darke Peak Response',
1919455: 'CFS Gum Flat',
1919456: 'CFS Mangalo Response',
1919457: 'CFS Rudall Response',
1919458: 'CFS Pondooma Response',
1919459: 'CFS Wharminda Response',
1919462: 'CFS Cowell Info',
1919471: 'CFS Elliston Group Officers Response',
1919472: 'CFS Elliston Response',
1919473: 'CFS Lock Response',
1919474: 'CFS Mount Wedge Response',
1919475: 'CFS Murdinga Response',
1919476: 'CFS Port Kenny Response',
1919477: 'CFS Sheringa Response',
1919478: 'CFS Tooligie Hills Response',
1919479: 'CFS Elliston Info',
1919480: 'CFS Lock Info',
1919488: 'CFS Caralue Group Officers Response',
1919489: 'CFS Buckleboo Response',
1919490: 'CFS Kimba Response',
1919491: 'CFS Waddikee Response',
1919492: 'CFS Yalanda James Response',
1919494: 'CFS Kimba Info',
1919500: 'CFS Cootra Response',
1919501: 'CFS Minnipa Response',
1919502: 'CFS Mount Damper Response',
1919503: 'CFS Warramboo Response',
1919504: 'CFS Wudinna Response',
1919511: 'CFS Lower Eyre Peninsula Group Info',
1919512: 'CFS Lower Eyre Peninsula Group Officers Response',
1919513: 'CFS Kapinnie Response',
1919514: 'CFS Coffin Bay Response',
1919515: 'CFS Coulta Response',
1919516: 'CFS Cummins Response',
1919517: 'CFS Edillilie Response',
1919518: 'CFS Greenpatch Response',
1919519: 'CFS Karkoo Response',
1919520: 'CFS Lincoln',
1919521: 'CFS Mount Hope Response',
1919522: 'CFS North Shields Response',
1919523: 'CFS Wangary Response',
1919524: 'CFS Wanilla Response',
1919525: 'CFS White Flat Response',
1919526: 'CFS Yeelana Response',
1919532: 'CFS Greenpatch Info',
1919534: 'CFS Lincoln Info',
1919536: 'CFS North Shields Info',
1919544: 'CFS Calca Response',
1919545: 'CFS Sceale Bay Response',
1919546: 'CFS Poochera and District Response',
1919547: 'CFS Streaky Bay Response',
1919548: 'CFS Wirrulla and District Response',
1919552: 'CFS Streaky Bay Info',
1919555: 'CFS Tumby Bay and District Group Info',
1919556: 'CFS Tumby Bay and District Group Officers Response',
1919558: 'CFS Butler Response',
1919559: 'CFS Koppio Response',
1919560: 'CFS Lipson Response',
1919574: 'CFS Region 6 Fire Ban Info',
1919575: 'CFS Region 6 Info',
1919920: 'CFS Region 4 Air Operations',
1921191: 'CFS Region 4 Regional Officer',
1921192: 'CFS Regional Officer Devine',
1921194: 'CFS Region 4 Duty Officer',
1921196: 'CFS Region 4 Regional Officer',
1922018: 'CFS Waikerie Info?',
1922304: 'CFS Region 3 Duty Officer',
1922305: 'CFS Region 3 Duty Officer',
1922306: 'CFS Region 3 Duty Officer',
1922307: 'CFS Region 3 Duty Officer',
1922308: 'CFS Region 3 Dawn Hunt',
1924239: 'CFS Region 1 Duty Officer',
1924771: 'CFS Port Neill Response',
1924773: 'CFS Tumby Bay Response',
1924776: 'CFS Ungarra Response',
1924777: 'CFS Yallunda Flat Response',
1924781: 'CFS On call 10',
1924784: 'CFS Region 6 Duty Officer',
1924811: 'CFS Region 6 Operations Brigade',
1924812: 'CFS Region 6 Operations Support',
1924957: 'CFS Mapping Support',
1925107: 'CFS Penong Response',
1926521: 'CFS Goolwa 34',
1926629: 'CFS Region 5 Regional Officer',
1926672: 'CFS On call 1',
1926957: 'CFS Region 4 Regional Officer',
1927165: 'CFS Rob Sanford',
1927189: 'CFS Headquarters Info',
1927244: 'CFS Leigh Creek',
1927353: 'CFS Region 2 Duty Officer',
1928735: 'CFS Region 1 Duty Officer',
1928797: 'CFS Region 2 Duty Officer',
1928798: 'CFS Region 2 Regional Officer',
1928882: 'CFS Region 2 Duty Officer',
1929097: 'CFS State IMT Info',
1929099: 'CFS Mount Lofty Group Operational Support',
1929146: 'CFS Tuckey Response',
1929423: 'CFS Myrtle Rd Siren',
1929424: 'CFS Cherry Gardens Station Siren',
1929425: 'CFS Coromandel Valley Siren',
1929428: 'CFS Eden Station Siren',
1929430: 'CFS Waite St Siren',
1930340: 'CFS Region 4 Regional Officer',
1930341: 'CFS State Headquarters Operations',
1930622: 'CFS Cockaleechie Response',
1930716: 'CFS On Call Media?',
1930733: 'CFS Region 6 Duty Officer',
1931052: 'CFS Region 2 Air Support',
1931109: 'CFS Commander McDonough',
1931113: 'CFS State Officer Shearer',
1931151: 'CFS Regional Officer Letcher',
1931198: 'CFS Rockleigh Response',
1931199: 'CFS Rockleigh Info',
1931232: 'CFS Victor Harbor Operations Info',
1931259: 'CFS Region 2 State Response Team',
1931264: 'CFS Region 2 State Response Team',
1931338: 'CFS Kevin Churchward',
1931477: 'CFS Region 4 Regional Officer',
1931484: 'CFS East Torrens Operations Support Brigade',
1931698: 'CFS Region 6 Duty Officer',
1931794: 'CFS Region 5 Duty Officer',
1932305: 'CFS State IMT Response',
1932385: 'CFS Region 1 Duty Officer',
1932726: 'CFS Para Reserve Brigade Response',
1932727: 'CFS Para Reserve Info',
1932942: 'CFS Region 1 Hold',
1932943: 'CFS Region 2 Hold',
1932945: 'CFS R4 HOLD',
1932973: 'CFS Region 3 Duty Officer',
# MFS
1800096: 'MFS Paradise Station',
1800125: 'MFS Paradise PDS221',
1800159: 'MFS Angle Park APK361',
1800160: 'MFS Brooklyn Park BPK451',
1800161: 'MFS Camden Park CPK411',
1800162: 'MFS Christie Downs CDN431',
1800163: 'MFS Christie Downs CDN439',
1800164: 'MFS Elizabeth ELZ331',
1800165: 'MFS Elizabeth ELZ339',
1800166: 'MFS Gawler GAW359',
1800167: 'MFS GlenOsmond GLO449',
1800168: 'MFS Golden Grove GGV311',
1800169: 'MFS Largs North LGS281',
1800170: 'MFS Oakden OAK301',
1800171: 'MFS Oakden OAK303',
1800172: 'MFS OHalloran Hill OHH421',
1800173: 'MFS Port Adelaide PAD251',
1800174: 'MFS Prospect PPT371',
1800175: 'MFS Salisbury SAL321',
1800176: 'MFS Salisbury SAL329',
1800177: 'MFS St Marys STM401',
1800178: 'MFS St Marys STM409',
1800179: 'MFS Woodville WDV243',
1800180: 'MFS Woodville WDV249',
1800181: 'MFS Adelaide ADL201',
1800182: 'MFS Adelaide ADL203',
1800183: 'MFS Adelaide ADL202',
1800184: 'MFS Adelaide ADL204',
1800185: 'MFS Adelaide ADL205',
1800186: 'MFS Adelaide ADL206',
1800188: 'MFS Adelaide 205 or 2015',
1905655: 'MFS Murray Bridge Response',
1905665: 'MFS Victor Harbor Response',
1905672: 'MFS Tanunda Response',
1905679: 'MFS Kapunda Response',
1905686: 'MFS Mt Gambier Response',
1918534: 'MFS Berri Response',
1918562: 'MFS Kadina Response',
1918570: 'MFS Loxton Response',
1918578: 'MFS Moonta Response',
1918586: 'MFS Peterborough Response',
1918593: 'MFS Port Augusta Response',
1918604: 'MFS Port Lincoln Repsonse',
1918612: 'MFS Port Lincoln Info',
1918613: 'MFS Port Pirie',
1918616: 'MFS Renmark Response',
1918624: 'MFS Wallaroo Response',
1918631: 'MFS Whyalla Response',
1919072: 'MFS Car 31',
1919074: 'MFS Car 41',
1919094: 'MFS Largs Bay LG2814 Marine Crew',
1925389: 'MFS Port Pirie',
1930428: 'MFS Adelaide Station',
1930429: 'MFS Beulah Park Station',
1930432: 'MFS',
1930660: 'MFS Beulah Park BLP211',
1931039: 'MFS Seaford Response',
1931040: 'MFS Seaford Response',
1931410: 'MFS Woodville Station',
1931411: 'MFS Port Adelaide Station',
1931412: 'MFS Largs North Station',
1931413: 'MFS Oakden Station',
1931414: 'MFS Golden Grove Station',
1931415: 'MFS Salisbury Station',
1931416: 'MFS Elizabeth Station',
1931418: 'MFS Angle Park Station',
1931419: 'MFS Prospect Station',
1931420: 'MFS St Marys Station',
1931421: 'MFS Camden Park Station',
1931422: 'MFS Christie Downs Station',
1931423: 'MFS OHalloran Hill Station',
1931424: 'MFS Brooklyn Park Station',
1931425: 'MFS GlenOsmond Station',
1931434: 'MFS Senior Management Group',
1931447: 'MFS Gawler Station',
1931738: 'MFS TC382',
1919072: 'MFS Car 30',
1931417: 'MFS Unknown',
1925389: 'MFS Port Pirie Station',
1930431: 'MFS Commander Button',
1918612: 'MFS Port Lincoln Info',
1919060: 'MFS Safety Officer Prime',
1800170: 'MFS Oakden 301',
1931412: 'MFS Largs North Station',
1931039: 'MFS Seaford Station',
1919084: 'MFS Fire Cause',
1800189: 'MFS Adelaide 2090',
1800160: 'MFS Brooklyn Park 451',
1800173: 'MFS Port Adelaide 257',
1800071: 'MFS Adrian Puust',
1800165: 'MFS Elizabeth 339',
1926883: 'MFS Safety Officer Foster',
1931422: 'MFS Christie Downs Station',
1800178: 'MFS St Marys 409',
1919036: 'MFS Regional Commanders All',
1800179: 'MFS Woodville 243',
1931668: 'MFS TC3811',
1930432: 'MFS Officer Mason',
1932660: 'MFS Car 20',
1918616: 'MFS Renmark Station',
1918613: 'MFS Port Pirie',
1905665: 'MFS Victor Harbor Station',
1918604: 'MFS Port Lincoln Station',
1800185: 'MFS Adelaide 205',
1905672: 'MFS Tanunda Station',
1919086: 'MFS Fire Cause',
1925390: 'MFS Port Pirie Station',
1932953: 'MFS Safety Officer Reynold',
1918578: 'MFS Moonta Station',
1800188: 'MFS Adelaide 205/15',
1931040: 'MFS Seaford 469',
1931413: 'MFS Oakden Station',
1800172: "MFS O'Halloran Hill 429",
1931447: 'MFS Gawler Station',
1800162: 'MFS Christie Downs 433',
1925033: 'MFS Media',
1800169: 'MFS Largs North 281',
1931411: 'MFS Port Adelaide Station',
1800166: 'MFS Gawler 359',
1931962: 'MFS Safety Officer Matters',
1800176: 'MFS Salisbury 329',
1800184: 'MFS Adelaide 204',
1800096: 'MFS Paradise Station',
1918586: 'MFS Peterborough Station',
1932593: 'MFS Urban Search & Rescue',
1918593: 'MFS Port Augusta Station',
1932271: 'MFS Regional Commander',
1918570: 'MFS Loxton Station',
1919094: 'MFS Largs North 2814',
1905655: 'MFS Murray Bridge Station',
1919074: 'MFS Car 40',
1931415: 'MFS Salisbury Station',
1926876: 'MFS Acfo Smith',
1918631: 'MFS Whyalla Station',
1800171: 'MFS Oakden 303',
1800164: 'MFS Elizabeth 331',
1800181: 'MFS Adelaide 201',
1931421: 'MFS Camden Park Station',
1930996: 'MFS Regional Commander',
1919045: 'MFS Regional Commander',
1931635: 'MFS Safety Officer',
1919070: 'MFS Fire Cause',
1930430: 'MFS Elizabeth 331',
1932785: 'MFS Fire Cause',
1932700: 'MFS Safety Officer',
1930429: 'MFS Beulah Park Station',
1905679: 'MFS Kapunda Station',
1800175: 'MFS Salisbury 321',
1800180: 'MFS Woodville 249',
1919053: 'MFS Officer Eckerman',
1918534: 'MFS Berri Station',
1800125: 'MFS Paradise 229',
1931667: 'MFS Safety Officer',
1918562: 'MFS Kadina Station',
1931416: 'MFS Elizabeth Station',
1931418: 'MFS Angle Park Station',
1919064: 'MFS Adrian Benham',
1931169: 'MFS Adelaide Hook Lift Truck',
1926887: 'MFS CBR Pod',
1919082: 'MFS Officer Kilsby',
1930448: 'MFS Regional Commander',
1931425: 'MFS Glen Osmond Station',
1930428: 'MFS Adelaide Station',
1929264: 'MFS Elizabeth 331',
1931419: 'MFS Prospect Station',
1800163: 'MFS Christie Downs 439',
1918624: 'MFS Wallaroo Station',
1800174: 'MFS Prospect 377',
1931738: 'MFS TC382',
1931410: 'MFS Woodville Station',
1800177: 'MFS St Marys 401',
1931424: 'MFS Brooklyn Park Station',
1929260: 'MFS Adelaide CBR Pod',
1931491: 'MFS Headquarters Duty Officer',
1800161: 'MFS Camden Park 417',
1800218: 'MFS Safety Officer',
1930660: 'MFS Beulah Park 217',
1800183: 'MFS Adelaide 202',
1929258: 'MFS Fire Cause',
1931423: "MFS O'Halloran Hill Station",
1800168: 'MFS Golden Grove 311',
1800182: 'MFS Adelaide 203',
1929622: 'MFS Mount Gambier',
1800187: 'MFS C1 Thompr',
1929263: 'MFS Safety Officer Lindsy',
1930656: 'MFS Beulah Park 217',
1800167: 'MFS Glen Osmond 449',
1905686: 'MFS Mount Gambier Station',
1929262: 'MFS Acfo Morgan',
1931171: 'MFS Adelaide Hook Lift Truck',
1930968: 'MFS Officer Staple',
1931434: 'MFS Senior Management Group',
1926878: 'MFS Safety Officer',
1931414: 'MFS Golden Grove Station',
1926890: 'MFS Safety Officer',
1800186: 'MFS Adelaide 206',
1931669: 'MFS TC389',
1800159: 'MFS Angle Park 361',
1929710: 'MFS Officer Dawes',
1931420: 'MFS St Marys Station',
# SAAS
1925583: 'SAAS Unit Aldinga',
1916063: 'SAAS Unit Port Pirie',
1916048: 'SAAS Unit Minlaton',
1927493: 'SAAS Operations Manager South',
1932779: 'SAAS Community Paramedic',
1916061: 'SAAS Unit Port Lincoln',
1916871: 'SAAS Unit Wallaroo',
1908553: 'SAAS Unit Kingston',
1926145: 'SAAS Unit Port Kenny',
1931797: 'SAAS Doctor Nuriootpa',
1931636: 'SAAS Doctor Penneshaw',
1932349: 'SAAS Major Events',
1925620: 'SAAS Unit Gepps Cross',
1932502: 'SAAS Unit Oakden',
1916066: 'SAAS Unit Port Pirie',
1932348: 'SAAS Unit Glengowrie',
1931900: 'SAAS Unit Oakden',
1916023: 'SAAS Unit Lameroo',
1908537: 'SAAS Unit Coonalpyn',
1916026: 'SAAS Unit Booleroo Centre',
1916204: 'SAAS Unit Ceduna',
1930372: 'SAAS Unit Sierra Lima',
1929698: 'SAAS Unit Mclaren Vale',
1916948: 'SAAS Operations Manager North',
1931653: 'SAAS Special Operations Team',
146750: 'SAAS Unit SARRT Adelaide North',
1931618: 'SAAS Operations Manager South',
1925591: 'SAAS Unit Campbelltown',
1925560: 'SAAS Unit Edwardstown',
1931619: 'SAAS Operations Manager South',
1916010: 'SAAS Unit Kapunda',
1916008: 'SAAS Unit Berri',
1916107: 'SAAS Unit Yorketown',
1932699: 'SAAS Unit Oakden',
1932586: 'SAAS Unit Hi707 (ICP Solo Responder)',
1928863: 'SAAS Unit Gepps Cross',
1931359: 'SAAS Doctor Cowell',
1933169: 'SAAS Unit Campbelltown',
1908549: 'SAAS Extended Care Paramedic',
1931624: 'SAAS Doctor Snowtown',
1916098: 'SAAS Unit Whyalla',
1931042: 'Medstar 70',
1916064: 'SAAS Unit Port Pirie',
1931613: 'SAAS Doctor Kapunda',
1906071: 'SAAS Unit Meadows',
1929882: 'SAAS Unit Clinical Support',
1916060: 'SAAS Unit Port Lincoln',
1926871: 'SAAS Unit Murray Bridge',
1931660: 'SAAS Major Events',
1932956: 'SAAS Operation Manager North',
1916039: 'SAAS Unit Gladstone',
1916057: 'SAAS Unit Port Broughton',
1908651: 'SAAS Unit Bordertown',
1916015: 'SAAS Unit Renmark',
1916073: 'SAAS Unit Murray Bridge',
1929745: 'SAAS Unit Lima Wiskey',
1916683: 'SAAS Unit Penneshaw',
1909687: 'SAAS Operations Manager South',
1933111: 'SAAS Unit Oakden',
1800144: 'SAAS Unit Coober Pedy',
1908538: 'SAAS Unit Bordertown',
1931351: 'SAAS Doctor Tanunda',
1916074: 'SAAS Unit Murray Bridge',
1929669: 'SAAS Unit Gepps Cross',
1924262: 'SAAS Unit Warooka',
1916053: 'SAAS Unit Port Augusta',
1916095: 'SAAS Unit Streaky Bay',
1916083: 'SAAS Unit Victor Harbor',
1930810: 'SAAS Extended Care Paramedic',
1931007: 'Medstar 81',
1916012: 'SAAS Unit Mannum',
1932508: 'SAAS Unit Salisbury',
1930794: 'SAAS Extended Care Paramedic',
1929090: 'SAAS Community Responder Port Victoria',
1931068: 'Medstar 54',
1931210: 'SAAS Major Events',
1932478: 'SAAS Unit LO1 (Southern Team Leader)',
1908542: 'SAAS Unit Penola',
1931360: 'SAAS Doctor Port Broughton',
1916710: 'SAAS Unit Strathalbyn',
1915597: 'SAAS Unit Woodside',
1916943: 'SAAS Operations Manager North',
1932227: 'SAAS Extended Care Paramedic',
1932796: 'SAAS Unit Ceduna',
1924261: 'SAAS Unit My400',
1927118: 'SAAS Unit Wallaroo',
1916029: 'SAAS Unit Burra',
1930440: 'SAAS SPRINT Car',
1928918: 'SAAS Operations Manager South',
1930444: 'SAAS SPRINT Car',
1931066: 'Medstar 94',
1916947: 'SAAS Operations Manager North',
1932649: 'SAAS Unit Clinical Support',
1916058: 'SAAS Unit Port Lincoln',
1909686: 'SAAS Operations Manager South',
1933170: 'SAAS Unit Noarlunga',
1931621: 'SAAS CEO Jason Killens',
1931352: 'SAAS Doctor Robe',
1930442: 'SAAS SPRINT Car',
1931446: 'SAAS Unit Lima Oscar',
1931350: 'SAAS Doctor Mannum',
1930777: 'SAAS Unit Salisbury',
1931913: 'SAAS Unit Parkside',
1916049: 'SAAS Unit Moonta',
1931209: 'SAAS Major Events',
1925571: 'SAAS Unit Redwood Park',
1932609: 'SAAS Unit Coober Pedy',
1916042: 'SAAS Unit Kadina',
1931344: 'SAAS Doctor Waikerie',
1925584: 'SAAS Unit Ashford',
1929676: 'SAAS Clinical Support',
1930458: 'SAAS Unit Mclaren Vale',
1916050: 'SAAS Unit Orroroo',
1931045: 'Medstar 180',
1925609: 'SAAS Unit Fulham',
1931826: 'SAAS Unit Millicent',
1916557: 'SAAS Unit Port Augusta',
1916556: 'SAAS Unit Port Augusta',
1925619: 'SAAS Unit Gepps Cross',
1908546: 'SAAS Unit Mount Gambier',
1929307: 'SAAS Unit Oodnadatta',
1916591: 'SAAS Unit Port Pirie',
1800141: 'SAAS Unit Ceduna',
1925590: 'SAAS Unit Campbelltown',
1931201: 'SAAS Major Events',
1930813: 'SAAS Extended Care Paramedic',
1916090: 'SAAS Unit Yankalilla',
1916099: 'SAAS Unit Whyalla',
1927377: 'SAAS Unit Port MacDonnell',
1930706: 'SAAS Operations Manager South',
1916555: 'SAAS Unit Port Augusta',
1925606: 'SAAS Unit Edwardstown',
1930439: 'SAAS SPRINT Car',
1931044: 'Medstar 54',
1933120: 'SAAS Unit Salisbury',
1932519: 'RAH Discharge Lounge',
1916078: 'SAAS Unit Penneshaw',
1925569: 'SAAS Unit Edwardstown',
1924810: 'SAAS Unit Port Neill',
1800065: 'SAAS Unit Port Augusta',
1931008: 'Medstar 181',
1925567: 'SAAS Unit Noarlunga',
1908540: 'SAAS Unit Keith',
1931663: 'SAAS Major Events',
1925622: 'SAAS Unit Gepps Cross',
1932369: 'SAAS Major Events',
1925563: 'SAAS Unit Gepps Cross',
1931897: 'SAAS Unit Prospect',
1925617: 'SAAS Unit Gepps Cross',
1925612: 'SAAS Unit Fulham',
1916156: 'SAAS Unit Whyalla',
1930630: 'SAAS Unit Transport Bus',
1929672: 'SAAS Unit Salisbury',
1916047: 'SAAS Unit Marla',
1933060: 'SAAS Unit SARRT Roaming Crew',
1916580: 'SAAS Unit Port Lincoln',
1908552: 'SAAS Unit Robe',
1933173: 'SAAS Unit Noarlunga',
1916034: 'SAAS Unit Coober Pedy',
1925616: 'SAAS Unit Fulham',
1932490: 'SAAS Unit Parkside',
1916052: 'SAAS Unit Port Augusta',
1917141: 'SAAS Unit Lima Echo',
1933123: 'SAAS Unit Salisbury',
1916091: 'SAAS Unit American River',
1930812: 'SAAS Unit Prospect',
1908543: 'SAAS Unit Nangwarry',
1929675: 'SAAS Clinical Support',
1925573: 'SAAS Unit Noarlunga',
1916043: 'SAAS Unit Kimba',
1908785: 'SAAS Regional Team Leader Fleurieu Peninsula',
1929128: 'SAAS Community Responder Port Victoria',
1908547: 'SAAS Unit Mount Gambier',
1932361: 'SAAS Major Events',
1908784: 'SAAS Operations Manager North',
1929246: 'SAAS Unit Naracoorte',
1931906: 'SAAS Unit Parkside',
1926021: 'SAAS Unit Mount Gambier',
1932354: 'SAAS Unit Salisbury',
1929744: 'SAAS Unit Lima Sierra',
1931064: 'Medstar 70',
1929127: 'SAAS Community Responder Port Victoria',
1908781: 'SAAS Operations Manager South',
1931365: 'SAAS Extended Care Paramedic',
1909714: 'SAAS Unit Waikerie',
1924690: 'SAAS Unit Gawler',
1925613: 'SAAS Unit Fulham',
1916946: 'SAAS Operations Manager South',
1931638: 'SAAS Special Operations Team',
1919580: 'SAAS Unit Port Lincoln',
1931013: 'SAAS Unit Berri',
1931583: 'Medstar 90',
1908536: 'SAAS Unit Tintinara',
1931354: 'SAAS Doctor Kadina',
1925576: 'SAAS Unit Brooklyn Park',
1931362: 'SAAS Doctor Penneshaw',
1916033: 'SAAS Unit Coffin Bay',
1916105: 'SAAS Unit Wudinna',
1915607: 'SAAS Unit Mount Barker',
1925588: 'SAAS Unit Mitcham',
1916584: 'SAAS Unit Port Lincoln',
1923158: 'SAAS Operations Manager North',
1928881: 'SAAS Unit Mount Barker',
1916020: 'SAAS Unit Hamley Bridge',
1916051: 'SAAS Unit Peterborough',
1916009: 'SAAS Unit Eudunda',
1916071: 'SAAS Unit Meadows',
1931046: 'Medstar 210',
1931586: 'Medstar 210',
1930443: 'SAAS SPRINT Car',
1925574: 'SAAS Unit Noarlunga',
1931358: 'SAAS Doctor Tumby Bay',
1932511: 'SAAS Unit Salisbury',
1916079: 'SAAS Unit Strathalbyn',
1931012: 'Medstar 94',
1931602: 'SAAS Operations Manager South',
1925598: 'SAAS Unit Salisbury',
1927147: 'SAAS Unit Bariatric Truck',
1927167: 'SAAS Unit Playford',
1930457: 'SAAS Unit Fulham',
1916094: 'SAAS Unit Snowtown',
1931445: 'SAAS Unit Beachport',
1925596: 'SAAS Unit Salisbury',
1916590: 'SAAS Unit Port Pirie',
1916069: 'SAAS Unit Port Wakefield',
1930815: 'SAAS Unit Port Adelaide',
1930224: 'SAAS Unit Fulham',
1916038: 'SAAS Unit Elliston',
1931339: 'SAAS Unit Motorbike Patrol',
1916942: 'SAAS Operations Manager North',
1929674: 'SAAS Clinical Support',
1916054: 'SAAS Unit Port Augusta',
1908545: 'SAAS Unit Naracoorte',
146757: 'SAAS Unit SARRT Riverland',
1931614: 'SAAS Unit Renmark',
1930441: 'SAAS SPRINT Car',
1916013: 'SAAS Unit Morgan',
1931520: 'SAAS Unit Fulham',
1925562: 'SAAS Unit Gepps Cross',
1932351: 'SAAS Unit Port Adelaide',
146754: 'SAAS Unit SARRT Port Pirie',
1930792: 'SAAS Unit Driver Training',
1916030: 'SAAS Unit Ceduna',
1930818: 'SAAS Unit Mount Barker',
1925615: 'SAAS Unit Gepps Cross',
1931202: 'SAAS Major Events',
1916750: 'SAAS Unit Woodside',
1800064: 'SAAS Operations Manager North',
1930567: 'SAAS Psychiatric Transfer Assistant',
1916596: 'SAAS Operations Manager North',
1916018: 'SAAS Unit Goolwa',
1916967: 'SAAS Unit Berri',
1932479: 'SAAS Unit LP1 (Northern Team Leader)',
1931404: 'SAAS Team Leader',
1925579: 'SAAS Unit Port Adelaide',
1930456: 'SAAS Unit Fulham',
1932484: 'SAAS Unit Port Adelaide',
1931002: 'Medstar 208',
1931921: 'SAAS Doctor Renmark',
1931047: 'Medstar 55',
1931357: 'SAAS Doctor Kimba',
1927413: 'SAAS Unit CR AC',
1916021: 'SAAS Unit Karoonda',
1931347: 'SAAS Doctor Murray Bridge',
1908750: 'SAAS Unit Robe',
1916968: 'SAAS Unit Berri',
1916037: 'SAAS Unit Cummins',
1925605: 'SAAS Unit Fulham',
1916944: 'SAAS Operations Manager North',
1929007: 'SAAS Clinical Support',
1925614: 'SAAS Unit Fulham',
1930780: 'SAAS Unit Noarlunga',
1932230: 'SAAS Doctor Naracoorte',
1931000: 'Medstar 35',
1908544: 'SAAS Unit Millicent',
1931011: 'SAAS Doctor Goolwa',
1925586: 'SAAS Unit Parkside',
1916093: 'SAAS Unit Roxby Downs',
1925587: 'SAAS Unit Parkside',
1931340: 'SAAS Unit Motorbike Patrol',
1916036: 'SAAS Unit Crystal Brook',
1909704: 'SAAS Unit Renmark',
146758: 'SAAS Unit SARRT Eyre and Western',
1925595: 'SAAS Unit Prospect',
1916202: 'SAAS Unit Ceduna',
1916558: 'SAAS Unit Port Augusta',
1925568: 'SAAS Unit Edwardstown',
1916072: 'SAAS Unit Mount Pleasant',
1916579: 'SAAS Unit Port Lincoln',
1931346: 'SAAS Doctor Penola',
1931637: 'SAAS Doctor Lameroo',
1926225: 'SAAS Unit Salisbury',
1926948: 'SAAS Unit Lima Victor',
1931348: 'SAAS Doctor Booleroo',
1924688: 'SAAS Unit Gawler',
1932360: 'SAAS Unit Seaford',
1916089: 'SAAS Unit Woodside',
1925572: 'SAAS Unit Noarlunga',
1930493: 'SAAS Unit Mount Gambier',
1916044: 'SAAS Unit Leigh Creek',
1932229: 'SAAS Extended Care Paramedic',
1916081: 'SAAS Unit Victor Harbor',
1933166: 'SAAS Unit Campbelltown',
1916082: 'SAAS Unit Victor Harbor',
1930817: 'SAAS Unit Gepps Cross',
1908535: 'SAAS Unit Meningie',
1908783: 'SAAS Unit Driver Training',
1916017: 'SAAS Unit Waikerie',
146755: 'SAAS Unit SARRT Far North',
1933114: 'SAAS Unit Oakden',
1925610: 'SAAS Unit Parkside',
1931666: 'SAAS Unit Glengowrie',
1916007: 'SAAS Unit Barmera',
1931361: 'SAAS Doctor Nuriootpa',
1924307: 'SAAS Operations Manager North',
1908551: 'SAAS Unit Lucindale',
1932487: 'SAAS Unit Prospect',
1924962: 'SAAS MVA / Other Emerg',
1925577: 'SAAS Unit Fulham',
1916045: 'SAAS Unit Lock',
1916084: 'SAAS Unit Victor Harbor',
1925600: 'SAAS Unit Prospect',
1930999: 'Medstar 107',
1916019: 'SAAS Unit Parndana',
1908548: 'SAAS Unit Mount Gambier',
1925597: 'SAAS Unit Salisbury',
1909703: 'SAAS Unit Barossa Valley',
1932780: 'SAAS Community Paramedic',
1916097: 'SAAS Unit Wallaroo',
1927694: 'SAAS Unit Clinical Support',
1916055: 'SAAS Unit Port Augusta',
1927495: 'SAAS Operations Manager North',
1927148: 'SAAS Unit Fulham',
1925611: 'SAAS Unit Fulham',
1916046: 'SAAS Unit Maitland',
1927697: 'SAAS Unit Millicent',
1908550: 'SAAS Unit Mount Gambier',
1800051: 'SAAS Unit Gawler',
1928071: 'SAAS State Duty Manager',
1925582: 'SAAS Unit Port Adelaide',
1932497: 'SAAS Unit Playford',
1925575: 'SAAS Unit Camden Park',
1932682: 'SAAS Unit Modbury Hospital',
1916006: 'SAAS Unit Barossa Valley',
1925564: 'SAAS Unit Port Adelaide',
1930998: 'Medstar 80',
1916096: 'SAAS Unit Tumby Bay',
1928979: 'SAAS Operations Manager North',
1925621: 'SAAS Unit Gepps Cross',
1931345: 'SAAS Doctor Yankalilla',
1932496: 'SAAS Unit Marion',
1925585: 'SAAS Unit Ashford',
1916070: 'SAAS Unit Mallala',
1924334: 'SAAS Unit Riverton',
1909702: 'SAAS Unit Barossa Valley',
1924044: 'SAAS Unit Stirling',
1926129: 'SAAS Clinical Support',
1931569: 'SAAS Special Operations Team',
1925581: "SAAS Unit O'Halloran Hill",
1933161: 'SAAS Unit Oakden',
1916024: 'SAAS Unit Ardrossan',
1930820: 'SAAS Unit Oakden',
1925589: 'SAAS Unit Redwood Park',
1931657: 'SAAS Unit Goolwa',
1932782: 'SAAS Community Paramedic',
1925592: 'SAAS Unit Campbelltown',
1925578: 'SAAS Unit Fulham',
1924063: 'SAAS Unit Stirling',
1909688: 'SAAS Unit Barossa Valley',
1916011: 'SAAS Unit Loxton',
1931001: 'Medstar 210',
1916755: 'SAAS Unit Woodside',
1925594: 'SAAS Unit Prospect',
1926228: 'SAAS Unit Edwardstown',
1932797: 'SAAS Unit Mount Gambier ACTL',
1931696: 'SAAS Extended Care Paramedic',
1929742: 'SAAS Unit Lima Echo',
1916032: 'SAAS Unit Cleve',
1931203: 'SAAS Major Events',
1925618: 'SAAS Unit Gepps Cross',
1916185: 'SAAS Unit Burra',
1928957: 'SAAS Operations Manager',
1916022: 'SAAS Unit Kingscote',
1931349: 'SAAS Doctor Caltowie',
1907760: 'SAAS Unit Coomandook',
1931356: 'SAAS Doctor Quorn',
1931003: 'Medstar 94',
1916108: 'SAAS Unit Yunta',
1909701: 'SAAS Unit Loxton',
1927115: 'SAAS Unit Wallaroo',
1925593: 'SAAS Unit Playford',
1932588: 'SAAS Unit Hi708 (ICP Solo Responder)',
1916102: 'SAAS Unit Whyalla',
1928861: 'SAAS Operations Manager North',
1925580: 'SAAS Unit Marion',
1932505: 'SAAS Unit Salisbury',
1931694: 'SAAS Extended Care Paramedic',
1907898: 'SAAS Clinical Support',
1916014: 'SAAS Unit Pinaroo',
1931903: 'SAAS Unit Campbelltown',
1927485: 'SAAS Operations Manager',
1908711: 'SAAS Unit Naracoorte',
1929844: 'SAAS Doctor Normanville',
1916031: 'SAAS Unit Clare',
1925566: 'SAAS Unit Port Adelaide',
1930819: 'SAAS Unit Salisbury',
1908554: 'SAAS Unit Padthaway',
1931043: 'Medstar 90',
1916035: 'SAAS Unit Cowell',
1916101: 'SAAS Unit Whyalla',
1916040: 'SAAS Unit Hawker',
1925624: 'SAAS Unit Edwardstown',
1916080: 'SAAS Unit Tailem Bend',
1925565: 'SAAS Unit Salisbury',
1932264: 'SAAS Operations Manager South',
1916025: 'SAAS Unit Balaklava',
1932781: 'SAAS Unit C04',
1925561: 'SAAS Unit Edwardstown',
1932357: 'SAAS Major Events',
1931060: 'Medstar 180',
1909683: 'SAAS Unit Barossa Valley',
1932265: 'SAAS Unit Murray Bridge',
1932799: 'SAAS Unit Oakden',
1932493: 'SAAS Unit Marion',
1916041: 'SAAS Unit Jamestown',
1929743: 'SAAS Unit Lima November',
1931790: 'SAAS Unit Goolwa',
1916866: 'SAAS Unit Wallaroo',
146752: 'SAAS Unit SARRT Murray Coorong',
1916067: 'SAAS Unit Port Pirie',
1800147: 'SAAS Unit Fleurieu Peninsula',
1927491: 'SAAS Operations Manager South',
1931568: 'SAAS Special Operations Team',
1930197: 'SAAS Unit Salt Creek',
1926227: 'SAAS Unit Ashford',
1932648: 'SAAS Doctor Clare',
1916016: 'SAAS Unit Swan Reach',
1925570: 'SAAS Unit Noarlunga',
1931067: 'Medstar 48',
1928839: 'SAAS Unit Gepps Cross',
1916092: 'SAAS Unit Quorn',
1931343: 'SAAS Doctor Millicent',
1925623: 'SAAS Unit Gepps Cross',
1932659: 'SAAS Unit Strathalbyn',
1927117: 'SAAS Unit Gawler',
1930816: 'SAAS Unit Mitcham',
1924306: 'SAAS Operations Manager North',
1930234: 'SAAS Unit Papa Sierra',
1933185: 'SAAS Unit Noarlunga',
1932364: 'SAAS Operations Manager South',
1929717: 'SAAS Unit Team Leader',
1930811: 'SAAS Extended Care Paramedic',
1909713: 'SAAS Unit Berri',
1916076: 'SAAS Unit Murray Bridge',
1916100: 'SAAS Unit Whyalla',
1925583: 'SAAS Unit Aldinga',
1916063: 'SAAS Unit Port Pirie',
1916048: 'SAAS Unit Minlaton',
1927493: 'SAAS Operations Manager South',
1932779: 'SAAS Community Paramedic',
1916061: 'SAAS Unit Port Lincoln',
1916871: 'SAAS Unit Wallaroo',
1908553: 'SAAS Unit Kingston',
1926145: 'SAAS Unit Port Kenny',
1931797: 'SAAS Doctor Nuriootpa',
1931636: 'SAAS Doctor Penneshaw',
1932349: 'SAAS Major Events',
1925620: 'SAAS Unit Gepps Cross',
1932502: 'SAAS Unit Oakden',
1916066: 'SAAS Unit Port Pirie',
1932348: 'SAAS Unit Glengowrie',
1931900: 'SAAS Unit Oakden',
1916023: 'SAAS Unit Lameroo',
1908537: 'SAAS Unit Coonalpyn',
1916026: 'SAAS Unit Booleroo Centre',
1916204: 'SAAS Unit Ceduna',
1930372: 'SAAS Unit Sierra Lima',
1929698: 'SAAS Unit Mclaren Vale',
1916948: 'SAAS Operations Manager North',
1931653: 'SAAS Special Operations Team',
146750: 'SAAS Unit SARRT Adelaide North',
1931618: 'SAAS Operations Manager South',
1925591: 'SAAS Unit Campbelltown',
1925560: 'SAAS Unit Edwardstown',
1931619: 'SAAS Operations Manager South',
1916010: 'SAAS Unit Kapunda',
1916008: 'SAAS Unit Berri',
1916107: 'SAAS Unit Yorketown',
1932699: 'SAAS Unit Oakden',
1932586: 'SAAS Unit Hi707 (ICP Solo Responder)',
1928863: 'SAAS Unit Gepps Cross',
1931359: 'SAAS Doctor Cowell',
1933169: 'SAAS Unit Campbelltown',
1908549: 'SAAS Extended Care Paramedic',
1931624: 'SAAS Doctor Snowtown',
1916098: 'SAAS Unit Whyalla',
1931042: 'Medstar 70',
1916064: 'SAAS Unit Port Pirie',
1931613: 'SAAS Doctor Kapunda',
1906071: 'SAAS Unit Meadows',
1929882: 'SAAS Unit Clinical Support',
1916060: 'SAAS Unit Port Lincoln',
1926871: 'SAAS Unit Murray Bridge',
1931660: 'SAAS Major Events',
1932956: 'SAAS Operation Manager North',
1916039: 'SAAS Unit Gladstone',
1916057: 'SAAS Unit Port Broughton',
1908651: 'SAAS Unit Bordertown',
1916015: 'SAAS Unit Renmark',
1916073: 'SAAS Unit Murray Bridge',
1929745: 'SAAS Unit Lima Wiskey',
1916683: 'SAAS Unit Penneshaw',
1909687: 'SAAS Operations Manager South',
1933111: 'SAAS Unit Oakden',
1800144: 'SAAS Unit Coober Pedy',
1908538: 'SAAS Unit Bordertown',
1931351: 'SAAS Doctor Tanunda',
1916074: 'SAAS Unit Murray Bridge',
1929669: 'SAAS Unit Gepps Cross',
1924262: 'SAAS Unit Warooka',
1916053: 'SAAS Unit Port Augusta',
1916095: 'SAAS Unit Streaky Bay',
1916083: 'SAAS Unit Victor Harbor',
1930810: 'SAAS Extended Care Paramedic',
1931007: 'Medstar 81',
1916012: 'SAAS Unit Mannum',
1932508: 'SAAS Unit Salisbury',
1930794: 'SAAS Extended Care Paramedic',
1929090: 'SAAS Community Responder Port Victoria',
1931068: 'Medstar 54',
1931210: 'SAAS Major Events',
1932478: 'SAAS Unit LO1 (Southern Team Leader)',
1908542: 'SAAS Unit Penola',
1931360: 'SAAS Doctor Port Broughton',
1916710: 'SAAS Unit Strathalbyn',
1915597: 'SAAS Unit Woodside',
1916943: 'SAAS Operations Manager North',
1932227: 'SAAS Extended Care Paramedic',
1932796: 'SAAS Unit Ceduna',
1924261: 'SAAS Unit My400',
1927118: 'SAAS Unit Wallaroo',
1916029: 'SAAS Unit Burra',
1930440: 'SAAS SPRINT Car',
1928918: 'SAAS Operations Manager South',
1930444: 'SAAS SPRINT Car',
1931066: 'Medstar 94',
1916947: 'SAAS Operations Manager North',
1932649: 'SAAS Unit Clinical Support',
1916058: 'SAAS Unit Port Lincoln',
1909686: 'SAAS Operations Manager South',
1933170: 'SAAS Unit Noarlunga',
1931621: 'SAAS CEO Jason Killens',
1931352: 'SAAS Doctor Robe',
1930442: 'SAAS SPRINT Car',
1931446: 'SAAS Unit Lima Oscar',
1931350: 'SAAS Doctor Mannum',
1930777: 'SAAS Unit Salisbury',
1931913: 'SAAS Unit Parkside',
1916049: 'SAAS Unit Moonta',
1931209: 'SAAS Major Events',
1925571: 'SAAS Unit Redwood Park',
1932609: 'SAAS Unit Coober Pedy',
1916042: 'SAAS Unit Kadina',
1931344: 'SAAS Doctor Waikerie',
1925584: 'SAAS Unit Ashford',
1929676: 'SAAS Clinical Support',
1930458: 'SAAS Unit Mclaren Vale',
1916050: 'SAAS Unit Orroroo',
1931045: 'Medstar 180',
1925609: 'SAAS Unit Fulham',
1931826: 'SAAS Unit Millicent',
1916557: 'SAAS Unit Port Augusta',
1916556: 'SAAS Unit Port Augusta',
1925619: 'SAAS Unit Gepps Cross',
1908546: 'SAAS Unit Mount Gambier',
1929307: 'SAAS Unit Oodnadatta',
1916591: 'SAAS Unit Port Pirie',
1800141: 'SAAS Unit Ceduna',
1925590: 'SAAS Unit Campbelltown',
1931201: 'SAAS Major Events',
1930813: 'SAAS Extended Care Paramedic',
1916090: 'SAAS Unit Yankalilla',
1916099: 'SAAS Unit Whyalla',
1927377: 'SAAS Unit Port MacDonnell',
1930706: 'SAAS Operations Manager South',
1916555: 'SAAS Unit Port Augusta',
1925606: 'SAAS Unit Edwardstown',
1930439: 'SAAS SPRINT Car',
1931044: 'Medstar 54',
1933120: 'SAAS Unit Salisbury',
1932519: 'RAH Discharge Lounge',
1916078: 'SAAS Unit Penneshaw',
1925569: 'SAAS Unit Edwardstown',
1924810: 'SAAS Unit Port Neill',
1800065: 'SAAS Unit Port Augusta',
1931008: 'Medstar 181',
1925567: 'SAAS Unit Noarlunga',
1908540: 'SAAS Unit Keith',
1931663: 'SAAS Major Events',
1925622: 'SAAS Unit Gepps Cross',
1932369: 'SAAS Major Events',
1925563: 'SAAS Unit Gepps Cross',
1931897: 'SAAS Unit Prospect',
1925617: 'SAAS Unit Gepps Cross',
1925612: 'SAAS Unit Fulham',
1916156: 'SAAS Unit Whyalla',
1930630: 'SAAS Unit Transport Bus',
1929672: 'SAAS Unit Salisbury',
1916047: 'SAAS Unit Marla',
1933060: 'SAAS Unit SARRT Roaming Crew',
1916580: 'SAAS Unit Port Lincoln',
1908552: 'SAAS Unit Robe',
1933173: 'SAAS Unit Noarlunga',
1916034: 'SAAS Unit Coober Pedy',
1925616: 'SAAS Unit Fulham',
1932490: 'SAAS Unit Parkside',
1916052: 'SAAS Unit Port Augusta',
1917141: 'SAAS Unit Lima Echo',
1933123: 'SAAS Unit Salisbury',
1916091: 'SAAS Unit American River',
1930812: 'SAAS Unit Prospect',
1908543: 'SAAS Unit Nangwarry',
1929675: 'SAAS Clinical Support',
1925573: 'SAAS Unit Noarlunga',
1916043: 'SAAS Unit Kimba',
1908785: 'SAAS Regional Team Leader Fleurieu Peninsula',
1929128: 'SAAS Community Responder Port Victoria',
1908547: 'SAAS Unit Mount Gambier',
1932361: 'SAAS Major Events',
1908784: 'SAAS Operations Manager North',
1929246: 'SAAS Unit Naracoorte',
1931906: 'SAAS Unit Parkside',
1926021: 'SAAS Unit Mount Gambier',
1932354: 'SAAS Unit Salisbury',
1929744: 'SAAS Unit Lima Sierra',
1931064: 'Medstar 70',
1929127: 'SAAS Community Responder Port Victoria',
1908781: 'SAAS Operations Manager South',
1931365: 'SAAS Extended Care Paramedic',
1909714: 'SAAS Unit Waikerie',
1924690: 'SAAS Unit Gawler',
1925613: 'SAAS Unit Fulham',
1916946: 'SAAS Operations Manager South',
1931638: 'SAAS Special Operations Team',
1919580: 'SAAS Unit Port Lincoln',
1931013: 'SAAS Unit Berri',
1931583: 'Medstar 90',
1908536: 'SAAS Unit Tintinara',
1931354: 'SAAS Doctor Kadina',
1925576: 'SAAS Unit Brooklyn Park',
1931362: 'SAAS Doctor Penneshaw',
1916033: 'SAAS Unit Coffin Bay',
1916105: 'SAAS Unit Wudinna',
1915607: 'SAAS Unit Mount Barker',
1925588: 'SAAS Unit Mitcham',
1916584: 'SAAS Unit Port Lincoln',
1923158: 'SAAS Operations Manager North',
1928881: 'SAAS Unit Mount Barker',
1916020: 'SAAS Unit Hamley Bridge',
1916051: 'SAAS Unit Peterborough',
1916009: 'SAAS Unit Eudunda',
1916071: 'SAAS Unit Meadows',
1931046: 'Medstar 210',
1931586: 'Medstar 210',
1930443: 'SAAS SPRINT Car',
1925574: 'SAAS Unit Noarlunga',
1931358: 'SAAS Doctor Tumby Bay',
1932511: 'SAAS Unit Salisbury',
1916079: 'SAAS Unit Strathalbyn',
1931012: 'Medstar 94',
1931602: 'SAAS Operations Manager South',
1925598: 'SAAS Unit Salisbury',
1927147: 'SAAS Unit Bariatric Truck',
1927167: 'SAAS Unit Playford',
1930457: 'SAAS Unit Fulham',
1916094: 'SAAS Unit Snowtown',
1931445: 'SAAS Unit Beachport',
1925596: 'SAAS Unit Salisbury',
1916590: 'SAAS Unit Port Pirie',
1916069: 'SAAS Unit Port Wakefield',
1930815: 'SAAS Unit Port Adelaide',
1930224: 'SAAS Unit Fulham',
1916038: 'SAAS Unit Elliston',
1931339: 'SAAS Unit Motorbike Patrol',
1916942: 'SAAS Operations Manager North',
1929674: 'SAAS Clinical Support',
1916054: 'SAAS Unit Port Augusta',
1908545: 'SAAS Unit Naracoorte',
146757: 'SAAS Unit SARRT Riverland',
1931614: 'SAAS Unit Renmark',
1930441: 'SAAS SPRINT Car',
1916013: 'SAAS Unit Morgan',
1931520: 'SAAS Unit Fulham',
1925562: 'SAAS Unit Gepps Cross',
1932351: 'SAAS Unit Port Adelaide',
146754: 'SAAS Unit SARRT Port Pirie',
1930792: 'SAAS Unit Driver Training',
1916030: 'SAAS Unit Ceduna',
1930818: 'SAAS Unit Mount Barker',
1925615: 'SAAS Unit Gepps Cross',
1931202: 'SAAS Major Events',
1916750: 'SAAS Unit Woodside',
1800064: 'SAAS Operations Manager North',
1930567: 'SAAS Psychiatric Transfer Assistant',
1916596: 'SAAS Operations Manager North',
1916018: 'SAAS Unit Goolwa',
1916967: 'SAAS Unit Berri',
1932479: 'SAAS Unit LP1 (Northern Team Leader)',
1931404: 'SAAS Team Leader',
1925579: 'SAAS Unit Port Adelaide',
1930456: 'SAAS Unit Fulham',
1932484: 'SAAS Unit Port Adelaide',
1931002: 'Medstar 208',
1931921: 'SAAS Doctor Renmark',
1931047: 'Medstar 55',
1931357: 'SAAS Doctor Kimba',
1927413: 'SAAS Unit CR AC',
1916021: 'SAAS Unit Karoonda',
1931347: 'SAAS Doctor Murray Bridge',
1908750: 'SAAS Unit Robe',
1916968: 'SAAS Unit Berri',
1916037: 'SAAS Unit Cummins',
1925605: 'SAAS Unit Fulham',
1916944: 'SAAS Operations Manager North',
1929007: 'SAAS Clinical Support',
1925614: 'SAAS Unit Fulham',
1930780: 'SAAS Unit Noarlunga',
1932230: 'SAAS Doctor Naracoorte',
1931000: 'Medstar 35',
1908544: 'SAAS Unit Millicent',
1931011: 'SAAS Doctor Goolwa',
1925586: 'SAAS Unit Parkside',
1916093: 'SAAS Unit Roxby Downs',
1925587: 'SAAS Unit Parkside',
1931340: 'SAAS Unit Motorbike Patrol',
1916036: 'SAAS Unit Crystal Brook',
1909704: 'SAAS Unit Renmark',
146758: 'SAAS Unit SARRT Eyre and Western',
1925595: 'SAAS Unit Prospect',
1916202: 'SAAS Unit Ceduna',
1916558: 'SAAS Unit Port Augusta',
1925568: 'SAAS Unit Edwardstown',
1916072: 'SAAS Unit Mount Pleasant',
1916579: 'SAAS Unit Port Lincoln',
1931346: 'SAAS Doctor Penola',
1931637: 'SAAS Doctor Lameroo',
1926225: 'SAAS Unit Salisbury',
1926948: 'SAAS Unit Lima Victor',
1931348: 'SAAS Doctor Booleroo',
1924688: 'SAAS Unit Gawler',
1932360: 'SAAS Unit Seaford',
1916089: 'SAAS Unit Woodside',
1925572: 'SAAS Unit Noarlunga',
1930493: 'SAAS Unit Mount Gambier',
1916044: 'SAAS Unit Leigh Creek',
1932229: 'SAAS Extended Care Paramedic',
1916081: 'SAAS Unit Victor Harbor',
1933166: 'SAAS Unit Campbelltown',
1916082: 'SAAS Unit Victor Harbor',
1930817: 'SAAS Unit Gepps Cross',
1908535: 'SAAS Unit Meningie',
1908783: 'SAAS Unit Driver Training',
1916017: 'SAAS Unit Waikerie',
146755: 'SAAS Unit SARRT Far North',
1933114: 'SAAS Unit Oakden',
1925610: 'SAAS Unit Parkside',
1931666: 'SAAS Unit Glengowrie',
1916007: 'SAAS Unit Barmera',
1931361: 'SAAS Doctor Nuriootpa',
1924307: 'SAAS Operations Manager North',
1908551: 'SAAS Unit Lucindale',
1932487: 'SAAS Unit Prospect',
1924962: 'SAAS MVA / Other Emerg',
1925577: 'SAAS Unit Fulham',
1916045: 'SAAS Unit Lock',
1916084: 'SAAS Unit Victor Harbor',
1925600: 'SAAS Unit Prospect',
1930999: 'Medstar 107',
1916019: 'SAAS Unit Parndana',
1908548: 'SAAS Unit Mount Gambier',
1925597: 'SAAS Unit Salisbury',
1909703: 'SAAS Unit Barossa Valley',
1932780: 'SAAS Community Paramedic',
1916097: 'SAAS Unit Wallaroo',
1927694: 'SAAS Unit Clinical Support',
1916055: 'SAAS Unit Port Augusta',
1927495: 'SAAS Operations Manager North',
1927148: 'SAAS Unit Fulham',
1925611: 'SAAS Unit Fulham',
1916046: 'SAAS Unit Maitland',
1927697: 'SAAS Unit Millicent',
1908550: 'SAAS Unit Mount Gambier',
1800051: 'SAAS Unit Gawler',
1928071: 'SAAS State Duty Manager',
1925582: 'SAAS Unit Port Adelaide',
1932497: 'SAAS Unit Playford',
1925575: 'SAAS Unit Camden Park',
1932682: 'SAAS Unit Modbury Hospital',
1916006: 'SAAS Unit Barossa Valley',
1925564: 'SAAS Unit Port Adelaide',
1930998: 'Medstar 80',
1916096: 'SAAS Unit Tumby Bay',
1928979: 'SAAS Operations Manager North',
1925621: 'SAAS Unit Gepps Cross',
1931345: 'SAAS Doctor Yankalilla',
1932496: 'SAAS Unit Marion',
1925585: 'SAAS Unit Ashford',
1916070: 'SAAS Unit Mallala',
1924334: 'SAAS Unit Riverton',
1909702: 'SAAS Unit Barossa Valley',
1924044: 'SAAS Unit Stirling',
1926129: 'SAAS Clinical Support',
1931569: 'SAAS Special Operations Team',
1925581: "SAAS Unit O'Halloran Hill",
1933161: 'SAAS Unit Oakden',
1916024: 'SAAS Unit Ardrossan',
1930820: 'SAAS Unit Oakden',
1925589: 'SAAS Unit Redwood Park',
1931657: 'SAAS Unit Goolwa',
1932782: 'SAAS Community Paramedic',
1925592: 'SAAS Unit Campbelltown',
1925578: 'SAAS Unit Fulham',
1924063: 'SAAS Unit Stirling',
1909688: 'SAAS Unit Barossa Valley',
1916011: 'SAAS Unit Loxton',
1931001: 'Medstar 210',
1916755: 'SAAS Unit Woodside',
1925594: 'SAAS Unit Prospect',
1926228: 'SAAS Unit Edwardstown',
1932797: 'SAAS Unit Mount Gambier ACTL',
1931696: 'SAAS Extended Care Paramedic',
1929742: 'SAAS Unit Lima Echo',
1916032: 'SAAS Unit Cleve',
1931203: 'SAAS Major Events',
1925618: 'SAAS Unit Gepps Cross',
1916185: 'SAAS Unit Burra',
1928957: 'SAAS Operations Manager',
1916022: 'SAAS Unit Kingscote',
1931349: 'SAAS Doctor Caltowie',
1907760: 'SAAS Unit Coomandook',
1931356: 'SAAS Doctor Quorn',
1931003: 'Medstar 94',
1916108: 'SAAS Unit Yunta',
1909701: 'SAAS Unit Loxton',
1927115: 'SAAS Unit Wallaroo',
1925593: 'SAAS Unit Playford',
1932588: 'SAAS Unit Hi708 (ICP Solo Responder)',
1916102: 'SAAS Unit Whyalla',
1928861: 'SAAS Operations Manager North',
1925580: 'SAAS Unit Marion',
1932505: 'SAAS Unit Salisbury',
1931694: 'SAAS Extended Care Paramedic',
1907898: 'SAAS Clinical Support',
1916014: 'SAAS Unit Pinaroo',
1931903: 'SAAS Unit Campbelltown',
1927485: 'SAAS Operations Manager',
1908711: 'SAAS Unit Naracoorte',
1929844: 'SAAS Doctor Normanville',
1916031: 'SAAS Unit Clare',
1925566: 'SAAS Unit Port Adelaide',
1930819: 'SAAS Unit Salisbury',
1908554: 'SAAS Unit Padthaway',
1931043: 'Medstar 90',
1916035: 'SAAS Unit Cowell',
1916101: 'SAAS Unit Whyalla',
1916040: 'SAAS Unit Hawker',
1925624: 'SAAS Unit Edwardstown',
1916080: 'SAAS Unit Tailem Bend',
1925565: 'SAAS Unit Salisbury',
1932264: 'SAAS Operations Manager South',
1916025: 'SAAS Unit Balaklava',
1932781: 'SAAS Unit C04',
1925561: 'SAAS Unit Edwardstown',
1932357: 'SAAS Major Events',
1931060: 'Medstar 180',
1909683: 'SAAS Unit Barossa Valley',
1932265: 'SAAS Unit Murray Bridge',
1932799: 'SAAS Unit Oakden',
1932493: 'SAAS Unit Marion',
1916041: 'SAAS Unit Jamestown',
1929743: 'SAAS Unit Lima November',
1931790: 'SAAS Unit Goolwa',
1916866: 'SAAS Unit Wallaroo',
146752: 'SAAS Unit SARRT Murray Coorong',
1916067: 'SAAS Unit Port Pirie',
1800147: 'SAAS Unit Fleurieu Peninsula',
1927491: 'SAAS Operations Manager South',
1931568: 'SAAS Special Operations Team',
1930197: 'SAAS Unit Salt Creek',
1926227: 'SAAS Unit Ashford',
1932648: 'SAAS Doctor Clare',
1916016: 'SAAS Unit Swan Reach',
1925570: 'SAAS Unit Noarlunga',
1931067: 'Medstar 48',
1928839: 'SAAS Unit Gepps Cross',
1916092: 'SAAS Unit Quorn',
1931343: 'SAAS Doctor Millicent',
1925623: 'SAAS Unit Gepps Cross',
1932659: 'SAAS Unit Strathalbyn',
1927117: 'SAAS Unit Gawler',
1930816: 'SAAS Unit Mitcham',
1924306: 'SAAS Operations Manager North',
1930234: 'SAAS Unit Papa Sierra',
1933185: 'SAAS Unit Noarlunga',
1932364: 'SAAS Operations Manager South',
1929717: 'SAAS Unit Team Leader',
1930811: 'SAAS Extended Care Paramedic',
1909713: 'SAAS Unit Berri',
1916076: 'SAAS Unit Murray Bridge',
1916100: 'SAAS Unit Whyalla',
# SES
1926033: 'SES Port Lincoln Info',
1923247: 'SES Streaky Bay',
1915540: 'SES Loxton',
1908220: 'SES State Notify City',
1926098: 'SES Renmark and Paringa Info',
1925059: 'SES Mount Barker Info',
1906610: 'SES Meningie',
1928013: 'SES Regional Officer',
1908022: 'SES Kangaroo Island',
1908042: 'SES SES ? Info (Know who this is? Let us know!)',
1927954: 'SES West Region Officer',
1915501: 'SES Andamooka',
1926813: 'SES SES State Notify West',
1925170: 'SES Enfield Info',
1923240: 'SES Nullarbor',
1908094: 'SES Western Adelaide',
1918225: 'SES Port Broughton',
1908027: 'SES Kapunda',
1918231: 'SES Snowtown',
1908093: 'SES Western Adelaide Info',
1918229: 'SES Quorn',
1926274: 'SES Millicent Info',
1918213: 'SES Hawker',
1908063: 'SES Prospect',
1905914: 'SES SITREPS',
1906624: 'SES Bordertown',
1906658: 'SES Mount Gambier and Districts',
1923244: 'SES Port Lincoln',
1908032: 'SES Metro South',
1908002: 'SES Mount Barker',
1918215: 'SES Laura',
1915537: 'SES Roxby Downs',
1923236: 'SES Cummins',
1918201: 'SES Burra',
1925782: 'SES Metro South Info',
1915503: 'SES Booleroo',
1906645: 'SES West Region',
1923234: 'SES Cleve',
1918235: 'SES Warooka',
1926811: 'SES State Notify East',
1926070: 'SES South Coast Info',
1911993: 'SES Out Of Area 03',
1918203: 'SES Bute',
1908083: 'SES Strathalbyn',
1908045: 'SES Murray Bridge and District',
1906687: 'SES Kingston',
1918205: 'SES Clare',
1918223: 'SES Port Augusta',
1906674: 'SES Millicent',
1924131: 'SES Whyalla Info',
1928012: 'SES East Regional Officer',
1925072: 'SES Noarlunga Info',
1925793: 'SES Barmera Info',
1924138: 'SES Blanchetown',
1908007: 'SES Campbelltown',
1908086: 'SES Tea Tree Gully',
1908084: 'SES Sturt Info',
1908055: 'SES Salisbury',
1918207: 'SES Cockburn',
1915574: 'SES Renmark and Paringa',
1908009: 'SES Eastern Suburbs Info',
1908061: 'SES Prospect',
1925060: 'SES Loxton',
1923231: 'SES Ceduna',
1925102: 'SES South East Operations Coordination Unit',
1908073: 'SES South Coast',
1918237: 'SES Berri',
1906642: 'SES Keith',
1908053: 'SES Noarlunga Officers?',
1925001: 'SES Campbelltown Info',
1908012: 'SES Eastern Suburbs',
1908017: 'SES Enfield',
1908528: 'SES State Duty Officer',
1908080: 'SES Strathalbyn Info',
1918227: 'SES Port Pirie',
1924135: 'SES Whyalla',
1915521: 'SES Leigh Creek',
1908099: 'SES Yankalilla',
1923230: 'SES West Coast',
1908087: 'SES Tea Tree Gully Info',
1918219: 'SES Marla',
1908065: 'SES Saddleworth and District',
1918209: 'SES Coober Pedy',
1918057: 'SES Onkaparinga',
1908054: 'SES Noarlunga',
1918217: 'SES Maitland',
1906668: 'SES K9 Search & Rescue Team',
1908019: 'SES Sturt',
1925054: 'SES Salisbury Info',
1908051: 'SES Swiftwater Response Group',
1928011: 'SES Central Region Officer',
1926812: 'SES State Notify North',
1908070: 'SES Headquarters Unit',
1918233: 'SES Spalding',
1923251: 'SES Wudinna',
1923249: 'SES Tumby Bay',
1923238: 'SES Kimba',
1918221: 'SES Mintabie',
1918211: 'SES Hallett',
1918239: 'SES Barmera',
1926033: 'SES Port Lincoln Info',
1923247: 'SES Streaky Bay',
1915540: 'SES Loxton',
1908220: 'SES State Notify City',
1926098: 'SES Renmark and Paringa Info',
1925059: 'SES Mount Barker Info',
1906610: 'SES Meningie',
1928013: 'SES Regional Officer',
1908022: 'SES Kangaroo Island',
1908042: 'SES SES ? Info (Know who this is? Let us know!)',
1927954: 'SES West Region Officer',
1915501: 'SES Andamooka',
1926813: 'SES SES State Notify West',
1925170: 'SES Enfield Info',
1923240: 'SES Nullarbor',
1908094: 'SES Western Adelaide',
1918225: 'SES Port Broughton',
1908027: 'SES Kapunda',
1918231: 'SES Snowtown',
1908093: 'SES Western Adelaide Info',
1918229: 'SES Quorn',
1926274: 'SES Millicent Info',
1918213: 'SES Hawker',
1908063: 'SES Prospect',
1905914: 'SES SITREPS',
1906624: 'SES Bordertown',
1906658: 'SES Mount Gambier and Districts',
1923244: 'SES Port Lincoln',
1908032: 'SES Metro South',
1908002: 'SES Mount Barker',
1918215: 'SES Laura',
1915537: 'SES Roxby Downs',
1923236: 'SES Cummins',
1918201: 'SES Burra',
1925782: 'SES Metro South Info',
1915503: 'SES Booleroo',
1906645: 'SES West Region',
1923234: 'SES Cleve',
1918235: 'SES Warooka',
1926811: 'SES State Notify East',
1926070: 'SES South Coast Info',
1911993: 'SES Out Of Area 03',
1918203: 'SES Bute',
1908083: 'SES Strathalbyn',
1908045: 'SES Murray Bridge and District',
1906687: 'SES Kingston',
1918205: 'SES Clare',
1918223: 'SES Port Augusta',
1906674: 'SES Millicent',
1924131: 'SES Whyalla Info',
1928012: 'SES East Regional Officer',
1925072: 'SES Noarlunga Info',
1925793: 'SES Barmera Info',
1924138: 'SES Blanchetown',
1908007: 'SES Campbelltown',
1908086: 'SES Tea Tree Gully',
1908084: 'SES Sturt Info',
1908055: 'SES Salisbury',
1918207: 'SES Cockburn',
1915574: 'SES Renmark and Paringa',
1908009: 'SES Eastern Suburbs Info',
1908061: 'SES Prospect',
1925060: 'SES Loxton',
1923231: 'SES Ceduna',
1925102: 'SES South East Operations Coordination Unit',
1908073: 'SES South Coast',
1918237: 'SES Berri',
1906642: 'SES Keith',
1908053: 'SES Noarlunga Officers?',
1925001: 'SES Campbelltown Info',
1908012: 'SES Eastern Suburbs',
1908017: 'SES Enfield',
1908528: 'SES State Duty Officer',
1908080: 'SES Strathalbyn Info',
1918227: 'SES Port Pirie',
1924135: 'SES Whyalla',
1915521: 'SES Leigh Creek',
1908099: 'SES Yankalilla',
1923230: 'SES West Coast',
1908087: 'SES Tea Tree Gully Info',
1918219: 'SES Marla',
1908065: 'SES Saddleworth and District',
1918209: 'SES Coober Pedy',
1918057: 'SES Onkaparinga',
1908054: 'SES Noarlunga',
1918217: 'SES Maitland',
1906668: 'SES K9 Search & Rescue Team',
1908019: 'SES Sturt',
1925054: 'SES Salisbury Info',
1908051: 'SES Swiftwater Response Group',
1928011: 'SES Central Region Officer',
1926812: 'SES State Notify North',
1908070: 'SES Headquarters Unit',
1918233: 'SES Spalding',
1923251: 'SES Wudinna',
1923249: 'SES Tumby Bay',
1923238: 'SES Kimba',
1918221: 'SES Mintabie',
1918211: 'SES Hallett',
1918239: 'SES Barmera',
# St John
1901532: 'St John State Duty Officer',
1931910: 'St John Communications Group',
1931521: 'St John State Duty Officer',
1931530: 'St John State Duty Officer',
1931537: 'St John State Duty Officer',
1931532: 'St John State Duty Officer',
1931907: 'St John Tech Services',
1901532: 'St John State Duty Officer',
1931910: 'St John Communications Group',
1931537: 'St John State Duty Officer',
1931532: 'St John State Duty Officer',
1931907: 'St John Tech Services',
# Sea Rescue
1925269: 'Sea Rescue Squadron Victor Harbor / Goolwa',
# WES Woomera Emergency Services
1927242: 'WES Woomera Emergency Services',
# RAAF Fire Service
1932240: 'RAAF Edinburgh Fire Service',
# DEWNR
1919243: 'DEWNR Mambray Creek Response',
1915793: 'DEWNR Adelaide Region',
# Fauna Rescue SA
1909509: 'Fauna Rescue SA',
# GRN Techs
1900157: 'SAGRN Network Faults',
1900159: 'Watchdog Messages',
1900253: 'Perodic Page Messages',
}
|
Shaggs/cfsprinter
|
src/pagerprinter/misc/sacfs_flexcode.py
|
Python
|
gpl-3.0
| 103,474
|
[
"CRYSTAL"
] |
ba3b4c94f0d83cd774eb7f0c7fc6a3318eee1054d33ce9c70eda4dc5a577e183
|
from sys import version_info
from unittest import TestCase
from nose import SkipTest
from nose.tools import eq_, assert_raises, ok_
from parsimonious.exceptions import UndefinedLabel, ParseError
from parsimonious.expressions import Sequence
from parsimonious.grammar import rule_grammar, RuleVisitor, Grammar, TokenGrammar, LazyReference
from parsimonious.nodes import Node
from parsimonious.utils import Token
from six import text_type
class BootstrappingGrammarTests(TestCase):
    """Tests for the expressions in the grammar that parses the grammar
    definition syntax"""

    def test_quantifier(self):
        # Each quantifier symbol should parse to a 'quantifier' node whose
        # children are the matched symbol and the (empty) trailing whitespace.
        text = '*'
        eq_(rule_grammar['quantifier'].parse(text),
            Node('quantifier', text, 0, 1, children=[
                Node('', text, 0, 1), Node('_', text, 1, 1)]))
        text = '?'
        eq_(rule_grammar['quantifier'].parse(text),
            Node('quantifier', text, 0, 1, children=[
                Node('', text, 0, 1), Node('_', text, 1, 1)]))
        text = '+'
        eq_(rule_grammar['quantifier'].parse(text),
            Node('quantifier', text, 0, 1, children=[
                Node('', text, 0, 1), Node('_', text, 1, 1)]))

    def test_spaceless_literal(self):
        # A plain double-quoted literal...
        text = '"anything but quotes#$*&^"'
        eq_(rule_grammar['spaceless_literal'].parse(text),
            Node('spaceless_literal', text, 0, len(text), children=[
                Node('', text, 0, len(text))]))
        # ...and a raw-string literal containing an escaped quote.
        text = r'''r"\""'''
        eq_(rule_grammar['spaceless_literal'].parse(text),
            Node('spaceless_literal', text, 0, 5, children=[
                Node('', text, 0, 5)]))

    def test_regex(self):
        # A regex atom: tilde, quoted body, then trailing flags ("LI" here).
        text = '~"[a-zA-Z_][a-zA-Z_0-9]*"LI'
        eq_(rule_grammar['regex'].parse(text),
            Node('regex', text, 0, len(text), children=[
                Node('', text, 0, 1),
                Node('spaceless_literal', text, 1, 25, children=[
                    Node('', text, 1, 25)]),
                Node('', text, 25, 27),
                Node('_', text, 27, 27)]))

    def test_successes(self):
        """Make sure the PEG recognition grammar succeeds on various inputs."""
        ok_(rule_grammar['label'].parse('_'))
        ok_(rule_grammar['label'].parse('jeff'))
        ok_(rule_grammar['label'].parse('_THIS_THING'))
        ok_(rule_grammar['atom'].parse('some_label'))
        ok_(rule_grammar['atom'].parse('"some literal"'))
        ok_(rule_grammar['atom'].parse('~"some regex"i'))
        ok_(rule_grammar['quantified'].parse('~"some regex"i*'))
        ok_(rule_grammar['quantified'].parse('thing+'))
        ok_(rule_grammar['quantified'].parse('"hi"?'))
        ok_(rule_grammar['term'].parse('this'))
        ok_(rule_grammar['term'].parse('that+'))
        ok_(rule_grammar['sequence'].parse('this that? other'))
        ok_(rule_grammar['ored'].parse('this / that+ / "other"'))
        # + is higher precedence than &, so 'anded' should match the whole
        # thing:
        ok_(rule_grammar['lookahead_term'].parse('&this+'))
        ok_(rule_grammar['expression'].parse('this'))
        ok_(rule_grammar['expression'].parse('this? that other*'))
        ok_(rule_grammar['expression'].parse('&this / that+ / "other"'))
        ok_(rule_grammar['expression'].parse('this / that? / "other"+'))
        ok_(rule_grammar['expression'].parse('this? that other*'))
        ok_(rule_grammar['rule'].parse('this = that\r'))
        ok_(rule_grammar['rule'].parse('this = the? that other* \t\r'))
        ok_(rule_grammar['rule'].parse('the=~"hi*"\n'))
        # Finally, a whole multi-rule grammar should parse.
        ok_(rule_grammar.parse('''
            this = the? that other*
            that = "thing"
            the=~"hi*"
            other = "ahoy hoy"
            '''))
class RuleVisitorTests(TestCase):
    """Tests for ``RuleVisitor``

    As I write these, Grammar is not yet fully implemented. Normally, there'd
    be no reason to use ``RuleVisitor`` directly.

    """
    def test_round_trip(self):
        """Test a simple round trip.

        Parse a simple grammar, turn the parse tree into a map of expressions,
        and use that to parse another piece of text.

        Not everything was implemented yet, but it was a big milestone and a
        proof of concept.

        """
        tree = rule_grammar.parse('''number = ~"[0-9]+"\n''')
        rules, default_rule = RuleVisitor().visit(tree)
        text = '98'
        eq_(default_rule.parse(text), Node('number', text, 0, 2))

    def test_undefined_rule(self):
        """Make sure we throw the right exception on undefined rules."""
        tree = rule_grammar.parse('boy = howdy\n')
        # 'howdy' is referenced but never defined as a rule.
        assert_raises(UndefinedLabel, RuleVisitor().visit, tree)

    def test_optional(self):
        # A '?'-quantified literal should still match and wrap its result.
        tree = rule_grammar.parse('boy = "howdy"?\n')
        rules, default_rule = RuleVisitor().visit(tree)
        howdy = 'howdy'
        # It should turn into a Node from the Optional and another from the
        # Literal within.
        eq_(default_rule.parse(howdy), Node('boy', howdy, 0, 5, children=[
            Node('', howdy, 0, 5)]))
class GrammarTests(TestCase):
    """Integration-test ``Grammar``: feed it a PEG and see if it works."""

    def test_expressions_from_rules(self):
        """Test the ``Grammar`` base class's ability to compile an expression
        tree from rules.

        That the correct ``Expression`` tree is built is already tested in
        ``RuleGrammarTests``. This tests only that the ``Grammar`` base class's
        ``_expressions_from_rules`` works.

        """
        greeting_grammar = Grammar('greeting = "hi" / "howdy"')
        tree = greeting_grammar.parse('hi')
        eq_(tree, Node('greeting', 'hi', 0, 2, children=[
            Node('', 'hi', 0, 2)]))

    def test_unicode(self):
        """Assert that a ``Grammar`` can convert into a string-formatted series
        of rules."""
        grammar = Grammar(r"""
            bold_text = bold_open text bold_close
            text = ~"[A-Z 0-9]*"i
            bold_open = "(("
            bold_close = "))"
            """)
        lines = text_type(grammar).splitlines()
        eq_(lines[0], 'bold_text = bold_open text bold_close')
        # On Python 3, regexes stringify with a trailing 'u' (unicode) flag.
        ok_('text = ~"[A-Z 0-9]*"i%s' % ('u' if version_info >= (3,) else '')
            in lines)
        ok_('bold_open = "(("' in lines)
        ok_('bold_close = "))"' in lines)
        eq_(len(lines), 4)

    def test_match(self):
        """Make sure partial-matching (with pos) works."""
        grammar = Grammar(r"""
            bold_text = bold_open text bold_close
            text = ~"[A-Z 0-9]*"i
            bold_open = "(("
            bold_close = "))"
            """)
        # Start matching at index 1, and don't require consuming to the end.
        s = ' ((boo))yah'
        eq_(grammar.match(s, pos=1), Node('bold_text', s, 1, 8, children=[
            Node('bold_open', s, 1, 3),
            Node('text', s, 3, 6),
            Node('bold_close', s, 6, 8)]))

    def test_bad_grammar(self):
        """Constructing a Grammar with bad rules should raise ParseError."""
        assert_raises(ParseError, Grammar, 'just a bunch of junk')

    def test_comments(self):
        """Test tolerance of comments and blank lines in and around rules."""
        grammar = Grammar(r"""# This is a grammar.
            # It sure is.
            bold_text = stars text stars # nice
            text = ~"[A-Z 0-9]*"i #dude
            stars = "**"
            # Pretty good
            #Oh yeah.#""")  # Make sure a comment doesn't need a
                            # \n or \r to end.
        eq_(list(sorted(str(grammar).splitlines())),
            ['''bold_text = stars text stars''',
             # TODO: Unicode flag is on by default in Python 3. I wonder if we
             # should turn it on all the time in Parsimonious.
             '''stars = "**"''',
             '''text = ~"[A-Z 0-9]*"i%s''' % ('u' if version_info >= (3,)
                                              else '')])

    def test_multi_line(self):
        """Make sure we tolerate all sorts of crazy line breaks and comments in
        the middle of rules."""
        grammar = Grammar("""
            bold_text = bold_open # commenty comment
                        text # more comment
                        bold_close
            text = ~"[A-Z 0-9]*"i
            bold_open = "((" bold_close = "))"
            """)
        ok_(grammar.parse('((booyah))') is not None)

    def test_not(self):
        """Make sure "not" predicates get parsed and work properly."""
        grammar = Grammar(r'''not_arp = !"arp" ~"[a-z]+"''')
        assert_raises(ParseError, grammar.parse, 'arp')
        ok_(grammar.parse('argle') is not None)

    def test_lookahead(self):
        # '&' is a positive lookahead: consumes nothing but must match.
        grammar = Grammar(r'''starts_with_a = &"a" ~"[a-z]+"''')
        assert_raises(ParseError, grammar.parse, 'burp')
        s = 'arp'
        eq_(grammar.parse('arp'), Node('starts_with_a', s, 0, 3, children=[
            Node('', s, 0, 0),
            Node('', s, 0, 3)]))

    def test_parens(self):
        grammar = Grammar(r'''sequence = "chitty" (" " "bang")+''')
        # Make sure it's not as if the parens aren't there:
        assert_raises(ParseError, grammar.parse, 'chitty bangbang')
        s = 'chitty bang bang'
        # NOTE(review): the inner indentation of this expected string was
        # mangled by extraction; reconstructed per Node.prettily's 4-space
        # nesting — confirm against upstream parsimonious.
        eq_(str(grammar.parse(s)),
            """<Node called "sequence" matching "chitty bang bang">
    <Node matching "chitty">
    <Node matching " bang bang">
        <Node matching " bang">
            <Node matching " ">
            <Node matching "bang">
        <Node matching " bang">
            <Node matching " ">
            <Node matching "bang">""")

    def test_resolve_refs_order(self):
        """Smoke-test a circumstance where lazy references don't get resolved."""
        grammar = Grammar("""
            expression = "(" terms ")"
            terms = term+
            term = number
            number = ~r"[0-9]+"
            """)
        grammar.parse('(34)')

    def test_infinite_loop(self):
        """Smoke-test a grammar that was causing infinite loops while building.

        This was going awry because the "int" rule was never getting marked as
        resolved, so it would just keep trying to resolve it over and over.

        """
        Grammar("""
            digits = digit+
            int = digits
            digit = ~"[0-9]"
            number = int
            main = number
            """)

    def test_right_recursive(self):
        """Right-recursive refs should resolve."""
        grammar = Grammar("""
            digits = digit digits?
            digit = ~r"[0-9]"
            """)
        ok_(grammar.parse('12') is not None)

    def test_badly_circular(self):
        """Uselessly circular references should be detected by the grammar
        compiler."""
        # Detection isn't implemented yet, so this test is skipped.
        raise SkipTest('We have yet to make the grammar compiler detect these.')
        grammar = Grammar("""
            foo = bar
            bar = foo
            """)

    def test_parens_with_leading_whitespace(self):
        """Make sure a parenthesized expression is allowed to have leading
        whitespace when nested directly inside another."""
        Grammar("""foo = ( ("c") )""").parse('c')

    def test_single_quoted_literals(self):
        # Single-quoted literals let you embed a double quote easily.
        Grammar("""foo = 'a' '"'""").parse('a"')

    def test_simple_custom_rules(self):
        """Run 2-arg custom-coded rules through their paces."""
        grammar = Grammar("""
            bracketed_digit = start digit end
            start = '['
            end = ']'""",
            # A 2-arg custom rule returns the new position on success, None on
            # failure.
            digit=lambda text, pos:
                (pos + 1) if text[pos].isdigit() else None)
        s = '[6]'
        eq_(grammar.parse(s),
            Node('bracketed_digit', s, 0, 3, children=[
                Node('start', s, 0, 1),
                Node('digit', s, 1, 2),
                Node('end', s, 2, 3)]))

    def test_complex_custom_rules(self):
        """Run 5-arg custom rules through their paces.

        Incidentally tests returning an actual Node from the custom rule.

        """
        grammar = Grammar("""
            bracketed_digit = start digit end
            start = '['
            end = ']'
            real_digit = '6'""",
            # In this particular implementation of the digit rule, no node is
            # generated for `digit`; it falls right through to `real_digit`.
            # I'm not sure if this could lead to problems; I can't think of
            # any, but it's probably not a great idea.
            digit=lambda text, pos, cache, error, grammar:
                grammar['real_digit'].match_core(text, pos, cache, error))
        s = '[6]'
        eq_(grammar.parse(s),
            Node('bracketed_digit', s, 0, 3, children=[
                Node('start', s, 0, 1),
                Node('real_digit', s, 1, 2),
                Node('end', s, 2, 3)]))

    def test_lazy_custom_rules(self):
        """Make sure LazyReferences manually shoved into custom rules are
        resolved.

        Incidentally test passing full-on Expressions as custom rules and
        having a custom rule as the default one.

        """
        grammar = Grammar("""
            four = '4'
            five = '5'""",
            forty_five=Sequence(LazyReference('four'),
                                LazyReference('five'),
                                name='forty_five')).default('forty_five')
        s = '45'
        eq_(grammar.parse(s),
            Node('forty_five', s, 0, 2, children=[
                Node('four', s, 0, 1),
                Node('five', s, 1, 2)]))

    def test_unconnected_custom_rules(self):
        """Make sure custom rules that aren't hooked to any other rules still
        get included in the grammar and that lone ones get set as the
        default.

        Incidentally test Grammar's `rules` default arg.

        """
        grammar = Grammar(one_char=lambda text, pos: pos + 1).default('one_char')
        s = '4'
        eq_(grammar.parse(s),
            Node('one_char', s, 0, 1))

    def test_lazy_default_rule(self):
        """Make sure we get an actual rule set as our default rule, even when
        the first rule has forward references and is thus a LazyReference at
        some point during grammar compilation.

        """
        grammar = Grammar(r"""
            styled_text = text
            text = "hi"
            """)
        eq_(grammar.parse('hi'), Node('text', 'hi', 0, 2))

    def test_immutable_grammar(self):
        """Make sure that a Grammar is immutable after being created."""
        grammar = Grammar(r"""
            foo = 'bar'
            """)

        # Item assignment should be rejected...
        def mod_grammar(grammar):
            grammar['foo'] = 1
        assert_raises(TypeError, mod_grammar, [grammar])

        # ...and so should dict-style update().
        def mod_grammar(grammar):
            new_grammar = Grammar(r"""
                baz = 'biff'
                """)
            grammar.update(new_grammar)
        assert_raises(AttributeError, mod_grammar, [grammar])
class TokenGrammarTests(TestCase):
    """Tests for the TokenGrammar class and associated machinery"""

    def test_parse_success(self):
        """Token literals should work."""
        # TokenGrammar parses a sequence of Token objects, not a string.
        s = [Token('token1'), Token('token2')]
        grammar = TokenGrammar("""
            foo = token1 "token2"
            token1 = "token1"
            """)
        eq_(grammar.parse(s),
            Node('foo', s, 0, 2, children=[
                Node('token1', s, 0, 1),
                Node('', s, 1, 2)]))

    def test_parse_failure(self):
        """Parse failures should work normally with token literals."""
        grammar = TokenGrammar("""
            foo = "token1" "token2"
            """)
        # 'tokenBOO' doesn't match the "token1" literal, so parsing fails.
        assert_raises(ParseError,
                      grammar.parse,
                      [Token('tokenBOO'), Token('token2')])
|
smurfix/parsimonious
|
parsimonious/tests/test_grammar.py
|
Python
|
mit
| 16,132
|
[
"VisIt"
] |
2404961055aa3f2a7451809dcb76707cf0367bbc409940ae21cc9b2166d9c6ab
|
import __main__
import unittest, copy, shutil, tempfile
from numpy.testing import assert_array_equal
from param import normalize_path,resolve_path
from imagen import Gaussian, Line
import topo
from topo.sheet import GeneratorSheet
from topo.command import save_snapshot,load_snapshot
from topo.base.simulation import Simulation
SNAPSHOT_NAME = "testsnapshot.typ"
SIM_NAME = "testsnapshots"
class TestSnapshots(unittest.TestCase):
    """Round-trip tests for Topographica's save_snapshot/load_snapshot.

    NOTE(review): this is Python 2 code (statement-form ``exec ... in ...``);
    it will not compile under Python 3 as written.
    """
    # CB: all tests that use topo.sim ought to do make a new topo.sim
    def setUp(self):
        """
        Create a new Simulation as topo.sim (so this test isn't affected by changes
        to topo.sim by other tests).
        """
        Simulation(register=True,name=SIM_NAME)
        # Redirect all file output into a throwaway temp directory.
        self.original_output_path = normalize_path.prefix
        normalize_path.prefix = tempfile.mkdtemp()
    def tearDown(self):
        # Remove the temp directory and restore the original output prefix.
        shutil.rmtree(normalize_path.prefix)
        normalize_path.prefix=self.original_output_path
    def basic_save_load_snapshot(self):
        """
        Very basic test to check the activity matrix of a GeneratorSheet
        comes back ok, and that class attributes are pickled.
        """
        assert topo.sim.name==SIM_NAME
        topo.sim['R']=GeneratorSheet(input_generator=Gaussian(),nominal_density=2)
        topo.sim.run(1)
        # Snapshot the state we expect to get back: sheet activity, a class
        # attribute (Line.x), and a startup command that sets __main__.z.
        R_act = copy.deepcopy(topo.sim['R'].activity)
        Line.x = 12.0
        topo.sim.startup_commands.append("z=99")
        save_snapshot(SNAPSHOT_NAME)
        # Perturb everything the snapshot should restore.
        Line.x = 9.0
        exec "z=88" in __main__.__dict__
        topo.sim['R'].set_input_generator(Line())
        topo.sim.run(1)
        load_snapshot(resolve_path(SNAPSHOT_NAME,search_paths=[normalize_path.prefix]))
        # CEBALERT: should also test that unpickling order is correct
        # (i.e. startup_commands, class attributes, simulation)
        assert_array_equal(R_act,topo.sim['R'].activity)
        self.assertEqual(Line.x,12.0)
        self.assertEqual(__main__.__dict__['z'],99)
    def test_basic_save_load_snapshot(self):
        self.basic_save_load_snapshot()
    def test_new_simulation_still_works(self):
        # Test to make sure the above tests haven't screwed up
        # the ability to construct new simulation objects
        topo.base.simulation.Simulation()
    # CB: longer to run test should additionally quit the simulation
    # and start again. Should also test scheduled actions.
if __name__ == "__main__":
    # Run this module's tests under nose when executed as a script.
    import nose
    nose.runmodule()
|
ioam/topographica
|
topo/tests/unit/testsnapshots.py
|
Python
|
bsd-3-clause
| 2,461
|
[
"Gaussian"
] |
7662ebc755a1476b4d16f50e2b7211fea37ee487b59dffed76baebfef117fc57
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of the PyNeurActiv project, which aims at providing tools
# to study and model the activity of neuronal cultures.
# Copyright (C) 2017 SENeC Initiative
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" Tools to compute the properties of the activity """
import numpy as np
from scipy.special import lambertw
def out_of_eq_aeif(params):
    '''
    Check whether aEIF neuronal parameters lead to a stable equilibrium.

    Args:
        params (dict): Dictionnary of the neuron's parameters with the keys
            formatted as returned by ``nest.GetStatus(aeif_neuron)``. It must
            contain at least "E_L", "I_e", "a", and "tau_w".

    Returns:
        out_of_eq (bool): ``True`` if the neuron is out of equilibrium;
            ``False`` if a fixed point exists.
    '''
    a_plus_one = 1 + params['a']
    # Effective resting level once the constant current is folded in.
    v_eff = params['E_L'] + params['I_e'] / a_plus_one
    # A fixed point exists only if the leak/adaptation slope beats the
    # exponential term at threshold.
    fp_exists = a_plus_one - np.exp(1 + v_eff) > 0.
    # Lower fixed point from the principal branch of the Lambert W function.
    v0_minus = np.real(v_eff - lambertw(- np.exp(v_eff) / a_plus_one, 0))
    fp_stable = np.exp(v0_minus) - 1. < min(a_plus_one, 1./ params['tau_w'])
    return not (fp_exists and fp_stable)
def Vprime_aeif(V, w, EL, Ie, Is):
    '''
    Returns the derivative of the membrane potential for an aEIF neuron.

    Args:
        V (double): membrane potential.
        w (double): adaptation variable.
        EL (double): resting potential.
        Ie (double): constant stimulation current.
        Is (double): synaptic current.

    Returns:
        -(V-EL) + np.exp(V) + Ie + Is - w
    '''
    # Leak term pulls V back towards EL; the exponential models spike onset.
    leak = -(V - EL)
    return leak + np.exp(V) + Ie + Is - w
def inv_Vprime_aeif(V, w, EL, Ie, Is):
    '''
    Inverse of Vprime_aeif.

    See Also:
        :func:`Vprime_aeif`
    '''
    derivative = Vprime_aeif(V, w, EL, Ie, Is)
    return 1. / derivative
|
Silmathoron/PyNeurActiv
|
lib/aeif.py
|
Python
|
gpl-3.0
| 2,426
|
[
"NEURON"
] |
4d73b8a73dc59880ff6a7ad3464ce2c47c04a3a46247b0d1754d154909c41b76
|
#!/usr/bin/env python
#
# Electrum - Lightweight Bitcoin Client
# Copyright (C) 2015 Thomas Voegtlin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import threading
import socket
import time
import os
import re
import requests
import json
from hashlib import sha256
from urlparse import urljoin
from urllib import quote

from PyQt4.QtGui import *
from PyQt4.QtCore import *

import electrum_dash
from electrum_dash import bitcoin
from electrum_dash.bitcoin import *
from electrum_dash.mnemonic import Mnemonic
from electrum_dash import version
from electrum_dash.wallet import Multisig_Wallet, BIP32_Wallet
from electrum_dash.i18n import _
from electrum_dash.plugins import BasePlugin, run_hook, hook
from electrum_dash_gui.qt.util import *
from electrum_dash_gui.qt.qrcodewidget import QRCodeWidget
from electrum_dash_gui.qt.amountedit import AmountEdit
from electrum_dash_gui.qt.main_window import StatusBarButton
from decimal import Decimal
# signing_xpub is hardcoded so that the wallet can be restored from seed, without TrustedCoin's server
signing_xpub = "xpub661MyMwAqRbcGnMkaTx2594P9EDuiEqMq25PM2aeG6UmwzaohgA6uDmNsvSUV8ubqwA3Wpste1hg69XHgjUuCD5HLcEp2QPzyV1HMrPppsL"
billing_xpub = "xpub6DTBdtBB8qUmH5c77v8qVGVoYk7WjJNpGvutqjLasNG1mbux6KsojaLrYf2sRhXAVU4NaFuHhbD9SvVPRt1MB1MaMooRuhHcAZH1yhQ1qDU"
SEED_PREFIX = version.SEED_PREFIX_2FA
class TrustedCoinException(Exception):
    """Error raised when a TrustedCoin API call fails.

    Carries the HTTP status code so callers can dispatch on it
    (e.g. 409 means the cosigner already exists).
    """
    def __init__(self, message, status_code=0):
        super(TrustedCoinException, self).__init__(message)
        self.status_code = status_code
class TrustedCoinCosignerClient(object):
    """Thin HTTP client for the TrustedCoin cosigning REST API.

    All calls go through :meth:`send_request`, which raises
    :class:`TrustedCoinException` on any non-200 response.
    """

    def __init__(self, user_agent=None, base_url='https://api.trustedcoin.com/2/', debug=False):
        self.base_url = base_url
        self.debug = debug
        self.user_agent = user_agent

    def send_request(self, method, relative_url, data=None, headers=None):
        """Issue an HTTP request against the API.

        :param method: 'get' or 'post'
        :param relative_url: path relative to ``base_url``
        :param data: query params (get) or JSON body (post)
        :param headers: optional extra headers merged into the request
            (fixes the previously-broken ``transfer_credit`` call, which
            passed headers that this method did not accept)
        :raises TrustedCoinException: on any non-200 status
        """
        kwargs = {'headers': {}}
        if headers:
            kwargs['headers'].update(headers)
        if self.user_agent:
            kwargs['headers']['user-agent'] = self.user_agent
        if method == 'get' and data:
            kwargs['params'] = data
        elif method == 'post' and data:
            kwargs['data'] = json.dumps(data)
            kwargs['headers']['content-type'] = 'application/json'
        url = urljoin(self.base_url, relative_url)
        if self.debug:
            # Single-argument print() form is valid in both Python 2 and 3.
            print('%s %s %s' % (method, url, data))
        response = requests.request(method, url, **kwargs)
        if self.debug:
            print(response.text)
            print('')
        if response.status_code != 200:
            message = str(response.text)
            if response.headers.get('content-type') == 'application/json':
                r = response.json()
                if 'message' in r:
                    message = r['message']
            raise TrustedCoinException(message, response.status_code)
        if response.headers.get('content-type') == 'application/json':
            return response.json()
        else:
            return response.text

    def get_terms_of_service(self, billing_plan='electrum-per-tx-otp'):
        """
        Returns the TOS for the given billing plan as a plain/text unicode string.
        :param billing_plan: the plan to return the terms for
        """
        payload = {'billing_plan': billing_plan}
        return self.send_request('get', 'tos', payload)

    def create(self, xpubkey1, xpubkey2, email, billing_plan='electrum-per-tx-otp'):
        """
        Creates a new cosigner resource.
        :param xpubkey1: a bip32 extended public key (customarily the hot key)
        :param xpubkey2: a bip32 extended public key (customarily the cold key)
        :param email: a contact email
        :param billing_plan: the billing plan for the cosigner
        """
        payload = {
            'email': email,
            'xpubkey1': xpubkey1,
            'xpubkey2': xpubkey2,
            'billing_plan': billing_plan,
        }
        return self.send_request('post', 'cosigner', payload)

    def auth(self, id, otp):
        """
        Attempt to authenticate for a particular cosigner.
        :param id: the id of the cosigner
        :param otp: the one time password
        """
        payload = {'otp': otp}
        return self.send_request('post', 'cosigner/%s/auth' % quote(id), payload)

    def get(self, id):
        """
        Fetch the state of a particular cosigner.
        :param id: the id of the cosigner
        """
        return self.send_request('get', 'cosigner/%s' % quote(id))

    def sign(self, id, transaction, otp):
        """
        Ask the server to cosign a transaction.
        :param id: the id of the cosigner
        :param transaction: the hex encoded [partially signed] compact transaction to sign
        :param otp: the one time password
        """
        payload = {
            'otp': otp,
            'transaction': transaction
        }
        return self.send_request('post', 'cosigner/%s/sign' % quote(id), payload)

    def transfer_credit(self, id, recipient, otp, signature_callback):
        """
        Tranfer a cosigner's credits to another cosigner.
        :param id: the id of the sending cosigner
        :param recipient: the id of the recipient cosigner
        :param otp: the one time password (of the sender)
        :param signature_callback: a callback that signs a text message using xpubkey1/0/0 returning a compact sig
        """
        payload = {
            'otp': otp,
            'recipient': recipient,
            'timestamp': int(time.time()),
        }
        relative_url = 'cosigner/%s/transfer' % quote(id)
        full_url = urljoin(self.base_url, relative_url)
        headers = {
            'x-signature': signature_callback(full_url + '\n' + json.dumps(payload))
        }
        # send_request now accepts the headers keyword; the previous
        # positional call raised TypeError.
        return self.send_request('post', relative_url, payload, headers=headers)
# Module-level API client shared by the wallet and plugin classes below.
server = TrustedCoinCosignerClient(user_agent="Electrum/" + version.ELECTRUM_VERSION)
class Wallet_2fa(Multisig_Wallet):
    """2-of-3 multisig wallet where the third key is held by TrustedCoin."""

    def __init__(self, storage):
        # NOTE(review): deliberately bypasses Multisig_Wallet.__init__ and
        # calls BIP32_Wallet directly; m/n are fixed for the 2FA scheme.
        BIP32_Wallet.__init__(self, storage)
        self.wallet_type = '2fa'
        self.m = 2
        self.n = 3

    def get_action(self):
        # Returns the next wizard step, or None when setup is complete.
        # NOTE(review): xpub1 is fetched but never used.
        xpub1 = self.master_public_keys.get("x1/")
        xpub2 = self.master_public_keys.get("x2/")
        xpub3 = self.master_public_keys.get("x3/")
        if xpub2 is None and not self.storage.get('use_trustedcoin'):
            return 'show_disclaimer'
        if xpub2 is None:
            return 'create_extended_seed'
        if xpub3 is None:
            return 'create_remote_key'
        if not self.accounts:
            return 'create_accounts'

    def make_seed(self):
        # 256-bit seed carrying the 2FA version prefix.
        return Mnemonic('english').make_seed(num_bits=256, prefix=SEED_PREFIX)

    def estimated_fee(self, tx):
        # Base multisig fee plus whatever the TrustedCoin plugin charges.
        fee = Multisig_Wallet.estimated_fee(self, tx)
        x = run_hook('extra_fee', tx)
        if x: fee += x
        return fee

    def get_tx_fee(self, tx):
        # Same extra-fee adjustment for the actual transaction fee.
        fee = Multisig_Wallet.get_tx_fee(self, tx)
        x = run_hook('extra_fee', tx)
        if x: fee += x
        return fee
class Plugin(BasePlugin):
    """TrustedCoin two-factor plugin: wallet hooks, wizard steps, and the
    Qt billing/settings UI for the 2-of-3 cosigning service."""

    # Active wallet; set by load_wallet / show_disclaimer.
    wallet = None

    def __init__(self, x, y):
        BasePlugin.__init__(self, x, y)
        # Seed validator restricted to the 2FA seed prefix.
        self.seed_func = lambda x: bitcoin.is_new_seed(x, SEED_PREFIX)
        self.billing_info = None
        self.is_billing = False

    def constructor(self, s):
        # Wallet factory used by the plugin framework.
        return Wallet_2fa(s)

    def is_available(self):
        if not self.wallet:
            return False
        if self.wallet.storage.get('wallet_type') == '2fa':
            return True
        return False

    def set_enabled(self, enabled):
        # Persist the on/off flag in the wallet file.
        self.wallet.storage.put('use_' + self.name, enabled)

    def is_enabled(self):
        if not self.is_available():
            return False
        # A private x2/ key means the user restored from seed and can sign
        # without the server, so 2FA is effectively disabled.
        if self.wallet.master_private_keys.get('x2/'):
            return False
        return True

    def make_long_id(self, xpub_hot, xpub_cold):
        # Deterministic: sorting makes the id independent of argument order.
        return bitcoin.sha256(''.join(sorted([xpub_hot, xpub_cold])))

    def get_user_id(self):
        """Return (long_id, short_id) derived from the two local xpubs."""
        xpub_hot = self.wallet.master_public_keys["x1/"]
        xpub_cold = self.wallet.master_public_keys["x2/"]
        long_id = self.make_long_id(xpub_hot, xpub_cold)
        # NOTE(review): hashlib is presumably provided by a star import
        # above (no direct `import hashlib` is visible) — verify.
        short_id = hashlib.sha256(long_id).hexdigest()
        return long_id, short_id

    def make_xpub(self, xpub, s):
        # Derive a child xpub from `xpub` using `s` as derivation material.
        _, _, _, c, cK = deserialize_xkey(xpub)
        cK2, c2 = bitcoin._CKD_pub(cK, c, s)
        # 0488B21E is the mainnet xpub version; depth/fingerprint/number zeroed.
        xpub2 = ("0488B21E" + "00" + "00000000" + "00000000").decode("hex") + c2 + cK2
        return EncodeBase58Check(xpub2)

    def make_billing_address(self, num):
        # Billing addresses are derived from TrustedCoin's published xpub,
        # personalised by the user's long id, indexed by `num`.
        long_id, short_id = self.get_user_id()
        xpub = self.make_xpub(billing_xpub, long_id)
        _, _, _, c, cK = deserialize_xkey(xpub)
        cK, c = bitcoin.CKD_pub(cK, c, num)
        address = public_key_to_bc_address( cK )
        return address

    def create_extended_seed(self, wallet, window):
        """Wizard step: show/verify the seed, then split it into the hot
        cosigner seed (x1/) and the cold cosigner xpub (x2/)."""
        seed = wallet.make_seed()
        if not window.show_seed(seed, None):
            return
        if not window.verify_seed(seed, None, self.seed_func):
            return
        password = window.password_dialog()
        wallet.storage.put('seed_version', wallet.seed_version, True)
        wallet.storage.put('use_encryption', password is not None, True)
        words = seed.split()
        # First half of the words seeds x1/; second half only yields an xpub.
        n = len(words)/2
        wallet.add_cosigner_seed(' '.join(words[0:n]), 'x1/', password)
        wallet.add_cosigner_xpub(' '.join(words[n:]), 'x2/')
        msg = [
            _('Your wallet file is:') + " %s"%os.path.abspath(wallet.storage.path),
            _('You need to be online in order to complete the creation of your wallet.'),
            _('If you generated your seed on an offline computer, click on "%s" to close this window, move your wallet file to an online computer and reopen it with Electrum.') % _('Close'),
            _('If you are online, click on "%s" to continue.') % _('Next')
            ]
        return window.question('\n\n'.join(msg), no_label=_('Close'), yes_label=_('Next'))

    def show_disclaimer(self, wallet, window):
        """Wizard step: explain the service; returns True if accepted."""
        msg = [
            _("Two-factor authentication is a service provided by TrustedCoin.") + ' ',
            _("It uses a multi-signature wallet, where you own 2 of 3 keys.") + ' ',
            _("The third key is stored on a remote server that signs transactions on your behalf.") + ' ',
            _("To use this service, you will need a smartphone with Google Authenticator.") + '\n\n',
            _("A small fee will be charged on each transaction that uses the remote server.") + ' ',
            _("You may check and modify your billing preferences once the installation is complete.") + '\n\n',
            _("Note that your coins are not locked in this service.") + ' ',
            _("You may withdraw your funds at any time and at no cost, without the remote server, by using the 'restore wallet' option with your wallet seed.") + '\n\n',
            _('The next step will generate the seed of your wallet.') + ' ',
            _('This seed will NOT be saved in your computer, and it must be stored on paper.') + ' ',
            _('To be safe from malware, you may want to do this on an offline computer, and move your wallet later to an online computer.')
            ]
        icon = QPixmap(':icons/trustedcoin.png')
        if not window.question(''.join(msg), icon=icon):
            return False
        self.wallet = wallet
        self.set_enabled(True)
        return True

    def restore_third_key(self, wallet):
        # x3/ is derivable offline from the hardcoded signing_xpub.
        long_user_id, short_id = self.get_user_id()
        xpub3 = self.make_xpub(signing_xpub, long_user_id)
        wallet.add_master_public_key('x3/', xpub3)

    @hook
    def do_clear(self):
        self.is_billing = False

    @hook
    def load_wallet(self, wallet, window):
        # Install the status-bar button and fetch billing info in background.
        self.wallet = wallet
        self.window = window
        self.trustedcoin_button = StatusBarButton(QIcon(":icons/trustedcoin.png"), _("TrustedCoin"), self.settings_dialog)
        self.window.statusBar().addPermanentWidget(self.trustedcoin_button)
        self.xpub = self.wallet.master_public_keys.get('x1/')
        self.user_id = self.get_user_id()[1]
        t = threading.Thread(target=self.request_billing_info)
        t.setDaemon(True)
        t.start()

    @hook
    def installwizard_load_wallet(self, wallet, window):
        self.wallet = wallet
        self.window = window

    @hook
    def close_wallet(self):
        self.window.statusBar().removeWidget(self.trustedcoin_button)

    @hook
    def get_wizard_action(self, window, wallet, action):
        # Dispatch wizard actions to same-named methods on this plugin.
        if hasattr(self, action):
            return getattr(self, action)

    @hook
    def installwizard_restore(self, window, storage):
        """Restore a 2FA wallet entirely from seed (no server required)."""
        if storage.get('wallet_type') != '2fa':
            return
        seed = window.enter_seed_dialog("Enter your seed", None, func=self.seed_func)
        if not seed:
            return
        wallet = Wallet_2fa(storage)
        self.wallet = wallet
        password = window.password_dialog()
        wallet.add_seed(seed, password)
        words = seed.split()
        n = len(words)/2
        # On restore both halves yield private keys, so the server is not needed.
        wallet.add_cosigner_seed(' '.join(words[0:n]), 'x1/', password)
        wallet.add_cosigner_seed(' '.join(words[n:]), 'x2/', password)
        self.restore_third_key(wallet)
        wallet.create_main_account(password)
        # disable plugin
        self.set_enabled(False)
        return wallet

    def create_remote_key(self, wallet, window):
        """Wizard step: register with TrustedCoin and store x3/."""
        self.wallet = wallet
        self.window = window
        # NOTE(review): bare `raise` with no active exception raises a
        # TypeError/RuntimeError here, and the following `return` is
        # unreachable — confirm intent.
        if wallet.storage.get('wallet_type') != '2fa':
            raise
            return
        email = self.accept_terms_of_use(window)
        if not email:
            return
        xpub_hot = wallet.master_public_keys["x1/"]
        xpub_cold = wallet.master_public_keys["x2/"]
        # Generate third key deterministically.
        long_user_id, self.user_id = self.get_user_id()
        xpub3 = self.make_xpub(signing_xpub, long_user_id)
        # secret must be sent by the server
        try:
            r = server.create(xpub_hot, xpub_cold, email)
        except socket.error:
            self.window.show_message('Server not reachable, aborting')
            return
        except TrustedCoinException as e:
            # 409: cosigner already registered; continue without an otp_secret.
            if e.status_code == 409:
                r = None
            else:
                raise e
        if r is None:
            otp_secret = None
        else:
            otp_secret = r.get('otp_secret')
            if not otp_secret:
                self.window.show_message(_('Error'))
                return
            _xpub3 = r['xpubkey_cosigner']
            _id = r['id']
            try:
                # Sanity-check the server's answer against local derivation.
                assert _id == self.user_id, ("user id error", _id, self.user_id)
                assert xpub3 == _xpub3, ("xpub3 error", xpub3, _xpub3)
            except Exception as e:
                self.window.show_message(str(e))
                return
        if not self.setup_google_auth(self.window, self.user_id, otp_secret):
            return
        self.wallet.add_master_public_key('x3/', xpub3)
        return True

    def need_server(self, tx):
        """Return True if any unsigned input requires the server key x3/."""
        from electrum_dash.account import BIP32_Account
        # Detect if the server is needed
        long_id, short_id = self.get_user_id()
        xpub3 = self.wallet.master_public_keys['x3/']
        for x in tx.inputs_to_sign():
            if x[0:2] == 'ff':
                xpub, sequence = BIP32_Account.parse_xpubkey(x)
                if xpub == xpub3:
                    return True
        return False

    @hook
    def sign_tx(self, tx):
        # Collect the OTP up-front; sign_transaction uses it later.
        self.print_error("twofactor:sign_tx")
        if self.wallet.storage.get('wallet_type') != '2fa':
            return
        if not self.need_server(tx):
            self.print_error("twofactor: xpub3 not needed")
            self.auth_code = None
            return
        self.auth_code = self.auth_dialog()

    @hook
    def before_send(self):
        # request billing info before forming the transaction
        # Returning True aborts the send.
        self.billing_info = None
        self.waiting_dialog = WaitingDialog(self.window, 'please wait...', self.request_billing_info)
        self.waiting_dialog.start()
        self.waiting_dialog.wait()
        if self.billing_info is None:
            self.window.show_message('Could not contact server')
            return True
        return False

    @hook
    def extra_fee(self, tx):
        """Fee owed to TrustedCoin for this tx, or 0 if none is due."""
        if self.billing_info.get('tx_remaining'):
            return 0
        if self.is_billing:
            return 0
        # trustedcoin won't charge if the total inputs is lower than their fee
        price = int(self.price_per_tx.get(1))
        assert price <= 100000
        if tx.input_value() < price:
            self.print_error("not charging for this tx")
            return 0
        return price

    @hook
    def make_unsigned_transaction(self, tx):
        # Append the billing output when a fee is due.
        price = self.extra_fee(tx)
        if not price:
            return
        tx.outputs.append(('address', self.billing_info['billing_address'], price))

    @hook
    def sign_transaction(self, tx, password):
        """Send the partially-signed tx to the server for the third signature."""
        self.print_error("twofactor:sign")
        if self.wallet.storage.get('wallet_type') != '2fa':
            self.print_error("twofactor: aborting")
            return
        self.long_user_id, self.user_id = self.get_user_id()
        if not self.auth_code:
            return
        if tx.is_complete():
            return
        tx_dict = tx.as_dict()
        raw_tx = tx_dict["hex"]
        try:
            r = server.sign(self.user_id, raw_tx, self.auth_code)
        except Exception as e:
            # Surface the failure on the tx object rather than raising.
            tx.error = str(e)
            return
        self.print_error( "received answer", r)
        if not r:
            return
        raw_tx = r.get('transaction')
        tx.update(raw_tx)
        self.print_error("twofactor: is complete", tx.is_complete())

    def auth_dialog(self ):
        # Modal prompt for the 6-digit Google Authenticator code.
        d = QDialog(self.window)
        d.setModal(1)
        vbox = QVBoxLayout(d)
        pw = AmountEdit(None, is_int = True)
        msg = _('Please enter your Google Authenticator code')
        vbox.addWidget(QLabel(msg))
        grid = QGridLayout()
        grid.setSpacing(8)
        grid.addWidget(QLabel(_('Code')), 1, 0)
        grid.addWidget(pw, 1, 1)
        vbox.addLayout(grid)
        vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
        if not d.exec_():
            return
        return pw.get_amount()

    def settings_dialog(self):
        # Refresh billing info first, then open the dialog on completion.
        self.waiting_dialog = WaitingDialog(self.window, 'please wait...', self.request_billing_info, self.show_settings_dialog)
        self.waiting_dialog.start()

    def show_settings_dialog(self, success):
        """Render the billing/settings dialog (called after billing refresh)."""
        if not success:
            self.window.show_message(_('Server not reachable.'))
            return
        d = QDialog(self.window)
        d.setWindowTitle("TrustedCoin Information")
        d.setMinimumSize(500, 200)
        vbox = QVBoxLayout(d)
        hbox = QHBoxLayout()
        logo = QLabel()
        logo.setPixmap(QPixmap(":icons/trustedcoin.png"))
        msg = _('This wallet is protected by TrustedCoin\'s two-factor authentication.') + '<br/>'\
              + _("For more information, visit") + " <a href=\"https://api.trustedcoin.com/#/electrum-help\">https://api.trustedcoin.com/#/electrum-help</a>"
        label = QLabel(msg)
        label.setOpenExternalLinks(1)
        hbox.addStretch(10)
        hbox.addWidget(logo)
        hbox.addStretch(10)
        hbox.addWidget(label)
        hbox.addStretch(10)
        vbox.addLayout(hbox)
        vbox.addStretch(10)
        msg = _('TrustedCoin charges a fee per co-signed transaction. You may pay on each transaction (an extra output will be added to your transaction), or you may purchase prepaid transaction using this dialog.') + '<br/>'
        label = QLabel(msg)
        label.setWordWrap(1)
        vbox.addWidget(label)
        vbox.addStretch(10)
        grid = QGridLayout()
        vbox.addLayout(grid)
        v = self.price_per_tx.get(1)
        grid.addWidget(QLabel(_("Price per transaction (not prepaid):")), 0, 0)
        grid.addWidget(QLabel(self.window.format_amount(v) + ' ' + self.window.base_unit()), 0, 1)
        i = 1
        # Ensure a 10-pack row exists even if the server omitted it.
        if 10 not in self.price_per_tx:
            self.price_per_tx[10] = 10 * self.price_per_tx.get(1)
        for k, v in sorted(self.price_per_tx.items()):
            if k == 1:
                continue
            grid.addWidget(QLabel("Price for %d prepaid transactions:"%k), i, 0)
            grid.addWidget(QLabel("%d x "%k + self.window.format_amount(v/k) + ' ' + self.window.base_unit()), i, 1)
            b = QPushButton(_("Buy"))
            b.clicked.connect(lambda b, k=k, v=v: self.on_buy(k, v, d))
            grid.addWidget(b, i, 2)
            i += 1
        n = self.billing_info.get('tx_remaining', 0)
        grid.addWidget(QLabel(_("Your wallet has %d prepaid transactions.")%n), i, 0)
        # tranfer button
        #def on_transfer():
        #    server.transfer_credit(self.user_id, recipient, otp, signature_callback)
        #    pass
        #b = QPushButton(_("Transfer"))
        #b.clicked.connect(on_transfer)
        #grid.addWidget(b, 1, 2)
        #grid.addWidget(QLabel(_("Next Billing Address:")), i, 0)
        #grid.addWidget(QLabel(self.billing_info['billing_address']), i, 1)
        vbox.addLayout(Buttons(CloseButton(d)))
        d.exec_()

    def on_buy(self, k, v, d):
        # Pre-fill and freeze the send tab with the prepaid-purchase URI.
        d.close()
        if self.window.pluginsdialog:
            self.window.pluginsdialog.close()
        uri = "dash:" + self.billing_info['billing_address'] + "?message=TrustedCoin %d Prepaid Transactions&amount="%k + str(Decimal(v)/100000000)
        self.is_billing = True
        self.window.pay_from_URI(uri)
        self.window.payto_e.setFrozen(True)
        self.window.message_e.setFrozen(True)
        self.window.amount_e.setFrozen(True)

    def request_billing_info(self):
        # Fetch billing state and cross-check the server's billing address
        # against the locally derived one before trusting it.
        billing_info = server.get(self.user_id)
        billing_address = self.make_billing_address(billing_info['billing_index'])
        assert billing_address == billing_info['billing_address']
        self.billing_info = billing_info
        self.price_per_tx = dict(self.billing_info['price_per_tx'])
        return True

    def accept_terms_of_use(self, window):
        """Show the TOS (fetched in background) and collect an email address.

        Returns the email string, or None if cancelled."""
        vbox = QVBoxLayout()
        window.set_layout(vbox)
        vbox.addWidget(QLabel(_("Terms of Service")))
        tos_e = QTextEdit()
        tos_e.setReadOnly(True)
        vbox.addWidget(tos_e)
        vbox.addWidget(QLabel(_("Please enter your e-mail address")))
        email_e = QLineEdit()
        vbox.addWidget(email_e)
        vbox.addStretch()
        accept_button = OkButton(window, _('Accept'))
        accept_button.setEnabled(False)
        vbox.addLayout(Buttons(CancelButton(window), accept_button))

        def request_TOS():
            tos = server.get_terms_of_service()
            self.TOS = tos
            window.emit(SIGNAL('twofactor:TOS'))

        def on_result():
            tos_e.setText(self.TOS)

        window.connect(window, SIGNAL('twofactor:TOS'), on_result)
        t = threading.Thread(target=request_TOS)
        t.setDaemon(True)
        t.start()
        # Accept is only enabled once the text looks like an email address.
        regexp = r"[^@]+@[^@]+\.[^@]+"
        email_e.textChanged.connect(lambda: accept_button.setEnabled(re.match(regexp,email_e.text()) is not None))
        email_e.setFocus(True)
        if not window.exec_():
            return
        email = str(email_e.text())
        return email

    def setup_google_auth(self, window, _id, otp_secret):
        """Show the TOTP QR code (or re-auth prompt) and verify an OTP with
        the server. Loops until success or the user cancels."""
        vbox = QVBoxLayout()
        window.set_layout(vbox)
        if otp_secret is not None:
            uri = "otpauth://totp/%s?secret=%s"%('trustedcoin.com', otp_secret)
            vbox.addWidget(QLabel("Please scan this QR code in Google Authenticator."))
            qrw = QRCodeWidget(uri)
            vbox.addWidget(qrw, 1)
            msg = _('Then, enter your Google Authenticator code:')
        else:
            label = QLabel("This wallet is already registered, but it was never authenticated. To finalize your registration, please enter your Google Authenticator Code. If you do not have this code, delete the wallet file and start a new registration")
            label.setWordWrap(1)
            vbox.addWidget(label)
            msg = _('Google Authenticator code:')
        hbox = QHBoxLayout()
        hbox.addWidget(QLabel(msg))
        pw = AmountEdit(None, is_int = True)
        pw.setFocus(True)
        hbox.addWidget(pw)
        hbox.addStretch(1)
        vbox.addLayout(hbox)
        b = OkButton(window, _('Next'))
        b.setEnabled(False)
        vbox.addLayout(Buttons(CancelButton(window), b))
        # TOTP codes are exactly 6 digits.
        pw.textChanged.connect(lambda: b.setEnabled(len(pw.text())==6))
        while True:
            if not window.exec_():
                return False
            otp = pw.get_amount()
            try:
                server.auth(_id, otp)
                return True
            except:
                QMessageBox.information(self.window, _('Message'), _('Incorrect password'), _('OK'))
                pw.setText('')
|
mazaclub/electrum-dash
|
plugins/trustedcoin.py
|
Python
|
gpl-3.0
| 25,605
|
[
"VisIt"
] |
67b4a39f2208266de5978233d3ce810745b9ca6f26220f0d030717971537635f
|
from .system import System
from .molecule import Molecule, guess_bonds
from .atom import Atom
from .system import (subsystem_from_molecules,
subsystem_from_atoms,
merge_systems)
from .trajectory import Trajectory
from .spacegroup.crystal import crystal
from .random import random_lattice_box, random_box
|
chemlab/chemlab
|
chemlab/core/__init__.py
|
Python
|
gpl-3.0
| 347
|
[
"CRYSTAL"
] |
f0315d77ec124a3a2172af12cae15ec261b91dbe3216f7fdbba7c55cd92d9fc9
|
from builtins import range
import sys, os
sys.path.insert(1, os.path.join("..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.deeplearning import H2ODeepLearningEstimator
def weights_and_distributions():
    """Smoke-test H2O deep learning with a weights column across the four
    supported distributions (gamma, gaussian, poisson, tweedie).

    The original body repeated the same train/predict stanza four times;
    the loop below runs the identical call sequence in the same order.
    """
    htable = h2o.upload_file(pyunit_utils.locate("smalldata/gbm_test/moppe.csv"))
    htable["premiekl"] = htable["premiekl"].asfactor()
    htable["moptva"] = htable["moptva"].asfactor()
    htable["zon"] = htable["zon"]
    # One weighted fit + predict cycle per distribution.
    for distribution in ("gamma", "gaussian", "poisson", "tweedie"):
        dl = H2ODeepLearningEstimator(distribution=distribution)
        dl.train(x=list(range(3)), y="medskad", training_frame=htable,
                 weights_column="antskad")
        predictions = dl.predict(htable)
if __name__ == "__main__":
    # Standalone run: the harness establishes the H2O connection first.
    pyunit_utils.standalone_test(weights_and_distributions)
else:
    # Imported by the suite runner: a connection already exists.
    weights_and_distributions()
|
YzPaul3/h2o-3
|
h2o-py/tests/testdir_algos/deeplearning/pyunit_weights_and_distributions_deeplearning.py
|
Python
|
apache-2.0
| 1,335
|
[
"Gaussian"
] |
5307a307ffd5eef0db0c2d749257f7e7b5099279febce988281a9b93dcb7bb8e
|
import opencor as oc
import sys
sys.dont_write_bytecode = True
import utils
if __name__ == '__main__':
    # Test the Noble 1962 model using different solvers
    # (utils.run_simulations drives OpenCOR for each configured solver).
    utils.run_simulations('noble_model_1962.cellml', 'Noble 1962 model')
|
agarny/opencor
|
src/plugins/support/PythonSupport/tests/data/noble1962tests.py
|
Python
|
gpl-3.0
| 236
|
[
"OpenCOR"
] |
db476bf4a84a2b76dea8f31c83cd4d95f6183129409cf73025e0ab89c8768f46
|
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import autograd.numpy as np
from ase.build import bulk
from ase.neighborlist import NeighborList
from ase.calculators.lj import LennardJones
from dap.ag.neighborlist import (get_distances, get_neighbors,
get_neighbors_oneway)
from dap.ag.lennardjones import (energy, forces, stress, energy_oneway,
forces_oneway, stress_oneway)
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
class TestAGNeighborListBothways(unittest.TestCase):
  """Compare the autograd distance matrix against ASE's both-ways
  NeighborList for neighbor counts."""

  def test0(self):
    """Check the fcc cell neighbors in a variety of repeats."""
    a = 3.6
    for cutoff_radius in np.linspace(a / 2, 5 * a, 10):
      for rep in ((1, 1, 1), (2, 1, 1), (1, 2, 1), (1, 1, 2), (1, 2, 2),
                  (2, 1, 1), (2, 2, 1), (2, 2, 2), (1, 2, 3), (4, 1, 1)):
        atoms = bulk('Cu', 'fcc', a=a).repeat(rep)
        # ASE wants per-atom radii of half the cutoff.
        nl = NeighborList(
            [cutoff_radius / 2] * len(atoms),
            skin=0.01,
            self_interaction=False,
            bothways=True)
        nl.update(atoms)
        nns_ase = [len(nl.get_neighbors(i)[0]) for i in range(len(atoms))]

        d, _ = get_distances(atoms.positions, atoms.cell, cutoff_radius)
        # Count entries within cutoff (+skin), excluding self-distances (0).
        inds = (d <= (cutoff_radius + 0.01)) & (d > 0.00)
        nns = inds.sum((1, 2))
        self.assertTrue(np.all(nns_ase == nns))
class TestAGNeighborListOneWay(unittest.TestCase):
  """Compare the autograd one-way neighborlist against ASE's."""

  def test0(self):
    """check one-way neighborlist for fcc on different repeats."""
    a = 3.6
    for rep in ((1, 1, 1), (2, 1, 1), (1, 2, 1), (1, 1, 2), (1, 2, 2),
                (2, 1, 1), (2, 2, 1), (2, 2, 2), (1, 2, 3), (4, 1, 1)):
      for cutoff_radius in np.linspace(a / 2.1, 5 * a, 5):
        atoms = bulk('Cu', 'fcc', a=a).repeat(rep)
        # It is important to rattle the atoms off the lattice points.
        # Otherwise, float tolerances makes it hard to count correctly.
        atoms.rattle(0.02)
        nl = NeighborList(
            [cutoff_radius / 2] * len(atoms),
            skin=0.0,
            self_interaction=False,
            bothways=False)
        nl.update(atoms)

        neighbors, displacements = get_neighbors_oneway(
            atoms.positions, atoms.cell, cutoff_radius, skin=0.0)

        for i in range(len(atoms)):
          an, ad = nl.get_neighbors(i)
          # Check the same number of neighbors
          self.assertEqual(len(neighbors[i]), len(an))
          # Check the same indices
          self.assertCountEqual(neighbors[i], an)
          # I am not sure how to test for the displacements.
class TestAGLennardJones(unittest.TestCase):
  """Autograd Lennard-Jones vs. ASE's reference implementation."""

  def test_fcc(self):
    'Check LJ with structures and repeats with different symmetries.'
    repeats = [(1, 1, 1), (1, 1, 2), (1, 2, 1), (2, 1, 1), (1, 2, 3),
               (2, 2, 2)]
    for struct in ['fcc', 'bcc', 'diamond']:
      for repeat in repeats:
        atoms = bulk('Cu', struct, a=3.7).repeat(repeat)
        # Rattling avoids degenerate lattice symmetries.
        atoms.rattle(0.02)
        atoms.set_calculator(LennardJones())

        ase_energy = atoms.get_potential_energy()
        self.assertAlmostEqual(ase_energy,
                               energy({}, atoms.positions, atoms.cell))

        self.assertTrue(
            np.allclose(atoms.get_forces(),
                        forces({}, atoms.positions, atoms.cell)))

        self.assertTrue(
            np.allclose(atoms.get_stress(),
                        stress({}, atoms.positions, atoms.cell)))
class TestLennardJonesOneWay(unittest.TestCase):
  """One-way-neighborlist Lennard-Jones vs. ASE's reference implementation."""

  def test_fcc(self):
    """Check LJ with oneway neighbors.

    Uses structures and repeats with different symmetries."""
    repeats = [(1, 1, 1), (1, 1, 2), (1, 2, 1), (2, 1, 1), (1, 2, 3),
               (2, 2, 2)]
    for struct in ['fcc', 'bcc', 'diamond']:
      for repeat in repeats:
        atoms = bulk('Cu', struct, a=3.7).repeat(repeat)
        # Rattling avoids degenerate lattice symmetries.
        atoms.rattle(0.02)
        atoms.set_calculator(LennardJones())

        ase_energy = atoms.get_potential_energy()
        self.assertAlmostEqual(ase_energy,
                               energy_oneway({}, atoms.positions, atoms.cell))

        self.assertTrue(
            np.allclose(atoms.get_forces(),
                        forces_oneway({}, atoms.positions, atoms.cell)))

        self.assertTrue(
            np.allclose(atoms.get_stress(),
                        stress_oneway({}, atoms.positions, atoms.cell)))
|
google/differentiable-atomistic-potentials
|
dap/tests/test_ag.py
|
Python
|
apache-2.0
| 4,875
|
[
"ASE"
] |
6f8e1ec0faefa7bee6f2422d476977cb7f5d749259939561a7c0d7db95fed2bd
|
import matplotlib.pyplot as plt
import os
from astropy.table import Table
import numpy as np
# setup information sources
degas = Table.read(os.path.join(os.environ['SCRIPTDIR'],'degas_base.fits'))
stack = Table.read('/lustre/cv/users/akepley/degas/stack_test/stack_IR6p0_mom1.fits')

plotDir = os.path.join(os.environ['ANALYSISDIR'],'plots','fdense_plots')
if not os.path.exists(plotDir):
    os.mkdir(plotDir)

# only look at dr1 galaxies
dr1 = degas['DR1'] == 1
ndr1 = np.sum(dr1)

# setup plot style: cycle through markers/colors so every DR1 galaxy
# gets a distinct (marker, color) pair.
markers = ['o','v','^','s','*','D'] # 6 items
colors = ['royalblue','forestgreen','darkorange','royalblue','crimson','rebeccapurple','darkcyan','darkmagenta']
# NOTE: the original script recomputed ndr1 here; the duplicate
# `ndr1 = np.sum(dr1)` has been removed (same value either way).
markerlist = np.tile(markers,int(np.ceil(ndr1/len(markers))))
markerlist = markerlist[0:ndr1]
colorlist = np.tile(colors,int(np.ceil(ndr1/len(colors))))
colorlist = colorlist[0:ndr1]

# set up plot
fig = plt.figure(figsize=(8,6),facecolor='white',edgecolor='white')
fig.subplots_adjust(left=0.1,right=0.8,bottom=0.1, top=0.9)
ax = fig.add_subplot(1,1,1)

# for each dr1 galaxy, show radial trends for each line.
for (galaxy,color,marker) in zip(degas[dr1],colorlist,markerlist):
    idx = ( (stack['galaxy'] == galaxy['NAME']) \
            & (stack['bin_type'] == 'radius'))
    # radius is in arcsec; normalize by R25 (deg -> arcsec).
    r25 = stack[idx]['bin_mean'] / (galaxy['R25_DEG'] * 3600.0)
    lolims = stack[idx]['ratio_ltir_mean_HCN_lolim']
    sfe_dense = stack[idx]['ratio_ltir_mean_HCN']
    sfe_dense_err = stack[idx]['ratio_ltir_mean_HCN_err']
    # Give lower limits a nominal 30% error bar, but plot detections only.
    sfe_dense_err[lolims] = sfe_dense[lolims] * 0.3
    ax.errorbar(r25[~lolims], sfe_dense[~lolims],
                yerr = sfe_dense_err[~lolims],
                marker = marker,
                linestyle= '--',
                color=color,
                label=galaxy['NAME'])

ax.set_yscale('log')
ax.legend(loc='upper left',bbox_to_anchor=(1.0,1.0))
ax.set_xlabel(r'r / r$_{25}$')
ax.set_ylabel(r'log$_{10}$ (IR-to-HCN)')
fig.show()
fig.savefig(os.path.join(plotDir,'sfe_dense_vs_r25_combined_nolim.pdf'))
fig.savefig(os.path.join(plotDir,'sfe_dense_vs_r25_combined_nolim.png'))
plt.close()
|
low-sky/degas
|
scripts/plot_SFEdense_vs_r25_combined_nolim.py
|
Python
|
gpl-3.0
| 2,162
|
[
"Galaxy"
] |
3aec4bf7cf1de65c8cc49a3748c6bfa0eff0b1623e506710891db69daeb6c003
|
# -*- coding: utf-8 -*-
from io import StringIO, BufferedReader
import os
import string
import py
import pytest
import hashfs
from hashfs._compat import to_bytes
@pytest.fixture
def testpath(tmpdir):
    # Fresh per-test directory used as the HashFS root.
    return tmpdir.mkdir('hashfs')
@pytest.fixture
def testfile(testpath):
    # Path object for a scratch file inside the test directory (not created).
    return testpath.join('hashfs.txt')
@pytest.fixture
def stringio():
    # In-memory text stream with known content.
    return StringIO(u'foo')
@pytest.yield_fixture
def fileio(testfile):
    # Write known bytes, then hand the test a fresh binary read handle;
    # the handle is closed on teardown.
    with open(str(testfile), 'wb') as io:
        io.write(b'foo')

    io = open(str(testfile), 'rb')

    yield io

    io.close()
@pytest.fixture
def filepath(testfile):
    # On-disk file with known bytes; returns the path object.
    testfile.write(b'foo')
    return testfile
@pytest.fixture
def fs(testpath):
    # HashFS instance rooted at the per-test directory, default settings.
    return hashfs.HashFS(str(testpath))
def put_range(fs, count):
    """Store the stringified integers 0..count-1 in `fs` and return a
    mapping of each stored file's absolute path to its address."""
    addresses = {}
    for index in range(count):
        address = fs.put(StringIO(u'{0}'.format(index)))
        addresses[address.abspath] = address
    return addresses
def assert_file_put(fs, address):
    """Assert that `address` describes a correctly sharded file under `fs.root`."""
    directory = os.path.dirname(address.relpath)
    dir_parts = [part for part in directory.split(os.path.sep) if part]
    # File must physically exist under the root and be addressable by id.
    assert address.abspath in tuple(py.path.local(fs.root).visit())
    assert fs.exists(address.id)
    # Stripping the path separators from relpath must reconstruct the id.
    id = os.path.splitext(address.relpath.replace(os.path.sep, ''))[0]
    assert id == address.id
    # Shard layout: `depth` directory levels, each `width` characters wide.
    assert len(dir_parts) == fs.depth
    assert all(len(part) == fs.width for part in dir_parts)
# --- put(): accepts text streams, file objects, and path strings ---
def test_hashfs_put_stringio(fs, stringio):
    address = fs.put(stringio)
    assert_file_put(fs, address)
    with open(address.abspath, 'rb') as fileobj:
        assert fileobj.read() == to_bytes(stringio.getvalue())
def test_hashfs_put_fileobj(fs, fileio):
    address = fs.put(fileio)
    assert_file_put(fs, address)
    with open(address.abspath, 'rb') as fileobj:
        assert fileobj.read() == fileio.read()
def test_hashfs_put_file(fs, filepath):
    address = fs.put(str(filepath))
    assert_file_put(fs, address)
    with open(address.abspath, 'rb') as fileobj:
        assert fileobj.read() == to_bytes(filepath.read())
def test_hashfs_put_duplicate(fs, stringio):
    # Second put of identical content is flagged as a duplicate.
    address_a = fs.put(stringio)
    address_b = fs.put(stringio)
    assert not address_a.is_duplicate
    assert address_b.is_duplicate
@pytest.mark.parametrize('extension', [
    'txt',
    '.txt',
    'md',
    '.md'
])
def test_hashfs_put_extension(fs, stringio, extension):
    # Extensions are accepted with or without the leading dot.
    address = fs.put(stringio, extension)
    assert_file_put(fs, address)
    assert os.path.sep in address.abspath
    assert os.path.splitext(address.abspath)[1].endswith(extension)
    assert not address.is_duplicate
def test_hashfs_put_error(fs):
    # A plain string that is not an existing path is rejected.
    with pytest.raises(ValueError):
        fs.put('foo')
def test_hashfs_address(fs, stringio):
    # relpath is relative to the root; abspath = root + relpath.
    address = fs.put(stringio)
    assert fs.root not in address.relpath
    assert os.path.join(fs.root, address.relpath) == address.abspath
    assert address.relpath.replace(os.sep, '') == address.id
    assert not address.is_duplicate
# --- open()/exists()/contains()/get(): lookup by id, relpath, or abspath ---
@pytest.mark.parametrize('extension,address_attr', [
    ('', 'id'),
    ('.txt', 'id'),
    ('txt', 'id'),
    ('', 'abspath'),
    ('.txt', 'abspath'),
    ('txt', 'abspath'),
])
def test_hashfs_open(fs, stringio, extension, address_attr):
    address = fs.put(stringio, extension)
    fileobj = fs.open(getattr(address, address_attr))
    # open() returns a buffered binary reader over the stored content.
    assert isinstance(fileobj, BufferedReader)
    assert fileobj.read() == to_bytes(stringio.getvalue())
    fileobj.close()
def test_hashfs_open_error(fs):
    with pytest.raises(IOError):
        fs.open('invalid')
def test_hashfs_exists(fs, stringio):
    address = fs.put(stringio)
    assert fs.exists(address.id)
    assert fs.exists(address.relpath)
    assert fs.exists(address.abspath)
def test_hashfs_contains(fs, stringio):
    # `in` delegates to exists() for all three address forms.
    address = fs.put(stringio)
    assert address.id in fs
    assert address.relpath in fs
    assert address.abspath in fs
def test_hashfs_get(fs, stringio):
    address = fs.put(stringio)
    assert not address.is_duplicate
    assert fs.get(address.id) == address
    assert fs.get(address.relpath) == address
    assert fs.get(address.abspath) == address
    assert fs.get('invalid') is None
# --- delete()/remove_empty()/unshard() ---
@pytest.mark.parametrize('address_attr', [
    'id',
    'abspath',
])
def test_hashfs_delete(fs, stringio, address_attr):
    # Deleting the only file also removes its now-empty shard directories.
    address = fs.put(stringio)
    fs.delete(getattr(address, address_attr))
    assert len(os.listdir(fs.root)) == 0
def test_hashfs_delete_error(fs):
    # Deleting a missing id is a silent no-op by design.
    fs.delete('invalid')
def test_hashfs_remove_empty(fs):
    subpath1 = os.path.join(fs.root, '1', '2', '3')
    subpath2 = os.path.join(fs.root, '1', '4', '5')
    subpath3 = os.path.join(fs.root, '6', '7', '8')
    fs.makepath(subpath1)
    fs.makepath(subpath2)
    fs.makepath(subpath3)
    assert os.path.exists(subpath1)
    assert os.path.exists(subpath2)
    assert os.path.exists(subpath3)
    fs.remove_empty(subpath1)
    fs.remove_empty(subpath3)
    # Only the pruned branches disappear; sibling branches survive.
    assert not os.path.exists(subpath1)
    assert os.path.exists(subpath2)
    assert not os.path.exists(subpath3)
def test_hashfs_remove_empty_subdir(fs):
    # remove_empty() never deletes the root itself or anything above it.
    fs.remove_empty(fs.root)
    assert os.path.exists(fs.root)
    fs.remove_empty(os.path.realpath(os.path.join(fs.root, '..')))
    assert os.path.exists(fs.root)
def test_hashfs_unshard(fs, stringio):
    address = fs.put(stringio)
    assert fs.unshard(address.abspath) == address.id
def test_hashfs_unshard_error(fs):
    with pytest.raises(ValueError):
        fs.unshard('invalid')
# --- repair()/iteration/size accounting ---
def test_hashfs_repair(fs, stringio):
    # Re-opening the store with a different depth re-shards existing files.
    original_address = fs.put(stringio)
    newfs = hashfs.HashFS(fs.root, depth=1)
    repaired = newfs.repair()
    assert len(repaired) == 1
    original_path, address = repaired[0]
    assert original_path == original_address.abspath
    assert not os.path.isfile(original_path)
    assert_file_put(newfs, address)
def test_hashfs_repair_duplicates(fs, stringio):
    original_address = fs.put(stringio)
    newfs = hashfs.HashFS(fs.root, depth=1)
    newfs.put(stringio)
    repaired = newfs.repair()
    assert len(repaired) == 1
    original_path, address = repaired[0]
    assert original_path == original_address.abspath
    assert not os.path.isfile(original_path)
    assert_file_put(newfs, address)
def test_hashfs_files(fs):
    count = 5
    addresses = put_range(fs, count)
    files = list(fs.files())
    assert len(files) == count
    for file in files:
        assert os.path.isfile(file)
        assert file in addresses
        assert addresses[file].abspath == file
        assert addresses[file].id == fs.unshard(file)
def test_hashfs_iter(fs):
    # Iterating the store itself yields the same paths as files().
    count = 5
    addresses = put_range(fs, count)
    test_count = 0
    for file in fs:
        test_count += 1
        assert os.path.isfile(file)
        assert file in addresses
        assert addresses[file].abspath == file
        assert addresses[file].id == fs.unshard(file)
    assert test_count == count
def test_hashfs_count(fs):
    count = 5
    put_range(fs, count)
    assert fs.count() == count
def test_hashfs_len(fs):
    count = 5
    put_range(fs, count)
    assert len(fs) == count
def test_hashfs_folders(fs):
    count = 5
    addresses = put_range(fs, count)
    folders = list(fs.folders())
    assert len(folders) == count
    for folder in folders:
        assert os.path.exists(folder)
        assert os.path.isfile(os.path.join(folder, os.listdir(folder)[0]))
def test_hashfs_size(fs):
    # size() reports the total byte count of all stored content.
    fs.put(StringIO(u'{0}'.format(string.ascii_lowercase)))
    fs.put(StringIO(u'{0}'.format(string.ascii_uppercase)))
    expected = len(string.ascii_lowercase) + len(string.ascii_uppercase)
    assert fs.size() == expected
|
bopo/hashfs
|
tests/test_hashfs.py
|
Python
|
mit
| 7,615
|
[
"VisIt"
] |
817ca3559f114c5be709630ca8789ebc56b969df4f65577e48dded91e0af66f2
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAbsseq(RPackage):
    """Inferring differential expression genes by absolute counts
    difference between two groups, utilizing Negative binomial
    distribution and moderating fold-change according to heterogeneity
    of dispersion across expression level."""
    homepage = "https://www.bioconductor.org/packages/ABSSeq/"
    # NOTE(review): `url` points at the Bioconductor git repo rather than a
    # tarball; the version below fetches by git commit, so this is only a
    # placeholder — confirm against Spack's Bioconductor packaging convention.
    url = "https://git.bioconductor.org/packages/ABSSeq"
    # Pinned to an exact commit for reproducibility.
    version('1.22.8', git='https://git.bioconductor.org/packages/ABSSeq', commit='a67ba49bc156a4522092519644f3ec83d58ebd6a')
    # This Bioconductor release only builds against the R 3.4.x series.
    depends_on('r@3.4.0:3.4.9', when='@1.22.8')
    depends_on('r-locfit', type=('build', 'run'))
    depends_on('r-limma', type=('build', 'run'))
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/r-absseq/package.py
|
Python
|
lgpl-2.1
| 1,928
|
[
"Bioconductor"
] |
e8354087f4e969a179b58fa09858e442e631f3a7ef5803756f2805364b786596
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
from torch import nn
def identity(x):
    # No-op activation: returns its input unchanged.
    return x
# Registry mapping activation names to callables. All entries except
# 'identity' are shared nn.Module instances (stateless, so sharing is safe).
_str_to_activation = {
    'identity': identity,
    'relu': nn.ReLU(),
    'tanh': nn.Tanh(),
    'leaky_relu': nn.LeakyReLU(),
    'sigmoid': nn.Sigmoid(),
    'selu': nn.SELU(),
    'softplus': nn.Softplus(),
}
def activation_from_string(string):
    # Look up an activation by name; raises KeyError for unknown names.
    return _str_to_activation[string]
def soft_update_from_to(source, target, tau):
    """Polyak-average `source`'s parameters into `target`, in place.

    For each parameter pair: target <- (1 - tau) * target + tau * source.
    tau=1.0 is a hard copy; tau=0.0 leaves `target` untouched.
    """
    for tgt, src in zip(target.parameters(), source.parameters()):
        tgt.data.copy_(tgt.data * (1.0 - tau) + src.data * tau)
def copy_model_params_from_to(source, target):
    """Overwrite `target`'s parameters with `source`'s values (hard copy, in place)."""
    for tgt, src in zip(target.parameters(), source.parameters()):
        tgt.data.copy_(src.data)
def maximum_2d(t1, t2):
    """Element-wise maximum of two same-shaped 2-D tensors.

    Bug fix: the original concatenated along a new dim 2, reduced with
    torch.max(..., dim=2), and then called .squeeze(2) on the already
    2-D result — which raises (dimension out of range) on modern PyTorch.
    torch.max(t1, t2) computes the intended element-wise maximum directly.
    """
    return torch.max(t1, t2)
def kronecker_product(t1, t2):
"""
Computes the Kronecker product between two tensors
See https://en.wikipedia.org/wiki/Kronecker_product
"""
t1_height, t1_width = t1.size()
t2_height, t2_width = t2.size()
out_height = t1_height * t2_height
out_width = t1_width * t2_width
# TODO(vitchyr): see if you can use expand instead of repeat
tiled_t2 = t2.repeat(t1_height, t1_width)
expanded_t1 = (
t1.unsqueeze(2)
.unsqueeze(3)
.repeat(1, t2_height, t2_width, 1)
.view(out_height, out_width)
)
return expanded_t1 * tiled_t2
def alpha_dropout(
    x,
    p=0.05,
    alpha=-1.7580993408473766,  # presumably SELU's saturation value alpha' — TODO confirm
    fixedPointMean=0,
    fixedPointVar=1,
    training=False,
):
    """Alpha dropout: drops entries to `alpha` (not zero) with probability `p`,
    then rescales affinely so the output keeps the given fixed-point mean/variance.
    Identity when `training` is False or p == 0. Uses torch.rand, so results
    depend on the global RNG state.
    """
    keep_prob = 1 - p
    if keep_prob == 1 or not training:
        return x
    # Affine correction (a, b) restoring the target mean/variance after dropout.
    a = np.sqrt(fixedPointVar / (keep_prob * (
        (1 - keep_prob) * pow(alpha - fixedPointMean, 2) + fixedPointVar)))
    b = fixedPointMean - a * (
        keep_prob * fixedPointMean + (1 - keep_prob) * alpha)
    keep_prob = 1 - p
    # Bernoulli(keep_prob) mask: 1 keeps the entry, 0 replaces it with alpha.
    random_tensor = keep_prob + torch.rand(x.size())
    binary_tensor = torch.floor(random_tensor)
    x = x.mul(binary_tensor)
    ret = x + alpha * (1 - binary_tensor)
    ret.mul_(a).add_(b)
    return ret
def alpha_selu(x, training=False):
    """Apply SELU to `x`, then alpha dropout (active only when training).

    Bug fix: the original called ``nn.SELU(x)``, which CONSTRUCTS an nn.SELU
    module (interpreting `x` as the `inplace` flag) instead of applying SELU
    to the tensor, and then passed that module into alpha_dropout. Use the
    functional form so the activation is actually computed.
    """
    return alpha_dropout(nn.functional.selu(x), training=training)
def double_moments(x, y):
    """
    Returns the first two moments between x and y.
    Specifically, for each vector x_i and y_i in x and y, compute their
    outer-product. Flatten this resulting matrix and return it.
    The first moments (i.e. x_i and y_i) are included by appending a `1` to x_i
    and y_i before taking the outer product.
    :param x: Shape [batch_size, feature_x_dim]
    :param y: Shape [batch_size, feature_y_dim]
    :return: Shape [batch_size, (feature_x_dim + 1) * (feature_y_dim + 1)
    """
    batch_size, x_dim = x.size()
    # BUG FIX: this previously read `x.size()`, which broke (expand error)
    # whenever x and y had different feature dimensions.
    _, y_dim = y.size()
    x = torch.cat((x, torch.ones(batch_size, 1)), dim=1)
    y = torch.cat((y, torch.ones(batch_size, 1)), dim=1)
    x_dim += 1
    y_dim += 1
    x = x.unsqueeze(2)
    y = y.unsqueeze(1)
    # Batched outer product: (B, x_dim, 1) * (B, 1, y_dim) via expand.
    outer_prod = (
        x.expand(batch_size, x_dim, y_dim) * y.expand(batch_size, x_dim,
                                                      y_dim)
    )
    return outer_prod.view(batch_size, -1)
def batch_diag(diag_values, diag_mask=None):
    """Embed a batch of vectors as diagonal matrices.

    :param diag_values: Shape [batch_size, dim] diagonal entries.
    :param diag_mask: Optional [dim, dim] 0/1 mask; defaults to the identity
        pattern (a standard diagonal).
    :return: Shape [batch_size, dim, dim].
    """
    n, d = diag_values.size()
    if diag_mask is None:
        diag_mask = torch.diag(torch.ones(d))
    mask = diag_mask.unsqueeze(0).expand(n, d, d)
    vals = diag_values.unsqueeze(1).expand(n, d, d)
    return vals * mask
def batch_square_vector(vector, M):
    """
    Compute x^T M x for each (x, M) pair in the batch.

    :param vector: Shape [batch_size, dim]
    :param M: Shape [batch_size, dim, dim]
    :return: Shape [batch_size, 1]
    """
    col = vector.unsqueeze(2)                    # (B, dim, 1)
    quad = col.transpose(2, 1) @ M @ col         # (B, 1, 1) via batched matmul
    return quad.squeeze(2)
def fanin_init(tensor):
    """Initialize `tensor` in place, uniform in [-1/sqrt(fan_in), 1/sqrt(fan_in)].

    fan_in is the first dimension for 2-D tensors, and the product of all
    trailing dimensions otherwise. Raises for tensors with fewer than 2 dims.
    Returns the (mutated) underlying data tensor.
    """
    size = tensor.size()
    if len(size) < 2:
        raise Exception("Shape must be have dimension at least 2.")
    fan_in = size[0] if len(size) == 2 else np.prod(size[1:])
    bound = 1. / np.sqrt(fan_in)
    return tensor.data.uniform_(-bound, bound)
def fanin_init_weights_like(tensor):
    """Return a NEW tensor shaped like `tensor`, fan-in uniform initialized
    (the input is not modified). Uses the module-level FloatTensor helper,
    so the result is placed on the module's configured device.
    """
    size = tensor.size()
    if len(size) == 2:
        fan_in = size[0]
    elif len(size) > 2:
        # Convolution-style weights: fan-in over all trailing dims.
        fan_in = np.prod(size[1:])
    else:
        raise Exception("Shape must be have dimension at least 2.")
    bound = 1. / np.sqrt(fan_in)
    new_tensor = FloatTensor(tensor.size())
    new_tensor.uniform_(-bound, bound)
    return new_tensor
def almost_identity_weights_like(tensor):
    """
    Set W = I + lambda * Gaussian no
    :param tensor:
    :return:
    """
    # NOTE(review): despite the docstring, the perturbation uses
    # np.random.rand (uniform in [0, 1)), not a Gaussian — confirm intent.
    # Expects a 2-D shape; np.eye builds a (possibly rectangular) identity.
    shape = tensor.size()
    init_value = np.eye(*shape)
    init_value += 0.01 * np.random.rand(*shape)
    return FloatTensor(init_value)
def clip1(x):
    """Clamp every element of `x` to the closed interval [-1, 1]."""
    return x.clamp(min=-1, max=1)
def compute_conv_output_size(h_in, w_in, kernel_size, stride, padding=0):
    """Output (height, width) of a square conv with the given geometry.

    Standard formula: floor((dim + 2*padding - kernel_size) / stride) + 1.
    """
    def _out(dim):
        return int(np.floor((dim + 2 * padding - (kernel_size - 1) - 1) / stride + 1))
    return _out(h_in), _out(w_in)
def compute_deconv_output_size(h_in, w_in, kernel_size, stride, padding=0):
    """Output (height, width) of a square transposed conv (deconv).

    Standard formula: (dim - 1) * stride - 2*padding + kernel_size.
    """
    def _out(dim):
        return int(np.floor((dim - 1) * stride - 2 * padding + kernel_size))
    return _out(h_in), _out(w_in)
def compute_conv_layer_sizes(h_in, w_in, kernel_sizes, strides, paddings=None):
    """Print the running spatial size after each conv layer.

    :param h_in, w_in: input spatial size.
    :param kernel_sizes, strides: per-layer conv parameters.
    :param paddings: optional per-layer paddings; defaults to 0 everywhere.

    Fixes: `paddings == None` -> `paddings is None` (identity comparison for
    None), and collapses the two near-identical loops into one by defaulting
    paddings to zeros.
    """
    if paddings is None:
        paddings = [0] * len(kernel_sizes)
    for kernel, stride, padding in zip(kernel_sizes, strides, paddings):
        h_in, w_in = compute_conv_output_size(h_in, w_in, kernel, stride,
                                              padding=padding)
        print('Output Size:', (h_in, w_in))
def compute_deconv_layer_sizes(h_in, w_in, kernel_sizes, strides,
                               paddings=None):
    """Print the running spatial size after each transposed-conv layer.

    :param h_in, w_in: input spatial size.
    :param kernel_sizes, strides: per-layer deconv parameters.
    :param paddings: optional per-layer paddings; defaults to 0 everywhere.

    Fixes: `paddings == None` -> `paddings is None`, and collapses the two
    near-identical loops into one by defaulting paddings to zeros.
    """
    if paddings is None:
        paddings = [0] * len(kernel_sizes)
    for kernel, stride, padding in zip(kernel_sizes, strides, paddings):
        h_in, w_in = compute_deconv_output_size(h_in, w_in, kernel, stride,
                                                padding=padding)
        print('Output Size:', (h_in, w_in))
"""
GPU wrappers
"""
_use_gpu = False
device = None
def set_gpu_mode(mode, gpu_id=0):
global _use_gpu
global device
global _gpu_id
_gpu_id = gpu_id
_use_gpu = mode
device = torch.device("cuda:" + str(gpu_id) if _use_gpu else "cpu")
def gpu_enabled():
return _use_gpu
def set_device(gpu_id):
torch.cuda.set_device(gpu_id)
# noinspection PyPep8Naming
def FloatTensor(*args, torch_device=None, **kwargs):
if torch_device is None:
torch_device = device
return torch.FloatTensor(*args, **kwargs, device=torch_device)
def from_numpy(*args, **kwargs):
return torch.from_numpy(*args, **kwargs).float().to(device)
def get_numpy(tensor):
return tensor.to('cpu').detach().numpy()
def randint(*sizes, torch_device=None, **kwargs):
if torch_device is None:
torch_device = device
return torch.randint(*sizes, **kwargs, device=torch_device)
def zeros(*sizes, torch_device=None, **kwargs):
if torch_device is None:
torch_device = device
return torch.zeros(*sizes, **kwargs, device=torch_device)
def ones(*sizes, torch_device=None, **kwargs):
if torch_device is None:
torch_device = device
return torch.ones(*sizes, **kwargs, device=torch_device)
def ones_like(*args, torch_device=None, **kwargs):
if torch_device is None:
torch_device = device
return torch.ones_like(*args, **kwargs, device=torch_device)
def randn(*args, torch_device=None, **kwargs):
if torch_device is None:
torch_device = device
return torch.randn(*args, **kwargs, device=torch_device)
def zeros_like(*args, torch_device=None, **kwargs):
if torch_device is None:
torch_device = device
return torch.zeros_like(*args, **kwargs, device=torch_device)
def tensor(*args, torch_device=None, **kwargs):
if torch_device is None:
torch_device = device
return torch.tensor(*args, **kwargs, device=torch_device)
def normal(*args, **kwargs):
return torch.normal(*args, **kwargs).to(device)
|
google-research/DBAP-algorithm
|
third_party/rlkit_library/rlkit/torch/pytorch_util.py
|
Python
|
apache-2.0
| 9,233
|
[
"Gaussian"
] |
5bc30e1d5da29d43a511430e3863f7ece998fcdb7b910e7d2854810f54737717
|
"""
UUTrack.Model.Cameras.Hamamatsu.py
==================================
Model class for controlling Hamamatsu cameras via de DCAM-API. At the time of writing this class,
little documentation on the DCAM-API was available. Hamamatsu has a different time schedule regardin support of
their own API. However, Zhuang's lab Github repository had a python driver for the Orca camera and with a bit of
tinkering things worked out.
DCAM-API relies mostly on setting parameters into the camera. The correct data type of each parameter is not well
documented; however it is possible to print all the available properties and work from there. The properties are
stored in a filed named params.txt next to the :mod:`Hamamatsu Driver
<UUTrack.Controller.devices.hamamatsu.hamamatsu_camera>`
.. note:: When setting the ROI, Hamamatsu only allows to set multiples of 4 for every setting (X,Y and vsize,
hsize). This is checked in the function. Changing the ROI cannot be done directly, one first needs to disable it
and then re-enable.
.. sectionauthor:: Aquiles Carattino <aquiles@aquicarattino.com>
"""
import numpy as np
from UUTrack.Controller.devices.hamamatsu.hamamatsu_camera import HamamatsuCamera
from ._skeleton import cameraBase
class camera(cameraBase):
    # Acquisition modes understood by setAcquisitionMode().
    MODE_CONTINUOUS = 1
    MODE_SINGLE_SHOT = 0
    MODE_EXTERNAL = 2
    def __init__(self,camera):
        self.cam_id = camera # Camera ID
        self.camera = HamamatsuCamera(camera)
        self.running = False
        self.mode = self.MODE_SINGLE_SHOT
    def initializeCamera(self):
        """ Initializes the camera.
        :return:
        """
        self.camera.initCamera()
        self.maxWidth = self.GetCCDWidth()
        self.maxHeight = self.GetCCDHeight()
        # This is important to not have shuffled patches of the CCD.
        # Have to check documentation!!
        self.camera.setPropertyValue("readout_speed", 1)
        self.camera.setPropertyValue("defect_correct_mode", 1)
    def triggerCamera(self):
        """Triggers the camera.
        """
        # Single-shot: start then immediately stop so exactly one frame is taken.
        if self.getAcquisitionMode() == self.MODE_CONTINUOUS:
            self.camera.startAcquisition()
        elif self.getAcquisitionMode() == self.MODE_SINGLE_SHOT:
            self.camera.startAcquisition()
            self.camera.stopAcquisition()
    def setAcquisitionMode(self, mode):
        """
        Set the readout mode of the camera: Single or continuous.
        Parameters
        mode : int
        One of self.MODE_CONTINUOUS, self.MODE_SINGLE_SHOT
        """
        self.mode = mode
        if mode == self.MODE_CONTINUOUS:
            #self.camera.setPropertyValue("trigger_source", 1)
            self.camera.settrigger(1)
            self.camera.setmode(self.camera.CAPTUREMODE_SEQUENCE)
        elif mode == self.MODE_SINGLE_SHOT:
            #self.camera.setPropertyValue("trigger_source", 3)
            self.camera.settrigger(1)
            self.camera.setmode(self.camera.CAPTUREMODE_SNAP)
        elif mode == self.MODE_EXTERNAL:
            #self.camera.setPropertyValue("trigger_source", 2)
            self.camera.settrigger(2)
        return self.getAcquisitionMode()
    def getAcquisitionMode(self):
        """Returns the acquisition mode, either continuous or single shot.
        """
        return self.mode
    def acquisitionReady(self):
        """Checks if the acquisition in the camera is over.
        """
        # Always True here; the DCAM driver buffers frames internally.
        return True
    def setExposure(self,exposure):
        """
        Sets the exposure of the camera.
        """
        # Exposure is passed in milliseconds; the driver expects seconds.
        self.camera.setPropertyValue("exposure_time",exposure/1000)
        return self.getExposure()
    def getExposure(self):
        """
        Gets the exposure time of the camera.
        """
        # Driver reports seconds; convert back to milliseconds.
        return self.camera.getPropertyValue("exposure_time")[0]*1000
    def readCamera(self):
        """
        Reads the camera
        """
        # Returns a list of 2-D arrays, one per buffered frame (transposed so
        # axes match the (width, height) convention used elsewhere).
        [frames, dims] = self.camera.getFrames()
        img = []
        for f in frames:
            d = f.getData()
            d = np.reshape(d, (dims[1], dims[0]))
            d = d.T
            img.append(d)
        # img = frames[-1].getData()
        # img = np.reshape(img,(dims[0],dims[1]))
        return img
    def setROI(self,X,Y):
        """
        Sets up the ROI. Not all cameras are 0-indexed, so this is an important
        place to define the proper ROI.
        X -- array type with the coordinates for the ROI X[0], X[1]
        Y -- array type with the coordinates for the ROI Y[0], Y[1]
        """
        # First needs to go full frame, if not, throws an error of subframe not valid
        self.camera.setPropertyValue("subarray_vpos", 0)
        self.camera.setPropertyValue("subarray_hpos", 0)
        self.camera.setPropertyValue("subarray_vsize", self.camera.max_height)
        self.camera.setPropertyValue("subarray_hsize", self.camera.max_width)
        self.camera.setSubArrayMode()
        # NOTE(review): in-place -=1 mutates the caller's X and Y arrays —
        # confirm callers do not reuse them afterwards.
        X-=1
        Y-=1
        # Because of how Orca Flash 4 works, all the ROI parameters have to be multiple of 4.
        hsize = int(abs(X[0]-X[1])/4)*4
        hpos = int(X[0]/4)*4
        vsize = int(abs(Y[0]-Y[1])/4)*4
        vpos = int(Y[0]/4)*4
        self.camera.setPropertyValue("subarray_vpos", vpos)
        self.camera.setPropertyValue("subarray_hpos", hpos)
        self.camera.setPropertyValue("subarray_vsize", vsize)
        self.camera.setPropertyValue("subarray_hsize", hsize)
        self.camera.setSubArrayMode()
        return self.getSize()
    def getSize(self):
        """Returns the size in pixels of the image being acquired. This is useful for checking the ROI settings.
        """
        X = self.camera.getPropertyValue("subarray_hsize")
        Y = self.camera.getPropertyValue("subarray_vsize")
        return X[0], Y[0]
    def getSerialNumber(self):
        """Returns the serial number of the camera.
        """
        return self.camera.getModelInfo(self.cam_id)
    def GetCCDWidth(self):
        """
        Returns
        The CCD width in pixels
        """
        return self.camera.max_width
    def GetCCDHeight(self):
        """
        Returns
        The CCD height in pixels
        """
        return self.camera.max_height
    def stopAcq(self):
        # Stop a running acquisition without closing the camera.
        self.camera.stopAcquisition()
    def stopCamera(self):
        """Stops the acquisition and closes the connection with the camera.
        """
        try:
            #Closing the camera
            self.camera.stopAcquisition()
            self.camera.shutdown()
            return True
        except:
            # NOTE(review): bare except — best-effort shutdown; any driver
            # error is reported as False rather than raised.
            #Camera failed to close
            return False
|
uetke/experimentor
|
experimentor/models/Cameras/Hamamatsu.py
|
Python
|
mit
| 6,645
|
[
"ORCA"
] |
1ed13cba7790edd872992d9ffbc1dcb3c2a4456aeb13da37a092f71d7c793506
|
# coding=utf-8
# Copyright 2022 GradMax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""VGG Network."""
import functools
from typing import Any, Dict
from growneuron.cifar import wide_resnet
import growneuron.layers as glayers
import tensorflow as tf
# Re-export the normalization-type enum used by wide_resnet for consistency.
NormalizationType = wide_resnet.NormalizationType
BatchNormalization = functools.partial(
    tf.keras.layers.BatchNormalization,
    epsilon=1e-5,  # using epsilon and momentum defaults from Torch
    momentum=0.9)
LayerNormalization = functools.partial(
    tf.keras.layers.LayerNormalization,
    epsilon=1e-5)  # using epsilon and momentum defaults from Torch
def Conv2D(filters, seed=None, **kwargs):
  """Conv2D layer that is deterministically initialized.

  Defaults (3x3 kernel, same padding, no bias, seeded HeNormal init) can be
  overridden by keyword arguments.
  """
  options = dict(
      kernel_size=3,
      padding="same",
      use_bias=False,
      # Class-constructor initializer so initialization is deterministic.
      kernel_initializer=tf.keras.initializers.HeNormal(seed=seed),
  )
  # Caller-supplied kwargs win over the defaults.
  options.update(kwargs)
  return tf.keras.layers.Conv2D(filters, **options)
class VGG(tf.keras.Model):
  """Builds a VGG CNN without the FC layers at the end.
  We don't add the FC layers to stay in sync with the implementation in the
  "Firefly Neural Architecture Descent" paper.
  Attributes:
    depth: Use 11 for VGG11, 16 for VGG16, etc.,
    width_multiplier: The number of filters in the first layer
      ("1" corresponds to 64 filters).
    num_classes: Number of output classes.
    normalization_type: NormalizationType, of the normalization used inside
      blocks.
    l2: L2 regularization coefficient.
    seed: random seed used for initialization.
  """
  def __init__(self,
               depth,
               width_multiplier,
               num_classes,
               normalization_type,
               l2,
               seed = 42):
    super().__init__(name=F"VGG-{depth}-{width_multiplier}")
    l2_reg = tf.keras.regularizers.l2
    # Stateless RNG seed pair, split once per conv for deterministic init.
    rng_seed = [seed, seed + 1]
    assert depth == 11, "Only supporting VGG11 right now"
    # VGG consists of blocks of convs separated by downsampling.
    # Within each block, each conv has of base_width * multiplier filters.
    # This dict maps VGG-xx to a list of blocks.
    architecture = {
        11: [[1], [2], [4, 4], [8, 8], [8, 8]],
        14: [[1, 1], [2, 2], [4, 4], [8, 8], [8, 8]],
        16: [[1, 1], [2, 2], [4, 4, 4], [8, 8, 8], [8, 8, 8]],
        19: [[1, 1], [2, 2], [4, 4, 4, 4], [8, 8, 8, 8], [8, 8, 8, 8]]
    }
    blocklist = architecture[depth]
    base_width = int(64 * width_multiplier)
    # The first conv of each block (except the very first) downsamples via
    # stride 2; `downsample` is reset after it is consumed.
    downsample = False
    self.layer_list = []
    for block in blocklist:
      for multiplier in block:
        rng_seed, seed = tf.random.experimental.stateless_split(rng_seed)
        self.layer_list.append(glayers.GrowLayer(Conv2D(
            base_width*multiplier, strides=1 if not downsample else 2,
            seed=seed[0],
            kernel_regularizer=tf.keras.regularizers.l2(l2))))
        downsample = False
        self.layer_list.append(tf.keras.layers.Activation(
            glayers.get_activation_fn("relu1")),)
        if normalization_type == NormalizationType.batchnorm:
          self.layer_list.append(glayers.GrowLayer(
              BatchNormalization(
                  beta_regularizer=tf.keras.regularizers.l2(l2),
                  gamma_regularizer=tf.keras.regularizers.l2(l2))))
        elif normalization_type == NormalizationType.layernorm:
          self.layer_list.append(glayers.GrowLayer(
              LayerNormalization(
                  beta_regularizer=tf.keras.regularizers.l2(l2),
                  gamma_regularizer=tf.keras.regularizers.l2(l2))))
        elif normalization_type == NormalizationType.none:
          pass
        else:
          raise ValueError
      downsample = True
    # Final 1-per-class conv (stride 2) followed by flatten acts as the head.
    self.layer_list.append(
        glayers.GrowLayer(
            Conv2D(num_classes, strides=2, kernel_regularizer=l2_reg(l2))))
    self.layer_list.append(tf.keras.layers.Flatten())
  def call(self, x):
    # Plain sequential forward pass through the stored layer list.
    for layer in self.layer_list:
      x = layer(x)
    return x
  def get_grow_layer_tuples(self):
    """Gets all groups of layers that need to grow together."""
    # Indices of growable conv layers; consecutive pairs (plus the layers
    # between them) must be grown as a unit.
    grow_layers = [
        i for i, l in enumerate(self.layer_list)
        if (isinstance(l, glayers.GrowLayer) and
            isinstance(l.layer, tf.keras.layers.Conv2D))
    ]
    grow_layer_tuples = []
    for i, j in zip(grow_layers[:-1], grow_layers[1:]):
      # Grow tuples should be in order.
      grow_layer_tuples.append(self.layer_list[i:(j+1)])
    return grow_layer_tuples
def create_model(
    depth = 1,
    width_multiplier = 1,
    num_classes = 10,
    l2_coef = 0.0,
    normalization_type = "batchnorm",
    **unused_kwargs):
  """Instantiate a VGG model from scalar hyperparameters.

  `normalization_type` is the string name of a NormalizationType member.
  Extra keyword arguments are accepted and ignored.
  """
  norm = NormalizationType[normalization_type]
  return VGG(
      depth=depth,
      width_multiplier=width_multiplier,
      num_classes=num_classes,
      normalization_type=norm,
      l2=l2_coef)
|
google-research/growneuron
|
growneuron/cifar/vgg.py
|
Python
|
apache-2.0
| 5,639
|
[
"Firefly"
] |
7eda791c5a53f31594581765eb9336290ad0fc29f57e3f6716dd5622643e33ee
|
"""
Module to set up run time parameters for Clawpack.
The values set in the function setrun are then written out to data files
that will be read in by the Fortran code.
"""
import os
import numpy as np
# NOTE(review): bare `except:` — consider `except KeyError:`; the message
# also misspells "environment", but it is a runtime string so it is left
# untouched here.
try:
    CLAW = os.environ['CLAW']
except:
    raise Exception("*** Must first set CLAW enviornment variable")
# Scratch directory for storing topo and dtopo files:
scratch_dir = os.path.join(CLAW, 'geoclaw', 'scratch')
#------------------------------
def setrun(claw_pkg='geoclaw'):
#------------------------------

    """
    Define the parameters used for running Clawpack.

    INPUT:
        claw_pkg expected to be "geoclaw" for this setrun.

    OUTPUT:
        rundata - object of class ClawRunData, fully populated with the
        domain, AMR, output, and GeoClaw parameters below and ready to be
        written out with rundata.write().
    """

    from clawpack.clawutil import data

    assert claw_pkg.lower() == 'geoclaw', "Expected claw_pkg = 'geoclaw'"

    num_dim = 2
    rundata = data.ClawRunData(claw_pkg, num_dim)

    #------------------------------------------------------------------
    # Problem-specific parameters to be written to setprob.data:
    #------------------------------------------------------------------
    #probdata = rundata.new_UserData(name='probdata',fname='setprob.data')

    #------------------------------------------------------------------
    # GeoClaw specific parameters:
    #------------------------------------------------------------------
    rundata = setgeo(rundata)

    #------------------------------------------------------------------
    # Standard Clawpack parameters to be written to claw.data:
    #   (or to amr2ez.data for AMR)
    #------------------------------------------------------------------
    clawdata = rundata.clawdata  # initialized when rundata instantiated

    # Set single grid parameters first.
    # See below for AMR parameters.

    # ---------------
    # Spatial domain:
    # ---------------

    # Number of space dimensions:
    clawdata.num_dim = num_dim

    # Lower and upper edge of computational domain:
    clawdata.lower[0] = -120.0      # west longitude
    clawdata.upper[0] = -60.0       # east longitude

    clawdata.lower[1] = -60.0       # south latitude
    clawdata.upper[1] = 0.0         # north latitude

    # Number of grid cells: Coarsest grid
    clawdata.num_cells[0] = 30
    clawdata.num_cells[1] = 30

    # ---------------
    # Size of system:
    # ---------------

    # Number of equations in the system:
    clawdata.num_eqn = 3

    # Number of auxiliary variables in the aux array (initialized in setaux)
    clawdata.num_aux = 3

    # Index of aux array corresponding to capacity function, if there is one:
    clawdata.capa_index = 2

    # -------------
    # Initial time:
    # -------------
    clawdata.t0 = 0.0

    # Restart from checkpoint file of a previous run?
    # Note: If restarting, you must also change the Makefile to set:
    #    RESTART = True
    # If restarting, t0 above should be from original run, and the
    # restart_file 'fort.chkNNNNN' specified below should be in
    # the OUTDIR indicated in Makefile.

    clawdata.restart = False               # True to restart from prior results
    clawdata.restart_file = 'fort.chk00036'  # File to use for restart data

    # -------------
    # Output times:
    #--------------

    # Specify at what times the results should be written to fort.q files.
    # Note that the time integration stops after the final output time.
    # The solution at initial time t0 is always written in addition.

    clawdata.output_style = 1

    if clawdata.output_style == 1:
        # Output nout frames at equally spaced times up to tfinal:
        clawdata.num_output_times = 5
        clawdata.tfinal = 5*3600.
        clawdata.output_t0 = True  # output at initial (or restart) time?

    elif clawdata.output_style == 2:
        # Specify a list of output times.
        clawdata.output_times = [0.5, 1.0]

    elif clawdata.output_style == 3:
        # Output every iout timesteps with a total of ntot time steps:
        clawdata.output_step_interval = 1
        clawdata.total_steps = 3
        clawdata.output_t0 = True

    clawdata.output_format = 'ascii'         # 'ascii' or 'netcdf'
    clawdata.output_q_components = 'all'     # need all
    clawdata.output_aux_components = 'none'  # eta=h+B is in q
    clawdata.output_aux_onlyonce = False     # output aux arrays each frame

    # ---------------------------------------------------
    # Verbosity of messages to screen during integration:
    # ---------------------------------------------------

    # The current t, dt, and cfl will be printed every time step
    # at AMR levels <= verbosity.  Set verbosity = 0 for no printing.
    # (E.g. verbosity == 2 means print only on levels 1 and 2.)
    clawdata.verbosity = 1

    # --------------
    # Time stepping:
    # --------------

    # if dt_variable==1: variable time steps used based on cfl_desired,
    # if dt_variable==0: fixed time steps dt = dt_initial will always be used.
    clawdata.dt_variable = True

    # Initial time step for variable dt.
    # If dt_variable==0 then dt=dt_initial for all steps:
    clawdata.dt_initial = 0.2

    # Max time step to be allowed if variable dt used:
    clawdata.dt_max = 1e+99

    # Desired Courant number if variable dt used, and max to allow without
    # retaking step with a smaller dt:
    clawdata.cfl_desired = 0.75
    clawdata.cfl_max = 1.0

    # Maximum number of time steps to allow between output times:
    clawdata.steps_max = 5000

    # ------------------
    # Method to be used:
    # ------------------

    # Order of accuracy:  1 => Godunov,  2 => Lax-Wendroff plus limiters
    clawdata.order = 2

    # Use dimensional splitting? (not yet available for AMR)
    clawdata.dimensional_split = 'unsplit'

    # For unsplit method, transverse_waves can be
    #  0 or 'none'      ==> donor cell (only normal solver used)
    #  1 or 'increment' ==> corner transport of waves
    #  2 or 'all'       ==> corner transport of 2nd order corrections too
    clawdata.transverse_waves = 2

    # Number of waves in the Riemann solution:
    clawdata.num_waves = 3

    # List of limiters to use for each wave family:
    # Required:  len(limiter) == num_waves
    # Some options:
    #   0 or 'none'     ==> no limiter (Lax-Wendroff)
    #   1 or 'minmod'   ==> minmod
    #   2 or 'superbee' ==> superbee
    #   3 or 'mc'       ==> MC limiter
    #   4 or 'vanleer'  ==> van Leer
    clawdata.limiter = ['mc', 'mc', 'mc']

    clawdata.use_fwaves = True  # True ==> use f-wave version of algorithms

    # Source terms splitting:
    #   src_split == 0 or 'none'    ==> no source term (src routine never called)
    #   src_split == 1 or 'godunov' ==> Godunov (1st order) splitting used,
    #   src_split == 2 or 'strang'  ==> Strang (2nd order) splitting used,  not recommended.
    clawdata.source_split = 'godunov'

    # --------------------
    # Boundary conditions:
    # --------------------

    # Number of ghost cells (usually 2)
    clawdata.num_ghost = 2

    # Choice of BCs at xlower and xupper:
    #   0 => user specified (must modify bcN.f to use this option)
    #   1 => extrapolation (non-reflecting outflow)
    #   2 => periodic (must specify this at both boundaries)
    #   3 => solid wall for systems where q(2) is normal velocity
    clawdata.bc_lower[0] = 'extrap'
    clawdata.bc_upper[0] = 'extrap'

    clawdata.bc_lower[1] = 'extrap'
    clawdata.bc_upper[1] = 'extrap'

    # --------------
    # Checkpointing:
    # --------------

    # Specify when checkpoint files should be created that can be
    # used to restart a computation.

    clawdata.checkpt_style = 0

    if clawdata.checkpt_style == 0:
        # Do not checkpoint at all
        pass

    elif clawdata.checkpt_style == 1:
        # Checkpoint only at tfinal.
        pass

    elif clawdata.checkpt_style == 2:
        # Specify a list of checkpoint times.
        clawdata.checkpt_times = [0.1, 0.15]

    elif clawdata.checkpt_style == 3:
        # Checkpoint every checkpt_interval timesteps (on Level 1)
        # and at the final time.
        clawdata.checkpt_interval = 5

    # ---------------
    # AMR parameters:
    # ---------------
    amrdata = rundata.amrdata

    # max number of refinement levels:
    amrdata.amr_levels_max = 1

    # List of refinement ratios at each level:
    amrdata.refinement_ratios_x = [2]
    amrdata.refinement_ratios_y = [2]
    amrdata.refinement_ratios_t = [2]

    # Specify type of each aux variable in amrdata.auxtype.
    # This must be a list of length maux, each element of which is one of:
    #   'center',  'capacity', 'xleft', or 'yleft'  (see documentation).
    amrdata.aux_type = ['center', 'capacity', 'yleft']

    # Flag using refinement routine flag2refine rather than richardson error
    amrdata.flag_richardson = False    # use Richardson?
    amrdata.flag2refine = True

    # steps to take on each level L between regriddings of level L+1:
    amrdata.regrid_interval = 3

    # width of buffer zone around flagged points:
    # (typically the same as regrid_interval so waves don't escape):
    amrdata.regrid_buffer_width = 2

    # clustering alg. cutoff for (# flagged pts) / (total # of cells refined)
    # (closer to 1.0 => more small grids may be needed to cover flagged cells)
    amrdata.clustering_cutoff = 0.700000

    # print info about each regridding up to this level:
    amrdata.verbosity_regrid = 0

    #  ----- For developers -----
    # Toggle debugging print statements:
    amrdata.dprint = False      # print domain flags
    amrdata.eprint = False      # print err est flags
    amrdata.edebug = False      # even more err est flags
    amrdata.gprint = False      # grid bisection/clustering
    amrdata.nprint = False      # proper nesting output
    amrdata.pprint = False      # proj. of tagged points
    amrdata.rprint = False      # print regridding summary
    amrdata.sprint = False      # space/memory output
    amrdata.tprint = True       # time step reporting each level
    amrdata.uprint = False      # update/upbnd reporting

    # More AMR parameters can be set -- see the defaults in pyclaw/data.py

    # ---------------
    # Regions:
    # ---------------
    rundata.regiondata.regions = []
    # to specify regions of refinement append lines of the form
    #  [minlevel,maxlevel,t1,t2,x1,x2,y1,y2]

    if 0:
        # Allow only level 1 as default everywhere:
        rundata.regiondata.regions.append([1, 1, 0., 1e9, -180, 180, -90, 90])

        # Force refinement around earthquake source region for first hour:
        rundata.regiondata.regions.append([3, 3, 0., 3600., -85, -72, -38, -25])

        # Allow up to level 3 in northeastern part of domain:
        rundata.regiondata.regions.append([1, 3, 0., 1.e9, -90, -60, -30, 0])

    # ---------------
    # Gauges:
    # ---------------
    rundata.gaugedata.gauges = []
    # for gauges append lines of the form  [gaugeno, x, y, t1, t2]
    #rundata.gaugedata.gauges.append([32412, -86.392, -17.975, 0., 1.e10])
    #rundata.gaugedata.gauges.append([123, -80., -7., 0., 1.e10])

    return rundata
    # end of function setrun
    # ----------------------
#-------------------
def setgeo(rundata):
#-------------------
    """
    Set GeoClaw specific runtime parameters.

    Fills in the geo, refinement, topography, moving-topography, qinit and
    fixed-grid sections of *rundata* and returns it.

    Raises
    ------
    AttributeError
        If *rundata* carries no ``geo_data`` attribute (i.e. it was not
        created with claw_pkg='geoclaw').
    """

    try:
        geo_data = rundata.geo_data
    except AttributeError:
        # Bug fix: this used a bare `except:` and a Python-2 `print`
        # statement, which is a SyntaxError under Python 3.  Only a missing
        # attribute is expected here.
        print("*** Error, this rundata has no geo_data attribute")
        raise AttributeError("Missing geo_data attribute")

    # == Physics ==
    geo_data.gravity = 9.81
    geo_data.coordinate_system = 2      # 2 => latitude-longitude
    geo_data.earth_radius = 6367.5e3

    # == Forcing Options
    geo_data.coriolis_forcing = False

    # == Algorithm and Initial Conditions ==
    geo_data.sea_level = 0.0
    geo_data.dry_tolerance = 1.e-3
    geo_data.friction_forcing = True
    geo_data.manning_coefficient = .025
    geo_data.friction_depth = 1e6

    # Refinement settings
    refinement_data = rundata.refinement_data
    refinement_data.variable_dt_refinement_ratios = True
    refinement_data.wave_tolerance = 0.1
    refinement_data.deep_depth = 1e2
    refinement_data.max_level_deep = 3

    # == settopo.data values ==
    topo_data = rundata.topo_data
    # for topography, append lines of the form
    #    [topotype, minlevel, maxlevel, t1, t2, fname]
    topo_path = os.path.join(scratch_dir, 'etopo1_-140_-60_-60_10_10min.tt3')
    topo_data.topofiles.append([3, 1, 3, 0., 1.e10, topo_path])

    # == setdtopo.data values ==
    dtopo_data = rundata.dtopo_data
    # for moving topography, append lines of the form :   (<= 1 allowed for now!)
    #   [topotype, minlevel,maxlevel,fname]
    dtopo_path = os.path.join(scratch_dir, 'dtopo_usgs100227.tt3')
    dtopo_data.dtopofiles.append([3, 3, 3, dtopo_path])
    dtopo_data.dt_max_dtopo = 0.2

    # == setqinit.data values ==
    rundata.qinit_data.qinit_type = 0
    rundata.qinit_data.qinitfiles = []
    # for qinit perturbations, append lines of the form: (<= 1 allowed for now!)
    #   [minlev, maxlev, fname]

    # == setfixedgrids.data values ==
    fixed_grids = rundata.fixed_grid_data
    # for fixed grids append lines of the form
    # [t1,t2,noutput,x1,x2,y1,y2,xpoints,ypoints,\
    #  ioutarrivaltimes,ioutsurfacemax]

    return rundata
    # end of function setgeo
    # ----------------------
if __name__ == '__main__':
    # Set up run-time parameters and write all data files.
    # Any extra command-line arguments are forwarded to setrun() (the first,
    # if present, overrides claw_pkg).
    import sys
    rundata = setrun(*sys.argv[1:])
    rundata.write()
|
clawpack/geoclaw_tutorial_csdms2016
|
chile2010a/setrun_original.py
|
Python
|
bsd-2-clause
| 13,629
|
[
"NetCDF"
] |
bcbbc2b0371216da1088438e69eb52e74c00e1e6445e7d8db2ea1afa24931d2b
|
#!/usr/bin/env python3
import sys
import os
import argparse
def write_pymol_arrows(base, structs, scale, color, radius, hradius, hlength, threshold):
    """Write a PyMOL script drawing one cgo_arrow per atom-gradient.

    The script is written to ``<base>_arrows.pymol``.  Each atom of each
    structure yields an arrow from its position along its (scaled) gradient;
    arrows whose scaled gradient magnitude does not exceed *threshold* are
    omitted from the script, and all arrows are grouped under
    ``<base>_arrows``.
    """
    threshold_sq = threshold**2
    scale_sq = scale**2
    script_lines = []
    arrow_names = set()
    for state_idx, struct in enumerate(structs):
        for atom_idx, atom in enumerate(struct):
            arrow_name = base + '_arrow_' + str(atom_idx)
            arrow_names.add(arrow_name)
            elem, x0, y0, z0, gx, gy, gz = atom
            x1 = x0 + scale*gx
            y1 = y0 + scale*gy
            z1 = z0 + scale*gz
            cmd = 'cgo_arrow [{}, {}, {}], [{}, {}, {}]'.format(x0, y0, z0, x1, y1, z1)
            if len(structs) > 1:
                # Multi-frame input: tie each arrow to its PyMOL state.
                cmd += ', state={}'.format(state_idx + 1)
            if radius:
                cmd += ', radius={}'.format(radius)
            if hradius > 0:
                cmd += ', hradius={}'.format(hradius)
            if hlength > 0:
                cmd += ', hlength={}'.format(hlength)
            if color:
                cmd += ', color={}'.format(color)
            cmd += ', name={}'.format(arrow_name)
            if (gx**2 + gy**2 + gz**2)*scale_sq > threshold_sq:
                script_lines.append(cmd)
    script_lines.append('group {}, {}'.format(base + '_arrows', ' '.join(arrow_names)))
    with open(base + '_arrows.pymol', 'w') as f:
        f.write('\n'.join(script_lines))
def xyz_line_to_atom(xyz_line):
    """Parse one xyz-with-gradient line into (elem, x, y, z, dx, dy, dz).

    Raises IndexError if the line has fewer than seven whitespace-separated
    fields, and ValueError if a numeric field does not parse as float.
    """
    tokens = xyz_line.split()
    # Index explicitly (rather than slicing) so a short line raises
    # IndexError, matching the original field-by-field access.
    numeric = [float(tokens[pos]) for pos in range(1, 7)]
    return (tokens[0],) + tuple(numeric)
def atom_to_pdb_line(atom, idx, dosum):
    """Format one atom as a fixed-width PDB ATOM record.

    The gradient is stored in the temp-factor (b-factor) column: either the
    component sum (dosum=True) or the Euclidean magnitude (dosum=False).

    Parameters
    ----------
    atom : tuple
        (elem, x, y, z, dx, dy, dz) as produced by xyz_line_to_atom.
    idx : int
        Atom serial number; must be in [0, 99999] to fit the 5-char field.
    dosum : bool
        Select sum vs. magnitude for the b-factor value.

    Raises
    ------
    TypeError
        If idx is not an int in range.
    IndexError
        If the element symbol is not 1 or 2 characters long.
    """
    if not isinstance(idx, int) or idx < 0 or idx > 99999:
        raise TypeError('idx must be an integer from 0 to 99999 ({})'.format(idx))
    elem, x, y, z, dx, dy, dz = atom
    if len(elem) not in {1, 2}:
        raise IndexError('atom elem must be a string of length 1 or 2 ({})'.format(elem))
    if dosum:
        d = dx+dy+dz
    else:
        d = (dx**2 + dy**2 + dz**2)**0.5
    # Bug fix: the b-factor field used '{:6f}' (default 6 decimal places),
    # which overflows the 6-character fixed-width PDB temp-factor column
    # (e.g. '4.000000'); PDB expects %6.2f there, like the occupancy field.
    return '{:6}{:5} {:4}{:1}{:3} {:1}{:4}{:1} {:8.3f}{:8.3f}{:8.3f}{:6.2f}{:6.2f} {:2}{:2}' \
        .format('ATOM', idx, '', '', '', '', '', '', x, y, z, 1.0, d, elem.rjust(2), '')
def read_xyz_file(xyz_file, header_len=2):
    """Read a (possibly multi-frame) xyz-with-gradients file.

    Parameters
    ----------
    xyz_file : str
        Path to an .xyz file whose atom lines carry three extra gradient
        columns (parsed by xyz_line_to_atom).
    header_len : int
        Number of header lines per frame (atom-count line + comment line).

    Returns
    -------
    list of list of tuple
        One list of atom tuples per frame.
    """
    with open(xyz_file, 'r') as f:
        lines = f.readlines()
    structs = []
    struct_start = 0
    for i, line in enumerate(lines):
        try:
            # line index relative to struct start
            j = i - struct_start
            # NOTE: on the very first line j == 0 short-circuits the `or`,
            # so n_atoms is always bound before it is read here.
            if j == 0 or j >= header_len + n_atoms:
                # First line of a new frame: the atom count.
                struct_start = i
                structs.append([])
                n_atoms = int(lines[i])
            elif j < header_len:
                # Remaining header line(s), e.g. the comment line: skip.
                continue
            else:
                atom = xyz_line_to_atom(lines[i])
                structs[-1].append(atom)
        except:
            # Report file/line context on stderr, then re-raise unchanged.
            print('{}:{} {}'.format(xyz_file, i, repr(line)), file=sys.stderr)
            raise
    return structs
def write_pdb_file(pdb_file, atoms, dosum):
    """Write *atoms* as PDB ATOM records to *pdb_file*, or stdout if falsy."""
    records = [atom_to_pdb_line(atom, serial, dosum)
               for serial, atom in enumerate(atoms)]
    text = '\n'.join(records)
    if pdb_file:
        with open(pdb_file, 'w') as handle:
            handle.write(text)
    else:
        print(text)
def parse_args():
    """Define and parse this script's command-line arguments from sys.argv.

    Returns an argparse.Namespace with: xyz_file, scale, color, radius,
    hradius, hlength, pdb_file (bool flag), sum (bool flag), threshold.
    """
    parser = argparse.ArgumentParser(description='Output a pymol script that creates \
arrows from an .xyz file containing atom coordinates and gradient components, \
can also create a .pdb file where the b-factor is the gradient magnitude')
    parser.add_argument('xyz_file')
    parser.add_argument('-s', '--scale', type=float, default=1.0,
                        help='Arrow length scaling factor')
    parser.add_argument('-c', '--color', type=str, default='',
                        help='Arrow color or pair of colors, e.g. "white black"')
    parser.add_argument('-r', '--radius', type=float, default=0.2,
                        help='Radius of arrow body')
    parser.add_argument('-hr', '--hradius', type=float, default=-1,
                        help='Radius of arrow head')
    parser.add_argument('-hl', '--hlength', type=float, default=-1,
                        help='Length of arrow head')
    parser.add_argument('-p', '--pdb_file', action='store_true', default=False,
                        help='Output a .pdb file where the b-factor is gradient magnitude')
    parser.add_argument('--sum', action='store_true', default=False,
                        help='Sum gradient components instead of taking magnitude')
    parser.add_argument('-t', '--threshold', type=float, default=0,
                        help="Gradient threshold for drawing arrows (using scale factor)")
    return parser.parse_args()
if __name__ == '__main__':
    args = parse_args()
    structs = read_xyz_file(args.xyz_file)
    base_name = args.xyz_file.replace('.xyz', '')
    write_pymol_arrows(base_name, structs, args.scale, args.color, args.radius,
                       args.hradius, args.hlength, args.threshold)
    if args.pdb_file:
        pdb_file = base_name + '.pdb'
        # Bug fix: this previously passed an undefined name `atoms`, raising
        # NameError whenever -p/--pdb_file was given.  Use the first parsed
        # structure's atoms for the b-factor PDB output.
        write_pdb_file(pdb_file, structs[0], args.sum)
|
gnina/scripts
|
pymol_arrows.py
|
Python
|
bsd-3-clause
| 5,070
|
[
"PyMOL"
] |
b439473ee026ef10aca592772399fce4a2dc3bcd1fcb84d1b04574a40e72faa0
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import os
import re
import sys
import uuid
import warnings
from collections import Counter
from itertools import product
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import Any, Dict, Union
import numpy as np
import qcelemental as qcel
from psi4 import core
from psi4.driver import qcdb
from . import optproc
from .exceptions import TestComparisonError, ValidationError, UpgradeHelper
## Python basis helps
@staticmethod
def _pybuild_basis(mol,
                   key=None,
                   target=None,
                   fitrole='ORBITAL',
                   other=None,
                   puream=-1,
                   return_atomlist=False,
                   quiet=False):
    """Construct a BasisSet for *mol*; bound below as ``core.BasisSet.build``.

    Parameters
    ----------
    mol : core.Molecule
        Molecule to which the basis is applied.
    key : str, optional
        Option keyword naming the basis role (e.g. 'BASIS', 'DF_BASIS_SCF');
        'ORBITAL' is treated as an alias for 'BASIS'.
    target : str or callable, optional
        Basis-set name or a basis-spec function; if falsy, taken from the
        current global value of *key*.
    fitrole : str
        Role string forwarded to the qcdb basis constructor.
    other : str, optional
        Orbital basis name used when resolving fitting bases.
    puream : int
        -1 to take the basis default; otherwise forces pure/cartesian.
    return_atomlist : bool
        If True, return a list of single-atom BasisSet objects instead of
        one molecular basis.
    quiet : bool
        Suppress the constructor's citation/summary printout when True.
    """
    if key == 'ORBITAL':
        key = 'BASIS'

    def _resolve_target(key, target):
        """Figure out exactly what basis set was intended by (key, target)
        """
        horde = qcdb.libmintsbasisset.basishorde
        if not target:
            if not key:
                key = 'BASIS'
            target = core.get_global_option(key)

        # Names registered by basis_helper() resolve to spec functions.
        if target in horde:
            return horde[target]
        return target

    # Figure out what exactly was meant by 'target'.
    resolved_target = _resolve_target(key, target)

    # resolved_target needs to be either a string or function for pyconstuct.
    # if a string, they search for a gbs file with that name.
    # if a function, it needs to apply a basis to each atom.
    bs, basisdict = qcdb.BasisSet.pyconstruct(mol.to_dict(),
                                              key,
                                              resolved_target,
                                              fitrole,
                                              other,
                                              return_dict=True,
                                              return_atomlist=return_atomlist)

    if return_atomlist:
        # Build one core.BasisSet per atom from the per-atom dictionaries.
        atom_basis_list = []
        for atbs in basisdict:
            atommol = core.Molecule.from_dict(atbs['molecule'])
            lmbs = core.BasisSet.construct_from_pydict(atommol, atbs, puream)
            atom_basis_list.append(lmbs)
        return atom_basis_list
    if isinstance(resolved_target, str):
        # Strip any directory path / .gbs extension from the reported name.
        basisdict['name'] = basisdict['name'].split('/')[-1].replace('.gbs', '')
    if callable(resolved_target):
        # Spec functions carry a mangled prefix; report a clean name.
        basisdict['name'] = resolved_target.__name__.replace('basisspec_psi4_yo__', '').upper()

    if not quiet:
        core.print_out(basisdict['message'])
        if 'ECP' in basisdict['message']:
            core.print_out(' !!! WARNING: ECP capability is in beta. Please check occupations closely. !!!\n\n')

    if basisdict['key'] is None:
        basisdict['key'] = 'BASIS'
    psibasis = core.BasisSet.construct_from_pydict(mol, basisdict, puream)
    return psibasis


core.BasisSet.build = _pybuild_basis
## Python wavefunction helps
@staticmethod
def _core_wavefunction_build(mol, basis=None):
    """Build a bare Wavefunction; bound below as ``core.Wavefunction.build``.

    Parameters
    ----------
    mol : core.Molecule
        Molecule for the wavefunction.
    basis : core.BasisSet or str, optional
        Orbital basis object or name; when omitted, resolved from the
        current global BASIS option.
    """
    if basis is None:
        basis = core.BasisSet.build(mol)
    elif isinstance(basis, str):
        basis = core.BasisSet.build(mol, "ORBITAL", basis)

    wfn = core.Wavefunction(mol, basis)
    # Set basis for density-fitted calculations to the zero basis...
    # ...until the user explicitly provides a DF basis.
    wfn.set_basisset("DF_BASIS_SCF", core.BasisSet.zero_ao_basis_set())
    return wfn


core.Wavefunction.build = _core_wavefunction_build
def _core_wavefunction_get_scratch_filename(self, filenumber):
    """ Given a wavefunction and a scratch file number, canonicalizes the name
    so that files can be consistently written and read """
    # Reuse the writer prefix derived from the molecule name (basename only)
    # and root the file in the configured scratch directory.
    fname = os.path.split(os.path.abspath(core.get_writer_file_prefix(self.molecule().name())))[1]
    psi_scratch = core.IOManager.shared_object().get_default_path()
    return os.path.join(psi_scratch, fname + '.' + str(filenumber))


core.Wavefunction.get_scratch_filename = _core_wavefunction_get_scratch_filename
@staticmethod
def _core_wavefunction_from_file(wfn_data: Union[str, Dict, Path]) -> core.Wavefunction:
    r"""Build Wavefunction from data.

    Parameters
    ----------
    wfn_data
        If a dict, use data directly. Otherwise, path-like passed to :py:func:`numpy.load`
        to read from disk.

    Returns
    -------
    Wavefunction
        A deserialized Wavefunction object
    """
    # load the wavefunction from file
    if isinstance(wfn_data, dict):
        pass
    elif isinstance(wfn_data, str):
        # Accept a bare basename; to_file()/np.save append ".npy" on write.
        if not wfn_data.endswith(".npy"):
            wfn_data = wfn_data + ".npy"
        wfn_data = np.load(wfn_data, allow_pickle=True).item()
    else:
        # Could be path-like or file-like, let `np.load` handle it
        wfn_data = np.load(wfn_data, allow_pickle=True).item()

    # variable type specific dictionaries to be passed into C++ constructor
    wfn_matrix = wfn_data['matrix']
    wfn_vector = wfn_data['vector']
    wfn_dimension = wfn_data['dimension']
    wfn_int = wfn_data['int']
    wfn_string = wfn_data['string']
    wfn_boolean = wfn_data['boolean']
    wfn_float = wfn_data['float']
    wfn_floatvar = wfn_data['floatvar']
    wfn_matrixarr = wfn_data['matrixarr']

    # reconstruct molecule from dictionary representation
    wfn_molecule = wfn_data['molecule']
    molecule = core.Molecule.from_dict(wfn_molecule)

    # get basis set name and spherical harmonics boolean
    basis_name = wfn_string['basisname']
    if ".gbs" in basis_name:
        basis_name = basis_name.split('/')[-1].replace('.gbs', '')

    basis_puream = wfn_boolean['basispuream']
    basisset = core.BasisSet.build(molecule, 'ORBITAL', basis_name, puream=basis_puream)

    # change some variables to psi4 specific data types (Matrix, Vector, Dimension)
    for label in wfn_matrix:
        array = wfn_matrix[label]
        wfn_matrix[label] = core.Matrix.from_array(array, name=label) if array is not None else None

    for label in wfn_vector:
        array = wfn_vector[label]
        wfn_vector[label] = core.Vector.from_array(array, name=label) if array is not None else None

    for label in wfn_dimension:
        tup = wfn_dimension[label]
        wfn_dimension[label] = core.Dimension.from_list(tup, name=label) if tup is not None else None

    for label in wfn_matrixarr:
        array = wfn_matrixarr[label]
        wfn_matrixarr[label] = core.Matrix.from_array(array, name=label) if array is not None else None

    # make the wavefunction
    wfn = core.Wavefunction(molecule, basisset, wfn_matrix, wfn_vector, wfn_dimension, wfn_int, wfn_string,
                            wfn_boolean, wfn_float)

    # some of the wavefunction's variables can be changed directly
    for k, v in wfn_floatvar.items():
        wfn.set_variable(k, v)
    for k, v in wfn_matrixarr.items():
        wfn.set_variable(k, v)

    return wfn


core.Wavefunction.from_file = _core_wavefunction_from_file
def _core_wavefunction_to_file(wfn: core.Wavefunction, filename: str = None) -> Dict:
    """Converts a Wavefunction object to a base class

    Parameters
    ----------
    wfn
        A Wavefunction or inherited class
    filename
        An optional filename to write the data to

    Returns
    -------
    dict
        A dictionary and NumPy representation of the Wavefunction.

    Raises
    ------
    ValidationError
        If the wavefunction's basis set is anonymous (custom), since it
        could not be rebuilt by name on deserialization.
    """
    # collect the wavefunction's variables in a dictionary indexed by varaible type
    # some of the data types have to be made numpy-friendly first
    if wfn.basisset().name().startswith("anonymous"):
        raise ValidationError("Cannot serialize wavefunction with custom basissets.")
    wfn_data = {
        'molecule': wfn.molecule().to_dict(),
        'matrix': {
            'Ca': wfn.Ca().to_array() if wfn.Ca() else None,
            'Cb': wfn.Cb().to_array() if wfn.Cb() else None,
            'Da': wfn.Da().to_array() if wfn.Da() else None,
            'Db': wfn.Db().to_array() if wfn.Db() else None,
            'Fa': wfn.Fa().to_array() if wfn.Fa() else None,
            'Fb': wfn.Fb().to_array() if wfn.Fb() else None,
            'H': wfn.H().to_array() if wfn.H() else None,
            'S': wfn.S().to_array() if wfn.S() else None,
            'X': wfn.lagrangian().to_array() if wfn.lagrangian() else None,
            'aotoso': wfn.aotoso().to_array() if wfn.aotoso() else None,
            'gradient': wfn.gradient().to_array() if wfn.gradient() else None,
            'hessian': wfn.hessian().to_array() if wfn.hessian() else None
        },
        'vector': {
            'epsilon_a': wfn.epsilon_a().to_array() if wfn.epsilon_a() else None,
            'epsilon_b': wfn.epsilon_b().to_array() if wfn.epsilon_b() else None,
            'frequencies': wfn.frequencies().to_array() if wfn.frequencies() else None
        },
        'dimension': {
            'doccpi': wfn.doccpi().to_tuple(),
            'frzcpi': wfn.frzcpi().to_tuple(),
            'frzvpi': wfn.frzvpi().to_tuple(),
            'nalphapi': wfn.nalphapi().to_tuple(),
            'nbetapi': wfn.nbetapi().to_tuple(),
            'nmopi': wfn.nmopi().to_tuple(),
            'nsopi': wfn.nsopi().to_tuple(),
            'soccpi': wfn.soccpi().to_tuple()
        },
        'int': {
            'nalpha': wfn.nalpha(),
            'nbeta': wfn.nbeta(),
            'nfrzc': wfn.nfrzc(),
            'nirrep': wfn.nirrep(),
            'nmo': wfn.nmo(),
            'nso': wfn.nso(),
            'print': wfn.get_print(),
        },
        'string': {
            'name': wfn.name(),
            'module': wfn.module(),
            'basisname': wfn.basisset().name()
        },
        'boolean': {
            'PCM_enabled': wfn.PCM_enabled(),
            'same_a_b_dens': wfn.same_a_b_dens(),
            'same_a_b_orbs': wfn.same_a_b_orbs(),
            'density_fitted': wfn.density_fitted(),
            'basispuream': wfn.basisset().has_puream()
        },
        'float': {
            'energy': wfn.energy(),
            'efzc': wfn.efzc(),
            'dipole_field_x': wfn.get_dipole_field_strength()[0],
            'dipole_field_y': wfn.get_dipole_field_strength()[1],
            'dipole_field_z': wfn.get_dipole_field_strength()[2]
        },
        'floatvar': wfn.scalar_variables(),
        'matrixarr': {k: v.to_array() for k, v in wfn.array_variables().items()}
    }  # yapf: disable

    if filename is not None:
        # np.save always appends .npy when absent; mirror that so the
        # returned/declared name matches what lands on disk.
        if not filename.endswith('.npy'): filename += '.npy'
        np.save(filename, wfn_data, allow_pickle=True)

    return wfn_data


core.Wavefunction.to_file = _core_wavefunction_to_file
## Python JK helps
@staticmethod
def _core_jk_build(orbital_basis: core.BasisSet, aux: core.BasisSet = None, jk_type: str = None, do_wK: bool = None, memory: int = None) -> core.JK:
    """
    Constructs a Psi4 JK object from an input basis.

    Parameters
    ----------
    orbital_basis
        Orbital basis to use in the JK object.
    aux
        Optional auxiliary basis set for density-fitted tensors. Defaults
        to the DF_BASIS_SCF if set, otherwise the correspond JKFIT basis
        to the passed in `orbital_basis`.
    jk_type
        Type of JK object to build (DF, Direct, PK, etc). Defaults to the
        current global SCF_TYPE option.
    do_wK
        Whether the JK object should also build omega-K matrices.  Only
        forwarded (together with *memory*) when both are given.
    memory
        Memory budget forwarded to the JK backend.  NOTE(review): units
        (doubles vs. bytes) are not visible here -- confirm against
        core.JK.build_JK.

    Returns
    -------
    JK
        Uninitialized JK object.

    Example
    -------
    jk = psi4.core.JK.build(bas)
    jk.set_memory(int(5e8))  # 4GB of memory
    jk.initialize()
    ...
    jk.C_left_add(matrix)
    jk.compute()
    jk.C_clear()
    ...
    """
    optstash = optproc.OptionsState(["SCF_TYPE"])

    if jk_type is not None:
        core.set_global_option("SCF_TYPE", jk_type)

    if aux is None:
        if core.get_global_option("SCF_TYPE") == "DF":
            aux = core.BasisSet.build(orbital_basis.molecule(), "DF_BASIS_SCF", core.get_option("SCF", "DF_BASIS_SCF"),
                                      "JKFIT", orbital_basis.name(), orbital_basis.has_puream())
        else:
            # Non-DF builds need no auxiliary basis; pass the zero basis.
            aux = core.BasisSet.zero_ao_basis_set()

    if (do_wK is None) or (memory is None):
        jk = core.JK.build_JK(orbital_basis, aux)
    else:
        jk = core.JK.build_JK(orbital_basis, aux, bool(do_wK), int(memory))

    # Restore whatever SCF_TYPE the caller had before the override above.
    optstash.restore()
    return jk


core.JK.build = _core_jk_build
## Grid Helpers
def _core_vbase_get_np_xyzw(Vpot):
    """
    Returns the x, y, z, and weights of a grid as a tuple of NumPy array objects.
    """
    # Gather per-block coordinate and weight arrays, then concatenate each
    # component across all blocks of the potential's grid.
    xs, ys, zs, ws = [], [], [], []
    for block_idx in range(Vpot.nblocks()):
        grid_block = Vpot.get_block(block_idx)
        xs.append(grid_block.x())
        ys.append(grid_block.y())
        zs.append(grid_block.z())
        ws.append(grid_block.w())

    return tuple(np.hstack(component) for component in (xs, ys, zs, ws))


core.VBase.get_np_xyzw = _core_vbase_get_np_xyzw
## Python other helps
def set_options(options_dict: Dict[str, Any], verbose: int = 1) -> None:
    """Sets Psi4 options from an input dictionary.

    Parameters
    ----------
    options_dict
        Dictionary where keys are "option_name" for global options or
        "module_name__option_name" (double underscore separation) for
        option local to "module_name". Values are the option value. All
        are case insensitive.
    verbose
        Control print volume.

    Raises
    ------
    ValidationError
        If any option name/value was rejected by the options backend;
        all rejections are collected and reported together.
    """
    # Splits "MODULE__OPTION" into its two parts; the module group keeps the
    # trailing "__" and is trimmed below.
    optionre = re.compile(r'\A(?P<module>\w+__)?(?P<option>\w+)\Z', re.IGNORECASE)

    rejected = {}

    for k, v, in options_dict.items():
        mobj = optionre.match(k.strip())
        module = mobj.group('module').upper()[:-2] if mobj.group('module') else None
        option = mobj.group('option').upper()

        if module:
            if ((module, option, v) not in [('SCF', 'GUESS', 'READ')]) and ((module, option) not in [('PCM', 'INPUT')]):
                # TODO guess/read exception is for distributed driver. should be handled differently.
                try:
                    core.set_local_option(module, option, v)
                except RuntimeError as err:
                    rejected[k] = (v, err)
                if verbose > 1:
                    print('Setting: core.set_local_option', module, option, v)

            if (module, option) == ("PCM", "INPUT"):
                # PCM input blocks are routed through the PCMSolver parser
                # rather than the regular option machinery.
                pcm_helper(v)

        else:
            try:
                core.set_global_option(option, v)
            except RuntimeError as err:
                rejected[k] = (v, err)
            if verbose > 1:
                print('Setting: core.set_global_option', option, v)

    if rejected:
        raise ValidationError(f'Error setting options: {rejected}')
    # TODO could subclass ValidationError and append rejected so that run_json could handle remanants.
def set_module_options(module: str, options_dict: Dict[str, Any]) -> None:
    """
    Sets Psi4 module options from a module specification and input dictionary.

    Deprecated in favor of ``psi4.set_options({"<module>__<key>": <val>})``;
    emits a FutureWarning on every call.
    """
    warnings.warn(
        "Using `psi4.set_module_options(<module>, {<key>: <val>})` instead of `psi4.set_options({<module>__<key>: <val>})` is deprecated, and in 1.5 it will stop working\n",
        category=FutureWarning,
        stacklevel=2)

    upper_module = module.upper()
    for option_name, option_value in options_dict.items():
        core.set_local_option(upper_module, option_name.upper(), option_value)
## OEProp helpers
def pcm_helper(block: str):
    """
    Passes multiline string *block* to PCMSolver parser.

    Parameters
    ----------
    block
        multiline string with PCM input in PCMSolver syntax.
    """
    import pcmsolver

    # PCMSolver's parser reads from a file, so round-trip the block through
    # a throwaway temp file (deleted when the context exits).
    with NamedTemporaryFile(mode="w+t", delete=True) as fl:
        fl.write(block)
        fl.flush()
        parsed_pcm = pcmsolver.parse_pcm_input(fl.name)

    # delete=False: the parsed file must outlive this function so the PCM
    # module can read it later via the option set below.  NOTE(review):
    # nothing visible here deletes it afterwards -- presumably cleaned up
    # elsewhere; confirm.
    with NamedTemporaryFile(mode="w+t", delete=False) as fl:
        fl.write(parsed_pcm)
        core.set_local_option("PCM", "PCMSOLVER_PARSED_FNAME", fl.name)
def basname(name):
    """Imitates BasisSet.make_filename() without the gbs extension"""
    # Lowercase, then map filename-hostile characters in one pass:
    # '+' -> 'p', '*' -> 's', and '(', ')', ',' -> '_'.
    char_map = str.maketrans({'+': 'p', '*': 's', '(': '_', ')': '_', ',': '_'})
    return name.lower().translate(char_map)
def temp_circular_import_blocker():
    # Intentional no-op.  NOTE(review): presumably kept as a placeholder to
    # work around an import cycle elsewhere -- confirm before removing.
    pass
def basis_helper(block, name='', key='BASIS', set_option=True):
    """For PsiAPI mode, forms a basis specification function from *block*
    and associates it with keyword *key* under handle *name*. Registers
    the basis spec with Psi4 so that it can be applied again to future
    molecules. For usage, see mints2, mints9, and cc54 test cases. Unless
    *set_option* is False, *name* will be set as current active *key*,
    equivalent to `set key name` or `set_option({key: name})`.
    """
    key = key.upper()
    # Anonymous bases get a unique handle (and cannot be serialized later).
    name = ('anonymous' + str(uuid.uuid4())[:8]) if name == '' else name
    cleanbas = basname(name).replace('-', '')  # further remove hyphens so can be function name
    block = qcel.util.filter_comments(block)
    command_lines = re.split('\n', block)

    # "assign <symbol> <basis>"  (element symbol only)
    symbol_re = re.compile(r'^\s*assign\s+(?P<symbol>[A-Z]{1,3})\s+(?P<basis>[-+*\(\)\w]+)\s*$', re.IGNORECASE)
    # "assign <label> <basis>"   (symbol plus _suffix or numeric label)
    label_re = re.compile(
        r'^\s*assign\s+(?P<label>(?P<symbol>[A-Z]{1,3})(?:(_\w+)|(\d+))?)\s+(?P<basis>[-+*\(\)\w]+)\s*$',
        re.IGNORECASE)
    # "assign <basis>"           (applies to all atoms)
    all_re = re.compile(r'^\s*assign\s+(?P<basis>[-+*\(\)\w]+)\s*$', re.IGNORECASE)
    # "[basname]" section markers separating inline shell definitions.
    basislabel = re.compile(r'\s*\[\s*([-*\(\)\w]+)\s*\]\s*')

    def anon(mol, role):
        # Closure registered under the handle; applies this block's
        # assignments to *mol* and returns the inline shell strings.
        basstrings = {}

        # Start by looking for assign lines, and remove them
        leftover_lines = []
        assignments = False
        for line in command_lines:
            if symbol_re.match(line):
                m = symbol_re.match(line)
                mol.set_basis_by_symbol(m.group('symbol'), m.group('basis'), role=role)
                assignments = True

            elif label_re.match(line):
                m = label_re.match(line)
                mol.set_basis_by_label(m.group('label'), m.group('basis'), role=role)
                assignments = True

            elif all_re.match(line):
                m = all_re.match(line)
                mol.set_basis_all_atoms(m.group('basis'), role=role)
                assignments = True

            else:
                # Ignore blank lines and accumulate remainder
                if line and not line.isspace():
                    leftover_lines.append(line.strip())

        # Now look for regular basis set definitions
        basblock = list(filter(None, basislabel.split('\n'.join(leftover_lines))))
        if len(basblock) == 1:
            if not assignments:
                # case with no [basname] markers where whole block is contents of gbs file
                mol.set_basis_all_atoms(name, role=role)
                basstrings[basname(name)] = basblock[0]
            else:
                message = (
                    "Conflicting basis set specification: assign lines present but shells have no [basname] label."
                    "")
                raise TestComparisonError(message)
        else:
            # case with specs separated by [basname] markers
            for idx in range(0, len(basblock), 2):
                basstrings[basname(basblock[idx])] = basblock[idx + 1]

        return basstrings

    # The mangled prefix is stripped again for display in _pybuild_basis.
    anon.__name__ = 'basisspec_psi4_yo__' + cleanbas
    qcdb.libmintsbasisset.basishorde[name.upper()] = anon
    if set_option:
        core.set_global_option(key, name)
# Whitelist of property-method names attached to core.OEProp.
# NOTE(review): the validation site that consumes this list is outside this
# chunk -- confirm usage before editing entries.
core.OEProp.valid_methods = [
    'DIPOLE', 'QUADRUPOLE', 'MULLIKEN_CHARGES', 'LOWDIN_CHARGES', 'WIBERG_LOWDIN_INDICES', 'MAYER_INDICES',
    'MBIS_CHARGES', 'MO_EXTENTS', 'GRID_FIELD', 'GRID_ESP', 'ESP_AT_NUCLEI', 'NO_OCCUPATIONS'
]
## Option helpers
def _core_set_global_option_python(key, EXTERN):
    """Route the EXTERN option Python-side.

    Stashes the external-potential object on a ``core`` module attribute and
    keeps the boolean C-side ``EXTERN`` flag in sync; a stopgap until the
    general Options overhaul.

    Raises:
        ValidationError: if *key* is not ``"EXTERN"`` or *EXTERN* is neither
            ``None`` nor a :class:`psi4.core.ExternalPotential`.
    """
    if key != "EXTERN":
        raise ValidationError("Options: set_global_option_python does not recognize keyword %s" % key)
    if EXTERN is not None and not isinstance(EXTERN, core.ExternalPotential):
        raise ValidationError("Options: set_global_option_python can either be a NULL or External Potential object")
    # Keep Python-side stash and C-side boolean flag consistent.
    core.EXTERN = EXTERN
    core.set_global_option("EXTERN", EXTERN is not None)


core.set_global_option_python = _core_set_global_option_python
## QCvar helps
# QCVariable names deprecated in favor of renamed successors: lookups of the
# old key are redirected (with a FutureWarning) to the new key by
# `_qcvar_warnings` below.
_qcvar_transitions = {
    "SCSN-MP2 CORRELATION ENERGY": "SCS(N)-MP2 CORRELATION ENERGY",
    "SCSN-MP2 TOTAL ENERGY": "SCS(N)-MP2 TOTAL ENERGY",
    "MAYER_INDICES": "MAYER INDICES",
    "WIBERG_LOWDIN_INDICES": "WIBERG LOWDIN INDICES",
    "LOWDIN_CHARGES": "LOWDIN CHARGES",
    "MULLIKEN_CHARGES": "MULLIKEN CHARGES",
    "(AT) CORRECTION ENERGY": "A-(T) CORRECTION ENERGY",
    "CCSD(AT) TOTAL ENERGY": "A-CCSD(T) TOTAL ENERGY",
    "CCSD(AT) CORRELATION ENERGY": "A-CCSD(T) CORRELATION ENERGY",
}

# QCVariables removed without a direct successor: maps each removed key to the
# surviving variables from which the quantity can be recomposed by hand.
# `_qcvar_warnings` raises UpgradeHelper for these.
_qcvar_cancellations = {
    "SCSN-MP2 SAME-SPIN CORRELATION ENERGY": ["MP2 SAME-SPIN CORRELATION ENERGY"],
    "SCSN-MP2 OPPOSITE-SPIN CORRELATION ENERGY": ["MP2 OPPOSITE-SPIN CORRELATION ENERGY"],
    "SCS-CCSD SAME-SPIN CORRELATION ENERGY": ["CCSD SAME-SPIN CORRELATION ENERGY"],
    "SCS-CCSD OPPOSITE-SPIN CORRELATION ENERGY": ["CCSD OPPOSITE-SPIN CORRELATION ENERGY"],
    "SCS-MP2 SAME-SPIN CORRELATION ENERGY": ["MP2 SAME-SPIN CORRELATION ENERGY"],
    "SCS-MP2 OPPOSITE-SPIN CORRELATION ENERGY": ["MP2 OPPOSITE-SPIN CORRELATION ENERGY"],
    "SCS(N)-OMP2 CORRELATION ENERGY": ["OMP2 SAME-SPIN CORRELATION ENERGY", "OMP2 OPPOSITE-SPIN CORRELATION ENERGY"],
    "SCS(N)-OMP2 TOTAL ENERGY": ["OMP2 SAME-SPIN CORRELATION ENERGY", "OMP2 OPPOSITE-SPIN CORRELATION ENERGY"],
    "SCSN-OMP2 CORRELATION ENERGY": ["OMP2 SAME-SPIN CORRELATION ENERGY", "OMP2 OPPOSITE-SPIN CORRELATION ENERGY"],
    "SCSN-OMP2 TOTAL ENERGY": ["OMP2 SAME-SPIN CORRELATION ENERGY", "OMP2 OPPOSITE-SPIN CORRELATION ENERGY"],
}
def _qcvar_warnings(key: str) -> str:
    """Emit deprecation warnings (or errors) for outdated QCVariable names.

    Returns the key to actually use: *key* itself, or its renamed successor
    from ``_qcvar_transitions``. Raises :class:`UpgradeHelper` for keys in
    ``_qcvar_cancellations`` that no longer have a direct replacement.
    """
    upkey = key.upper()
    # Scalar Cartesian components of multipoles are deprecated in favor of arrays.
    if upkey.endswith((" DIPOLE X", " DIPOLE Y", " DIPOLE Z")):
        warnings.warn(
            f"Using scalar QCVariable `{upkey}` [D] instead of array `{upkey[:-2]}` [e a0] is deprecated, and in 1.5 it will stop working\n",
            category=FutureWarning,
            stacklevel=3)
    if upkey.endswith((" QUADRUPOLE XX", " QUADRUPOLE YY", " QUADRUPOLE ZZ",
                       " QUADRUPOLE XY", " QUADRUPOLE XZ", " QUADRUPOLE YZ")):
        warnings.warn(
            f"Using scalar QCVariable `{upkey}` [D A] instead of array `{upkey[:-3]}` [e a0^2] is deprecated, and in 1.5 it will stop working\n",
            category=FutureWarning,
            stacklevel=3)
    if upkey in _qcvar_transitions:
        warnings.warn(
            f"Using QCVariable `{upkey}` instead of `{_qcvar_transitions[upkey]}` is deprecated, and in 1.5 it will stop working\n",
            category=FutureWarning,
            stacklevel=3)
        return _qcvar_transitions[upkey]
    if upkey in _qcvar_cancellations:
        raise UpgradeHelper(upkey, "no direct replacement", 1.4, " Consult QCVariables " + ", ".join(_qcvar_cancellations[upkey]) + " to recompose the quantity.")
    return key
_multipole_order = ["dummy", "dummy", "QUADRUPOLE", "OCTUPOLE", "HEXADECAPOLE"]
for order in range(5, 10):
_multipole_order.append(f"{int(2**order)}-POLE")
def _qcvar_reshape_set(key, val):
    """Reverse `_qcvar_reshape_get` for internal psi4.core.Matrix storage."""
    upkey = key.upper()
    if upkey.startswith("MBIS"):
        # Per-atom MBIS properties: charges stay flat; dipoles become (nat, 3);
        # higher moments are compressed from full Cartesian to unique components.
        if upkey.endswith("CHARGES"):
            return val
        elif upkey.endswith("DIPOLES"):
            return val.reshape((-1, 3))
        elif upkey.endswith("QUADRUPOLES"):
            full = val.reshape(-1, 3, 3)
            return np.array([_multipole_compressor(atom, 2) for atom in full])
        elif upkey.endswith("OCTUPOLES"):
            full = val.reshape(-1, 3, 3, 3)
            return np.array([_multipole_compressor(atom, 3) for atom in full])
        # any other MBIS key is stored as-is
        return val
    shape = None
    if upkey.endswith("DIPOLE"):
        shape = (1, 3)
    elif any(upkey.endswith(p) for p in _multipole_order):
        # compress the full Cartesian multipole, then store as a row vector
        val = _multipole_compressor(val, _multipole_order.index(upkey.split()[-1]))
        shape = (1, -1)
    elif upkey in ["MULLIKEN_CHARGES", "LOWDIN_CHARGES", "MULLIKEN CHARGES", "LOWDIN CHARGES"]:
        shape = (1, -1)
    return val if shape is None else val.reshape(shape)
def _qcvar_reshape_get(key, val):
    """For QCVariables where the 2D psi4.core.Matrix shape is unnatural, convert to natural shape in ndarray."""
    upkey = key.upper()
    if upkey.startswith("MBIS"):
        # Per-atom MBIS properties: expand stored unique components back to
        # full Cartesian arrays per atom.
        if upkey.endswith("CHARGES"):
            return val.np
        elif upkey.endswith("DIPOLES"):
            return val.np.reshape((-1, 3))
        elif upkey.endswith("QUADRUPOLES"):
            flat = val.np.reshape(-1, 6)
            return np.array([_multipole_plumper(row, 2) for row in flat])
        elif upkey.endswith("OCTUPOLES"):
            flat = val.np.reshape(-1, 10)
            return np.array([_multipole_plumper(row, 3) for row in flat])
        # any other MBIS key passes through unchanged
        return val
    if upkey.endswith("DIPOLE"):
        return val.np.reshape((3, ))
    if any(upkey.endswith(p) for p in _multipole_order):
        return _multipole_plumper(val.np.reshape((-1, )), _multipole_order.index(upkey.split()[-1]))
    if upkey in ["MULLIKEN_CHARGES", "LOWDIN_CHARGES", "MULLIKEN CHARGES", "LOWDIN CHARGES"]:
        return val.np.reshape((-1, ))
    return val
def _multipole_compressor(complete, order):
"""Form flat unique components multipole array from complete Cartesian array.
Parameters
----------
order : int
Multipole order. e.g., 1 for dipole, 4 for hexadecapole.
complete : ndarray
Multipole array, order-dimensional Cartesian array expanded to complete components.
Returns
-------
compressed : ndarray
Multipole array, length (order + 1) * (order + 2) / 2 compressed to unique components.
"""
compressed = []
for ii in range(order + 1):
lx = order - ii
for lz in range(ii + 1):
ly = ii - lz
np_index = []
for xval in range(lx):
np_index.append(0)
for yval in range(ly):
np_index.append(1)
for zval in range(lz):
np_index.append(2)
compressed.append(complete[tuple(np_index)])
assert len(compressed) == ((order + 1) * (order + 2) / 2)
return np.array(compressed)
def _multipole_plumper(compressed: np.ndarray, order: int) -> np.ndarray:
"""Form multidimensional multipole array from unique components array.
Parameters
----------
order
Multipole order. e.g., 1 for dipole, 4 for hexadecapole.
compressed
Multipole array, length (order + 1) * (order + 2) / 2 compressed to unique components.
Returns
-------
complete : numpy.ndarray
Multipole array, order-dimensional Cartesian array expanded to complete components.
"""
shape = tuple([3] * order)
complete = np.zeros(shape)
def compound_index(counter):
# thanks, https://www.pamoc.it/tpc_cart_mom.html Eqn 2.2!
# jn = nz + (ny + nz)(ny + nz + 1) / 2
return int(
counter.get("2", 0) + (counter.get("1", 0) + counter.get("2", 0)) *
(counter.get("1", 0) + counter.get("2", 0) + 1) / 2)
for idx in product("012", repeat=order):
xyz_counts = Counter(idx) # "010" --> {"0": 2, "1": 1}
np_index = tuple(int(x) for x in idx) # ('0', '1') --> (0, 1)
complete[np_index] = compressed[compound_index(xyz_counts)]
return complete
def _core_has_variable(key: str) -> bool:
    """Whether scalar or array QCVariable *key* has been set in global memory."""
    if core.has_scalar_variable(key):
        return True
    return core.has_array_variable(key)
def _core_wavefunction_has_variable(cls: core.Wavefunction, key: str) -> bool:
    """Whether scalar or array QCVariable *key* has been set on *self* :class:`psi4.core.Wavefunction`."""
    if cls.has_scalar_variable(key):
        return True
    return cls.has_array_variable(key)
def _core_variable(key: str) -> Union[float, core.Matrix, np.ndarray]:
    """Return copy of scalar or array QCVariable *key* from global memory.

    Returns
    -------
    float or numpy.ndarray or Matrix
        Scalar variables are returned as floats.
        Array variables not naturally 2D (like multipoles) are returned as
        :class:`numpy.ndarray` of natural dimensionality. Other array
        variables are returned as :py:class:`~psi4.core.Matrix` and may have
        an extra dimension with symmetry information.

    Raises
    ------
    KeyError
        If *key* (after deprecation remapping) has not been set.

    Example
    -------
    >>> psi4.gradient("hf/cc-pvdz")
    >>> psi4.variable("CURRENT ENERGY")
    -100.00985995185668
    >>> psi4.variable("CURRENT DIPOLE")
    array([ 0.        ,  0.        , -0.83217802])
    """
    # Remap deprecated names (warns or raises as appropriate).
    key = _qcvar_warnings(key)
    if core.has_scalar_variable(key):
        return core.scalar_variable(key)
    if core.has_array_variable(key):
        return _qcvar_reshape_get(key, core.array_variable(key))
    raise KeyError("psi4.core.variable: Requested variable " + key + " was not set!\n")
def _core_wavefunction_variable(cls: core.Wavefunction, key: str) -> Union[float, core.Matrix, np.ndarray]:
    """Return copy of scalar or array QCVariable *key* from *self* :class:`psi4.core.Wavefunction`.

    Returns
    -------
    float or numpy.ndarray or Matrix
        Scalar variables are returned as floats.
        Array variables not naturally 2D (like multipoles) are returned as
        :class:`numpy.ndarray` of natural dimensionality. Other array
        variables are returned as :py:class:`~psi4.core.Matrix` and may have
        an extra dimension with symmetry information.

    Raises
    ------
    KeyError
        If *key* (after deprecation remapping) has not been set.

    Example
    -------
    >>> g, wfn = psi4.gradient("hf/cc-pvdz", return_wfn=True)
    >>> wfn.variable("CURRENT ENERGY")
    -100.00985995185668
    >>> wfn.variable("CURRENT DIPOLE")
    array([ 0.        ,  0.        , -0.83217802])
    """
    # Remap deprecated names (warns or raises as appropriate).
    key = _qcvar_warnings(key)
    if cls.has_scalar_variable(key):
        return cls.scalar_variable(key)
    if cls.has_array_variable(key):
        return _qcvar_reshape_get(key, cls.array_variable(key))
    raise KeyError("psi4.core.Wavefunction.variable: Requested variable " + key + " was not set!\n")
def _core_set_variable(key: str, val: Union[core.Matrix, np.ndarray, float]) -> None:
    """Sets scalar or array QCVariable *key* to *val* in global memory.

    Refuses to overwrite an existing scalar variable with an array value and
    vice versa; ndarray values are reshaped for Matrix storage.
    """
    array_like = isinstance(val, (core.Matrix, np.ndarray))
    if array_like and core.has_scalar_variable(key):
        raise ValidationError("psi4.core.set_variable: Target variable " + key + " already a scalar variable!")
    if not array_like and core.has_array_variable(key):
        raise ValidationError("psi4.core.set_variable: Target variable " + key + " already an array variable!")
    if isinstance(val, core.Matrix):
        core.set_array_variable(key, val)
    elif isinstance(val, np.ndarray):
        core.set_array_variable(key, core.Matrix.from_array(_qcvar_reshape_set(key, val)))
    else:
        core.set_scalar_variable(key, val)

    # TODO _qcvar_warnings(key)
def _core_wavefunction_set_variable(cls: core.Wavefunction, key: str, val: Union[core.Matrix, np.ndarray, float]) -> None:
    """Sets scalar or array QCVariable *key* to *val* on *cls*.

    Refuses to overwrite an existing scalar variable with an array value and
    vice versa; ndarray values are reshaped for Matrix storage.
    """
    array_like = isinstance(val, (core.Matrix, np.ndarray))
    if array_like and cls.has_scalar_variable(key):
        raise ValidationError("psi4.core.Wavefunction.set_variable: Target variable " + key +
                              " already a scalar variable!")
    if not array_like and cls.has_array_variable(key):
        raise ValidationError("psi4.core.Wavefunction.set_variable: Target variable " + key +
                              " already an array variable!")
    if isinstance(val, core.Matrix):
        cls.set_array_variable(key, val)
    elif isinstance(val, np.ndarray):
        cls.set_array_variable(key, core.Matrix.from_array(_qcvar_reshape_set(key, val)))
    else:
        cls.set_scalar_variable(key, val)

    # TODO _qcvar_warnings(key)
def _core_del_variable(key: str) -> None:
    """Removes scalar or array QCVariable *key* from global memory if present."""
    # Scalar storage is checked first; at most one store is cleared.
    for present, drop in ((core.has_scalar_variable, core.del_scalar_variable),
                          (core.has_array_variable, core.del_array_variable)):
        if present(key):
            drop(key)
            break
def _core_wavefunction_del_variable(cls: core.Wavefunction, key: str) -> None:
    """Removes scalar or array QCVariable *key* from *cls* if present."""
    # Scalar storage is checked first; at most one store is cleared.
    for present, drop in ((cls.has_scalar_variable, cls.del_scalar_variable),
                          (cls.has_array_variable, cls.del_array_variable)):
        if present(key):
            drop(key)
            break
def _core_variables(include_deprecated_keys: bool = False) -> Dict[str, Union[float, core.Matrix, np.ndarray]]:
    """Return all scalar or array QCVariables from global memory.

    When *include_deprecated_keys* is True, deprecated aliases are added as
    duplicate entries pointing at the same values.
    """
    dicary = dict(core.scalar_variables())
    for k, v in core.array_variables().items():
        dicary[k] = _qcvar_reshape_get(k, v)
    if include_deprecated_keys:
        for old_key, current_key in _qcvar_transitions.items():
            if current_key in dicary:
                dicary[old_key] = dicary[current_key]
    return dicary
def _core_wavefunction_variables(cls, include_deprecated_keys: bool = False) -> Dict[str, Union[float, core.Matrix, np.ndarray]]:
    """Return all scalar or array QCVariables from *cls*.

    When *include_deprecated_keys* is True, deprecated aliases are added as
    duplicate entries pointing at the same values.
    """
    dicary = dict(cls.scalar_variables())
    for k, v in cls.array_variables().items():
        dicary[k] = _qcvar_reshape_get(k, v)
    if include_deprecated_keys:
        for old_key, current_key in _qcvar_transitions.items():
            if current_key in dicary:
                dicary[old_key] = dicary[current_key]
    return dicary
# Publish the Python-side QCVariable helpers as the supported
# psi4.core / psi4.core.Wavefunction API.
core.has_variable = _core_has_variable
core.variable = _core_variable
core.set_variable = _core_set_variable
core.del_variable = _core_del_variable
core.variables = _core_variables

core.Wavefunction.has_variable = _core_wavefunction_has_variable
core.Wavefunction.variable = _core_wavefunction_variable
core.Wavefunction.set_variable = _core_wavefunction_set_variable
core.Wavefunction.del_variable = _core_wavefunction_del_variable
core.Wavefunction.variables = _core_wavefunction_variables
## Psi4 v1.4 Export Deprecations

# Deprecated free-function accessors: each shim emits a FutureWarning, then
# delegates to its scalar-only or array-only successor.


def _core_get_variable(key):
    """
    .. deprecated:: 1.4
       Use :py:func:`psi4.variable` instead.
    """
    warnings.warn(
        "Using `psi4.core.get_variable` instead of `psi4.core.variable` (or `psi4.core.scalar_variable` for scalar variables only) is deprecated, and in 1.4 it will stop working\n",
        category=FutureWarning,
        stacklevel=2)
    return core.scalar_variable(key)


def _core_get_variables():
    """
    .. deprecated:: 1.4
       Use :py:func:`psi4.core.variables` instead.
    """
    warnings.warn(
        "Using `psi4.core.get_variables` instead of `psi4.core.variables` (or `psi4.core.scalar_variables` for scalar variables only) is deprecated, and in 1.4 it will stop working\n",
        category=FutureWarning,
        stacklevel=2)
    return core.scalar_variables()


def _core_get_array_variable(key):
    """
    .. deprecated:: 1.4
       Use :py:func:`psi4.variable` instead.
    """
    warnings.warn(
        "Using `psi4.core.get_array_variable` instead of `psi4.core.variable` (or `psi4.core.array_variable` for array variables only) is deprecated, and in 1.4 it will stop working\n",
        category=FutureWarning,
        stacklevel=2)
    return core.array_variable(key)


def _core_get_array_variables():
    """
    .. deprecated:: 1.4
       Use :py:func:`psi4.core.variables` instead.
    """
    warnings.warn(
        "Using `psi4.core.get_array_variables` instead of `psi4.core.variables` (or `psi4.core.array_variables` for array variables only) is deprecated, and in 1.4 it will stop working\n",
        category=FutureWarning,
        stacklevel=2)
    return core.array_variables()


# Install the deprecated names so old scripts keep working (with warnings).
core.get_variable = _core_get_variable
core.get_variables = _core_get_variables
core.get_array_variable = _core_get_array_variable
core.get_array_variables = _core_get_array_variables
# Deprecated Wavefunction accessors: each shim emits a FutureWarning, then
# delegates to its scalar-only or array-only successor on the instance.


def _core_wavefunction_get_variable(cls, key):
    """
    .. deprecated:: 1.4
       Use :py:func:`psi4.core.Wavefunction.variable` instead.
    """
    warnings.warn(
        "Using `psi4.core.Wavefunction.get_variable` instead of `psi4.core.Wavefunction.variable` (or `psi4.core.Wavefunction.scalar_variable` for scalar variables only) is deprecated, and in 1.4 it will stop working\n",
        category=FutureWarning,
        stacklevel=2)
    return cls.scalar_variable(key)


def _core_wavefunction_get_array(cls, key):
    """
    .. deprecated:: 1.4
       Use :py:func:`psi4.core.Wavefunction.variable` instead.
    """
    warnings.warn(
        "Using `psi4.core.Wavefunction.get_array` instead of `psi4.core.Wavefunction.variable` (or `psi4.core.Wavefunction.array_variable` for array variables only) is deprecated, and in 1.4 it will stop working\n",
        category=FutureWarning,
        stacklevel=2)
    return cls.array_variable(key)


def _core_wavefunction_set_array(cls, key, val):
    """
    .. deprecated:: 1.4
       Use :py:func:`psi4.core.Wavefunction.set_variable` instead.
    """
    warnings.warn(
        "Using `psi4.core.Wavefunction.set_array` instead of `psi4.core.Wavefunction.set_variable` (or `psi4.core.Wavefunction.set_array_variable` for array variables only) is deprecated, and in 1.4 it will stop working\n",
        category=FutureWarning,
        stacklevel=2)
    return cls.set_array_variable(key, val)


def _core_wavefunction_arrays(cls):
    """
    .. deprecated:: 1.4
       Use :py:func:`psi4.core.Wavefunction.variables` instead.
    """
    warnings.warn(
        "Using `psi4.core.Wavefunction.arrays` instead of `psi4.core.Wavefunction.variables` (or `psi4.core.Wavefunction.array_variables` for array variables only) is deprecated, and in 1.4 it will stop working\n",
        category=FutureWarning,
        stacklevel=2)
    return cls.array_variables()


# Install the deprecated names so old scripts keep working (with warnings).
core.Wavefunction.get_variable = _core_wavefunction_get_variable
core.Wavefunction.get_array = _core_wavefunction_get_array
core.Wavefunction.set_array = _core_wavefunction_set_array
core.Wavefunction.arrays = _core_wavefunction_arrays
def _core_wavefunction_frequencies(cls):
    """Return real vibration-only frequencies as a core.Vector, or None when
    no py-side ``frequency_analysis`` has been attached to this Wavefunction."""
    try:
        vibinfo = cls.frequency_analysis
    except AttributeError:
        return None
    # Drop translations/rotations, then keep only the real part of omega.
    vibonly = qcdb.vib.filter_nonvib(vibinfo)
    omega = qcdb.vib.filter_omega_to_real(vibonly['omega'].data)
    return core.Vector.from_array(omega)
def _core_wavefunction_legacy_frequencies(cls):
    """
    .. deprecated:: 1.4
    """
    warnings.warn(
        "Using `psi4.core.Wavefunction.legacy_frequencies` (accessing c-side member data) is deprecated, and in 1.4 it will stop working\n",
        category=FutureWarning,
        stacklevel=2)
    # NOTE(review): this shim is installed *as* `Wavefunction.legacy_frequencies`
    # below, yet delegates to cls.legacy_frequencies() -- once bound that looks
    # self-recursive; confirm the intended C-side method name (cf.
    # set_legacy_frequencies used by the setter shim).
    return cls.legacy_frequencies()


def _core_wavefunction_set_frequencies(cls, val):
    """
    .. deprecated:: 1.4
    """
    warnings.warn(
        "Using `psi4.core.Wavefunction.set_frequencies` (accessing c-side member data) instead of `psi4.core.Wavefunction.frequency_analysis` (py-side member data) is deprecated, and in 1.4 it will stop working\n",
        category=FutureWarning,
        stacklevel=2)
    return cls.set_legacy_frequencies(val)


# Install the frequency accessors (current + deprecated shims).
core.Wavefunction.frequencies = _core_wavefunction_frequencies
core.Wavefunction.legacy_frequencies = _core_wavefunction_legacy_frequencies
core.Wavefunction.set_frequencies = _core_wavefunction_set_frequencies
def _core_wavefunction_X(cls):
    """Deprecated alias for :py:func:`psi4.core.Wavefunction.lagrangian`.

    .. deprecated:: 1.4
       Use :py:func:`psi4.core.Wavefunction.lagrangian` instead.
    """
    warnings.warn(
        "Using `psi4.core.Wavefunction.X` instead of `psi4.core.Wavefunction.lagrangian` is deprecated, and in 1.5 it will stop working\n",
        category=FutureWarning,
        stacklevel=2)
    return cls.lagrangian()


core.Wavefunction.X = _core_wavefunction_X
## Psi4 v1.3 Export Deprecations

# Legacy gradient accessors kept alive only for the C++ optking code path.


def _core_get_gradient():
    """
    .. deprecated:: 1.2
    """
    warnings.warn(
        "Using `psi4.core.get_gradient` (only used internally for C++ optking; deprecated silently in 1.2) is deprecated, and in 1.5 (or whenever Py optking is adopted) it will stop working\n",
        category=FutureWarning,
        stacklevel=2)
    return core.get_legacy_gradient()


def _core_set_gradient(val):
    """
    .. deprecated:: 1.2
    """
    warnings.warn(
        "Using `psi4.core.set_gradient` (only used internally for C++ optking; deprecated silently in 1.2) is deprecated, and in 1.5 (or whenever Py optking is adopted) it will stop working\n",
        category=FutureWarning,
        stacklevel=2)
    return core.set_legacy_gradient(val)


core.get_gradient = _core_get_gradient
core.set_gradient = _core_set_gradient
def _core_doublet(A, B, transA, transB):
    """Multiply two matrices together.

    .. deprecated:: 1.4
       Use :py:func:`psi4.core.doublet` instead.
    """
    warnings.warn(
        "Using `psi4.core.Matrix.doublet` instead of `psi4.core.doublet` is deprecated, and in 1.4 it will stop working\n",
        category=FutureWarning,
        stacklevel=2)
    return core.doublet(A, B, transA, transB)


def _core_triplet(A, B, C, transA, transB, transC):
    """Multiply three matrices together.

    .. deprecated:: 1.4
       Use :py:func:`psi4.core.triplet` instead.
    """
    warnings.warn(
        "Using `psi4.core.Matrix.triplet` instead of `psi4.core.triplet` is deprecated, and in 1.4 it will stop working\n",
        category=FutureWarning,
        stacklevel=2)
    return core.triplet(A, B, C, transA, transB, transC)


# Installed as staticmethods on Matrix so the old Matrix.doublet/triplet
# call sites keep working (with warnings).
core.Matrix.doublet = staticmethod(_core_doublet)
core.Matrix.triplet = staticmethod(_core_triplet)
|
jturney/psi4
|
psi4/driver/p4util/python_helpers.py
|
Python
|
lgpl-3.0
| 44,073
|
[
"Psi4"
] |
975c3e0fc9e52c68fd6caa347d3aa826e7e19545efc2e4297cdf8fff171a7d50
|
##
# Copyright 2009-2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing OpenFOAM, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Xavier Besseron (University of Luxembourg)
"""
import os
import shutil
import stat
from distutils.version import LooseVersion
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.framework.easyblock import EasyBlock
from easybuild.tools.filetools import adjust_permissions, mkdir, run_cmd, run_cmd_qa
from easybuild.tools.modules import get_software_root
class EB_OpenFOAM(EasyBlock):
    """Support for building and installing OpenFOAM."""

    def __init__(self, *args, **kwargs):
        """Specify that OpenFOAM should be built in install dir."""
        super(EB_OpenFOAM, self).__init__(*args, **kwargs)

        # OpenFOAM's build scripts install into the source tree itself
        self.build_in_installdir = True

        self.wm_compiler = None
        self.wm_mplib = None
        # NOTE(review): self.mpipath is never assigned in this class, so
        # MPI_ARCH_PATH in make_module_extra ends up unset -- confirm intended
        self.mpipath = None
        self.openfoamdir = None
        self.thrdpartydir = None

        # derive the expected top-level subdirectory name from name/version;
        # foam-extend switched naming ('OpenFOAM-x-ext' -> 'foam-extend-x') at v3.0
        if 'extend' in self.name.lower():
            if LooseVersion(self.version) >= LooseVersion('3.0'):
                self.openfoamdir = 'foam-extend-%s' % self.version
            else:
                self.openfoamdir = 'OpenFOAM-%s-ext' % self.version
        else:
            self.openfoamdir = '-'.join([self.name, '-'.join(self.version.split('-')[:2])])
        self.log.debug("openfoamdir: %s" % self.openfoamdir)

    def extract_step(self):
        """Extract sources as expected by the OpenFOAM(-Extend) build scripts."""
        super(EB_OpenFOAM, self).extract_step()
        # make sure that the expected subdir is really there after extracting
        # if not, the build scripts (e.g., the etc/bashrc being sourced) will likely fail
        openfoam_installdir = os.path.join(self.installdir, self.openfoamdir)
        if not os.path.exists(openfoam_installdir):
            self.log.warning("Creating expected directory %s, and moving everything there" % openfoam_installdir)
            try:
                mkdir(openfoam_installdir)
                for fil in os.listdir(self.installdir):
                    if fil != self.openfoamdir:
                        source = os.path.join(self.installdir, fil)
                        target = os.path.join(openfoam_installdir, fil)
                        self.log.debug("Moving %s to %s" % (source, target))
                        shutil.move(source, target)
                os.chdir(openfoam_installdir)
            # Python 2 except syntax, kept for compatibility with old interpreters
            except OSError, err:
                self.log.error("Failed to move all files to %s: %s" % (openfoam_installdir, err))

    def configure_step(self):
        """Configure OpenFOAM build by setting appropriate environment variables."""
        # enable verbose build for debug purposes
        env.setvar("FOAM_VERBOSE", "1")

        # installation directory
        env.setvar("FOAM_INST_DIR", self.installdir)

        # third party directory
        self.thrdpartydir = "ThirdParty-%s" % self.version
        # only if third party stuff is actually installed
        if os.path.exists(self.thrdpartydir):
            # symlink so the build scripts find ThirdParty next to the sources
            os.symlink(os.path.join("..", self.thrdpartydir), self.thrdpartydir)
            env.setvar("WM_THIRD_PARTY_DIR", os.path.join(self.installdir, self.thrdpartydir))

        # compiler
        comp_fam = self.toolchain.comp_family()

        if comp_fam == toolchain.GCC:  #@UndefinedVariable
            self.wm_compiler = "Gcc"
        elif comp_fam == toolchain.INTELCOMP:  #@UndefinedVariable
            self.wm_compiler = "Icc"
            # make sure -no-prec-div is used with Intel compilers
            self.cfg.update('prebuildopts', 'CFLAGS="$CFLAGS -no-prec-div" CXXFLAGS="$CXXFLAGS -no-prec-div"')
        else:
            self.log.error("Unknown compiler family, don't know how to set WM_COMPILER")

        env.setvar("WM_COMPILER", self.wm_compiler)

        # set to an MPI unknown by OpenFOAM, since we're handling the MPI settings ourselves (via mpicc, etc.)
        # Note: this name must contain 'MPI' so the MPI version of the Pstream library is built (cf src/Pstream/Allwmake)
        self.wm_mplib = "EASYBUILDMPI"
        env.setvar("WM_MPLIB", self.wm_mplib)

        # parallel build spec
        env.setvar("WM_NCOMPPROCS", str(self.cfg['parallel']))

        # make sure lib/include dirs for dependencies are found
        openfoam_extend_v3 = 'extend' in self.name.lower() and LooseVersion(self.version) >= LooseVersion('3.0')
        if LooseVersion(self.version) < LooseVersion("2") or openfoam_extend_v3:
            # old(er) OpenFOAM expects each dep exported as <NAME>_SYSTEM/<NAME>_LIB_DIR/<NAME>_INCLUDE_DIR
            self.log.debug("List of deps: %s" % self.cfg.dependencies())
            for dep in self.cfg.dependencies():
                self.cfg.update('prebuildopts', "%s_SYSTEM=1" % dep['name'].upper())
                self.cfg.update('prebuildopts', "%(name)s_LIB_DIR=$EBROOT%(name)s/lib" % {'name': dep['name'].upper()})
                self.cfg.update('prebuildopts', "%(name)s_INCLUDE_DIR=$EBROOT%(name)s/include" % {'name': dep['name'].upper()})
        else:
            scotch = get_software_root('SCOTCH')
            if scotch:
                self.cfg.update('prebuildopts', "SCOTCH_ROOT=$EBROOTSCOTCH")

    def build_step(self):
        """Build OpenFOAM using make after sourcing script to set environment."""
        # the etc/bashrc sourced here relies on the env vars set in configure_step
        precmd = "source %s" % os.path.join(self.builddir, self.openfoamdir, "etc", "bashrc")

        # make directly in install directory
        cmd_tmpl = "%(precmd)s && %(prebuildopts)s %(makecmd)s" % {
            'precmd': precmd,
            'prebuildopts': self.cfg['prebuildopts'],
            'makecmd': os.path.join(self.builddir, self.openfoamdir, '%s'),
        }
        if 'extend' in self.name.lower() and LooseVersion(self.version) >= LooseVersion('3.0'):
            # foam-extend's interactive installer asks about optional components
            qa = {
                "Proceed without compiling ParaView [Y/n]": 'Y',
                "Proceed without compiling cudaSolvers? [Y/n]": 'Y',
            }
            # patterns for benign output that must not be mistaken for questions
            noqa = [
                ".* -o .*\.o",
                "checking .*",
                "warning.*",
                "configure: creating.*",
                "%s .*" % os.environ['CC'],
                "wmake .*",
            ]
            run_cmd_qa(cmd_tmpl % 'Allwmake.firstInstall', qa, no_qa=noqa, log_all=True, simple=True)
        else:
            run_cmd(cmd_tmpl % 'Allwmake', log_all=True, simple=True, log_output=True)

    def install_step(self):
        """Building was performed in install dir, so just fix permissions."""
        # fix permissions of OpenFOAM dir: world-readable, dirs world-executable
        fullpath = os.path.join(self.installdir, self.openfoamdir)
        adjust_permissions(fullpath, stat.S_IROTH, add=True, recursive=True, ignore_errors=True)
        adjust_permissions(fullpath, stat.S_IXOTH, add=True, recursive=True, onlydirs=True, ignore_errors=True)

        # fix permissions of ThirdParty dir and subdirs (also for 2.x)
        # if the thirdparty tarball is installed
        fullpath = os.path.join(self.installdir, self.thrdpartydir)
        if os.path.exists(fullpath):
            adjust_permissions(fullpath, stat.S_IROTH, add=True, recursive=True, ignore_errors=True)
            adjust_permissions(fullpath, stat.S_IXOTH, add=True, recursive=True, onlydirs=True, ignore_errors=True)

    def sanity_check_step(self):
        """Custom sanity check for OpenFOAM"""
        # platform subdir as named by OpenFOAM's build system for this compiler
        psubdir = "linux64%sDPOpt" % self.wm_compiler

        # binaries/libraries live in different trees depending on version/fork
        openfoam_extend_v3 = 'extend' in self.name.lower() and LooseVersion(self.version) >= LooseVersion('3.0')
        if openfoam_extend_v3 or LooseVersion(self.version) < LooseVersion("2"):
            toolsdir = os.path.join(self.openfoamdir, "applications", "bin", psubdir)
            libsdir = os.path.join(self.openfoamdir, "lib", psubdir)
            dirs = [toolsdir, libsdir]
        else:
            toolsdir = os.path.join(self.openfoamdir, "platforms", psubdir, "bin")
            libsdir = os.path.join(self.openfoamdir, "platforms", psubdir, "lib")
            dirs = [toolsdir, libsdir]

        # some randomly selected binaries
        # if one of these is missing, it's very likely something went wrong
        bins = [os.path.join(self.openfoamdir, "bin", x) for x in ["foamExec", "paraFoam"]] + \
               [os.path.join(toolsdir, "buoyant%sSimpleFoam" % x) for x in ["", "Boussinesq"]] + \
               [os.path.join(toolsdir, "%sFoam" % x) for x in ["boundary", "engine", "sonic"]] + \
               [os.path.join(toolsdir, "surface%s" % x) for x in ["Add", "Find", "Smooth"]] + \
               [os.path.join(toolsdir, x) for x in ["deformedGeom", "engineSwirl", "modifyMesh",
                                                    "refineMesh", "vorticity"]]

        # check for the Pstream and scotchDecomp libraries, there must be a dummy one and an mpi one
        if 'extend' in self.name.lower():
            libs = [os.path.join(libsdir, x, "libPstream.so") for x in ["dummy", "mpi"]] + \
                   [os.path.join(libsdir, "libscotchDecomp.so")]
        else:
            libs = [os.path.join(libsdir, x, "libPstream.so") for x in ["dummy", "mpi"]] + \
                   [os.path.join(libsdir, x, "libptscotchDecomp.so") for x in ["dummy", "mpi"]] +\
                   [os.path.join(libsdir, "libscotchDecomp.so")] + \
                   [os.path.join(libsdir, "dummy", "libscotchDecomp.so")]

        if not 'extend' in self.name.lower() and LooseVersion(self.version) >= LooseVersion("2.3.0"):
            # surfaceSmooth is replaced by surfaceLambdaMuSmooth is OpenFOAM v2.3.0
            bins.remove(os.path.join(toolsdir, "surfaceSmooth"))
            bins.append(os.path.join(toolsdir, "surfaceLambdaMuSmooth"))

        custom_paths = {
            'files': [os.path.join(self.openfoamdir, 'etc', x) for x in ["bashrc", "cshrc"]] + bins + libs,
            'dirs': dirs,
        }

        super(EB_OpenFOAM, self).sanity_check_step(custom_paths=custom_paths)

    def make_module_extra(self):
        """Define extra environment variables required by OpenFOAM"""

        txt = super(EB_OpenFOAM, self).make_module_extra()

        env_vars = [
            ("WM_PROJECT_VERSION", self.version),
            ("FOAM_INST_DIR", "$root"),
            ("WM_COMPILER", self.wm_compiler),
            ("WM_MPLIB", self.wm_mplib),
            # NOTE(review): self.mpipath is always None here (never assigned) -- confirm
            ("MPI_ARCH_PATH", self.mpipath),
            ("FOAM_BASH", os.path.join("$root", self.openfoamdir, "etc", "bashrc")),
            ("FOAM_CSH", os.path.join("$root", self.openfoamdir, "etc", "cshrc")),
        ]

        for (env_var, val) in env_vars:
            txt += self.moduleGenerator.set_environment(env_var, val)

        return txt
|
geimer/easybuild-easyblocks
|
easybuild/easyblocks/o/openfoam.py
|
Python
|
gpl-2.0
| 11,828
|
[
"ParaView"
] |
3896277cd144d896090c4b326baf82e66e92b06fc3feb36f09d670751ad6b0fd
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, print_function, unicode_literals
from __future__ import absolute_import
"""
This module provides a class used to describe the elastic tensor,
including methods used to fit the elastic tensor from linear response
stress-strain data
"""
from pymatgen.analysis.elasticity.tensors import TensorBase, \
voigt_map as vmap, reverse_voigt_map
from pymatgen.analysis.elasticity.stress import Stress
from pymatgen.analysis.elasticity.strain import Strain
import numpy as np
import warnings
import itertools
from six.moves import range
__author__ = "Maarten de Jong"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = "Joseph Montoya, Shyam Dwaraknath, Mark Asta, Anubhav Jain"
__version__ = "1.0"
__maintainer__ = "Joseph Montoya"
__email__ = "montoyjh@lbl.gov"
__status__ = "Development"
__date__ = "March 22, 2012"
class ElasticTensor(TensorBase):
    """
    This class extends TensorBase to describe the full 3x3x3x3 elastic
    tensor C_ijkl.  Most derived quantities (moduli, sound velocities,
    thermal conductivities, Debye temperatures) work on the 6x6 Voigt
    representation ``self.voigt`` supplied by the TensorBase parent.
    The moduli are presumed to be stored in GPa -- the SI-output methods
    multiply by 1e9 -- TODO confirm against callers.
    """
    def __new__(cls, input_array, tol=1e-3):
        """
        Create an ElasticTensor object. The constructor throws an error if
        the shape of the input_array argument is not 3x3x3x3, i. e. in true
        tensor notation. Issues a warning (not an error) if the input does
        not satisfy standard symmetries. Note that the constructor uses
        __new__ rather than __init__ according to the standard method of
        subclassing numpy ndarrays.

        Args:
            input_array (3x3x3x3 array-like): the 3x3x3x3 array-like
                representing the elastic tensor
            tol (float): tolerance for initial symmetry test of tensor
        """
        obj = TensorBase(input_array).view(cls)
        if obj.shape != (3, 3, 3, 3):
            raise ValueError("Default elastic tensor constructor requires "
                             "input to be the true 3x3x3x3 representation. "
                             "To construct from an elastic tensor from "
                             "6x6 Voigt array, use ElasticTensor.from_voigt")
        # Minor symmetries (ij/ji and kl/lk) and the major symmetry
        # (ijkl/klij); violations only warn, so slightly asymmetric input
        # is accepted as-is.
        if not ((obj - np.transpose(obj, (1, 0, 2, 3)) < tol).all() and
                (obj - np.transpose(obj, (0, 1, 3, 2)) < tol).all() and
                (obj - np.transpose(obj, (1, 0, 3, 2)) < tol).all() and
                (obj - np.transpose(obj, (3, 2, 0, 1)) < tol).all()):
            warnings.warn("Input elasticity tensor does "
                          "not satisfy standard symmetries")
        return obj
    @property
    def compliance_tensor(self):
        """
        returns the compliance tensor as the matrix inverse of the
        6x6 Voigt-notation elastic tensor (a 6x6 array, not rank-4)
        """
        return np.linalg.inv(self.voigt)
    @property
    def k_voigt(self):
        """
        returns the K_v (Voigt-average) bulk modulus: the mean of the
        upper-left 3x3 block of the Voigt matrix
        """
        return self.voigt[:3, :3].mean()
    @property
    def g_voigt(self):
        """
        returns the G_v (Voigt-average) shear modulus
        """
        # 2*trace - triu.sum() of the 3x3 block equals
        # (C11+C22+C33) - (C12+C13+C23), the standard Voigt expression.
        return (2. * self.voigt[:3, :3].trace() -
                np.triu(self.voigt[:3, :3]).sum() +
                3 * self.voigt[3:, 3:].trace()) / 15.
    @property
    def k_reuss(self):
        """
        returns the K_r (Reuss-average) bulk modulus
        """
        return 1. / self.compliance_tensor[:3, :3].sum()
    @property
    def g_reuss(self):
        """
        returns the G_r (Reuss-average) shear modulus
        """
        return 15. / (8. * self.compliance_tensor[:3, :3].trace() -
                      4. * np.triu(self.compliance_tensor[:3, :3]).sum() +
                      3. * self.compliance_tensor[3:, 3:].trace())
    @property
    def k_vrh(self):
        """
        returns the K_vrh (Voigt-Reuss-Hill) average bulk modulus,
        i. e. the arithmetic mean of the Voigt and Reuss values
        """
        return 0.5 * (self.k_voigt + self.k_reuss)
    @property
    def g_vrh(self):
        """
        returns the G_vrh (Voigt-Reuss-Hill) average shear modulus
        """
        return 0.5 * (self.g_voigt + self.g_reuss)
    @property
    def kg_average(self):
        """
        returns the list [K_voigt, G_voigt, K_reuss, G_reuss, K_vrh, G_vrh]
        of bulk and shear moduli, matching legacy behavior
        """
        return [self.k_voigt, self.g_voigt, self.k_reuss, self.g_reuss,
                self.k_vrh, self.g_vrh]
    @property
    def y_mod(self):
        """
        Calculates Young's modulus (in SI units, Pa) using the
        Voigt-Reuss-Hill averages of bulk and shear moduli
        """
        # E = 9KG / (3K + G); the 1e9 factor converts GPa -> Pa
        # (assumes the tensor is stored in GPa -- TODO confirm).
        return 9.e9 * self.k_vrh * self.g_vrh / (3. * self.k_vrh + self.g_vrh)
    def trans_v(self, structure):
        """
        Calculates transverse sound velocity (in SI units) using the
        Voigt-Reuss-Hill average shear modulus

        Args:
            structure: pymatgen structure object

        Returns: transverse sound velocity (in SI units)
        """
        nsites = structure.num_sites
        volume = structure.volume
        natoms = structure.composition.num_atoms
        weight = structure.composition.weight
        # Mass density in kg/m^3: 1.6605e3 = 1.6605e-27 kg/amu * 1e30
        # (assumes structure.volume is in Angstrom^3 -- TODO confirm).
        mass_density = 1.6605e3 * nsites * weight / (natoms * volume)
        # v_t = sqrt(G / rho); 1e9 converts the GPa modulus to Pa.
        return (1e9 * self.g_vrh / mass_density) ** 0.5
    def long_v(self, structure):
        """
        Calculates longitudinal sound velocity (in SI units)
        using the Voigt-Reuss-Hill average bulk and shear moduli

        Args:
            structure: pymatgen structure object

        Returns: longitudinal sound velocity (in SI units)
        """
        nsites = structure.num_sites
        volume = structure.volume
        natoms = structure.composition.num_atoms
        weight = structure.composition.weight
        mass_density = 1.6605e3 * nsites * weight / (natoms * volume)
        # v_l = sqrt((K + 4G/3) / rho)
        return (1e9 * (self.k_vrh + 4./3. * self.g_vrh) / mass_density) ** 0.5
    def snyder_ac(self, structure):
        """
        Calculates Snyder's acoustic sound velocity (in SI units)

        Args:
            structure: pymatgen structure object

        Returns: Snyder's acoustic sound velocity (in SI units)
        """
        nsites = structure.num_sites
        volume = structure.volume
        natoms = structure.composition.num_atoms
        # Number density in m^-3 (1e30 converts Angstrom^-3 -> m^-3,
        # assuming volume is in Angstrom^3 -- TODO confirm).
        num_density = 1e30 * nsites / volume
        tot_mass = sum([e.atomic_mass for e in structure.species])
        # Average atomic mass in kg (1.6605e-27 kg per amu).
        avg_mass = 1.6605e-27 * tot_mass / natoms
        return 0.38483*avg_mass * \
            ((self.long_v(structure) + 2.*self.trans_v(structure))/3.) ** 3.\
            / (300.*num_density ** (-2./3.) * nsites ** (1./3.))
    def snyder_opt(self, structure):
        """
        Calculates Snyder's optical sound velocity (in SI units)

        Args:
            structure: pymatgen structure object

        Returns: Snyder's optical sound velocity (in SI units)
        """
        nsites = structure.num_sites
        volume = structure.volume
        num_density = 1e30 * nsites / volume
        return 1.66914e-23 * \
            (self.long_v(structure) + 2.*self.trans_v(structure))/3. \
            / num_density ** (-2./3.) * (1 - nsites ** (-1./3.))
    def snyder_total(self, structure):
        """
        Calculates Snyder's total sound velocity (in SI units) as the
        sum of the acoustic and optical contributions

        Args:
            structure: pymatgen structure object

        Returns: Snyder's total sound velocity (in SI units)
        """
        return self.snyder_ac(structure) + self.snyder_opt(structure)
    def clarke_thermalcond(self, structure):
        """
        Calculates Clarke's thermal conductivity (in SI units)

        Args:
            structure: pymatgen structure object

        Returns: Clarke's thermal conductivity (in SI units)
        """
        nsites = structure.num_sites
        volume = structure.volume
        tot_mass = sum([e.atomic_mass for e in structure.species])
        natoms = structure.composition.num_atoms
        weight = structure.composition.weight
        avg_mass = 1.6605e-27 * tot_mass / natoms
        mass_density = 1.6605e3 * nsites * weight / (natoms * volume)
        # 1.3806e-23 is Boltzmann's constant k_B in J/K.
        return 0.87 * 1.3806e-23 * avg_mass**(-2./3.) \
            * mass_density**(1./6.) * self.y_mod**0.5
    def cahill_thermalcond(self, structure):
        """
        Calculates Cahill's thermal conductivity (in SI units)

        Args:
            structure: pymatgen structure object

        Returns: Cahill's thermal conductivity (in SI units)
        """
        nsites = structure.num_sites
        volume = structure.volume
        num_density = 1e30 * nsites / volume
        return 1.3806e-23 / 2.48 * num_density**(2./3.) \
            * (self.long_v(structure) + 2 * self.trans_v(structure))
    def debye_temperature(self, structure):
        """
        Calculates the debye temperature (in SI units, K)

        Args:
            structure: pymatgen structure object

        Returns: debye temperature (in SI units)
        """
        nsites = structure.num_sites
        volume = structure.volume
        tot_mass = sum([e.atomic_mass for e in structure.species])
        natoms = structure.composition.num_atoms
        weight = structure.composition.weight
        avg_mass = 1.6605e-27 * tot_mass / natoms
        mass_density = 1.6605e3 * nsites * weight / (natoms * volume)
        return 2.589e-11 * avg_mass**(-1./3.) * mass_density**(-1./6.) \
            * self.y_mod**0.5
    def debye_temperature_gibbs(self, structure):
        """
        Calculates the debye temperature according to the GIBBS
        formulation (in SI units)

        Args:
            structure: pymatgen structure object

        Returns: debye temperature (in SI units)
        """
        nsites = structure.num_sites  # NOTE(review): unused in this method
        volume = structure.volume
        tot_mass = sum([e.atomic_mass for e in structure.species])
        natoms = structure.composition.num_atoms
        avg_mass = 1.6605e-27 * tot_mass / natoms
        # Poisson-ratio-dependent scaling function f(t) of the GIBBS model.
        t = self.homogeneous_poisson
        f = (3.*(2.*(2./3.*(1. + t)/(1. - 2.*t))**(1.5) + \
            (1./3.*(1. + t)/(1. - t))**(1.5))**-1) ** (1./3.)
        return 2.9772e-11 * avg_mass**(-1./2.) * (volume / natoms) ** (-1./6.) \
            * f * self.k_vrh**(0.5)
    @property
    def universal_anisotropy(self):
        """
        returns the universal anisotropy value
        A^U = 5*G_v/G_r + K_v/K_r - 6
        """
        return 5. * self.g_voigt / self.g_reuss + \
            self.k_voigt / self.k_reuss - 6.
    @property
    def homogeneous_poisson(self):
        """
        returns the homogeneous (isotropic-average) poisson ratio,
        computed from the VRH bulk and shear moduli
        """
        return (1. - 2. / 3. * self.g_vrh / self.k_vrh) / \
            (2. + 2. / 3. * self.g_vrh / self.k_vrh)
    def energy_density(self, strain):
        """
        Calculates the elastic energy density due to a strain:
        u = 1/2 * e_voigt . C_voigt . e_voigt
        (result in eV/Angstrom^3, assuming the tensor is in GPa)

        Args:
            strain (3x3 array-like): strain tensor, coerced to a Strain
        """
        # Conversion factor for GPa to eV/Angstrom^3
        GPA_EV = 1/160.217662
        # record=True silences warnings raised while coercing the input to
        # a Strain -- presumably independent-strain related; verify.
        with warnings.catch_warnings(record=True):
            e_density = np.dot(np.transpose(Strain(strain).voigt),
                               np.dot(self.voigt, Strain(strain).voigt)) / 2 * GPA_EV
        return e_density
    @classmethod
    def from_strain_stress_list(cls, strains, stresses):
        """
        Class method to fit an elastic tensor from stress/strain
        data. Method uses Moore-Penrose pseudoinverse to invert
        the s = C*e equation with elastic tensor, stress, and
        strain in voigt notation

        Args:
            stresses (Nx3x3 array-like): list or array of stresses
            strains (Nx3x3 array-like): list or array of strains
        """
        # convert the stress/strain to Nx6 arrays of voigt-notation
        warnings.warn("Linear fitting of Strain/Stress lists may yield "
                      "questionable results from vasp data, use with caution.")
        stresses = np.array([Stress(stress).voigt for stress in stresses])
        with warnings.catch_warnings(record=True):
            strains = np.array([Strain(strain).voigt for strain in strains])
        # Least-squares fit: C = pinv(e) . s, transposed into 6x6 Voigt form.
        voigt_fit = np.transpose(np.dot(np.linalg.pinv(strains), stresses))
        return cls.from_voigt(voigt_fit)
    @classmethod
    def from_stress_dict(cls, stress_dict, tol=0.1, vasp=True, symmetry=False):
        """
        Constructs the elastic tensor from IndependentStrain-Stress dictionary
        corresponding to legacy behavior of elasticity package.

        Args:
            stress_dict (dict): dictionary of stresses indexed by corresponding
                IndependentStrain objects.
            tol (float): tolerance for zeroing small values of the tensor
            vasp (boolean): flag for whether the stress tensor should be
                converted based on vasp units/convention for stress
            symmetry (boolean): flag for whether or not the elastic tensor
                should fit from data based on symmetry
                (NOTE(review): currently unused in this implementation)
        """
        c_ij = np.zeros((6, 6))
        for i, j in itertools.product(range(6), repeat=2):
            # Strains that perturb voigt component i of the strain tensor.
            strains = [s for s in stress_dict.keys()
                       if (s.i, s.j) == vmap[i]]
            # (strain component, stress component) pairs for a linear fit.
            xy = [(s[vmap[i]], stress_dict[s][vmap[j]]) for s in strains]
            if len(xy) == 0:
                raise ValueError("No ind. strains for vgt index {}".format(i))
            elif len(xy) == 1:
                # Pad with the origin so polyfit has two points.
                xy += [(0, 0)]
            # Slope of stress vs. strain is the stiffness component.
            c_ij[i, j] = np.polyfit(*zip(*xy), deg=1)[0]
        if vasp:
            c_ij *= -0.1  # Convert units/sign convention of vasp stress tensor
        c_ij[0:, 3:] = 0.5 * c_ij[0:, 3:]  # account for voigt doubling of e4,e5,e6
        c = cls.from_voigt(c_ij)
        # NOTE(review): tol is documented but not passed to zeroed() -- verify.
        c = c.zeroed()
        return c
    @property
    def voigt_symmetrized(self):
        """
        Reconstructs the elastic tensor by symmetrizing the voigt
        notation tensor, to allow for legacy behavior
        """
        v = self.voigt
        new_v = 0.5 * (np.transpose(v) + v)
        return ElasticTensor.from_voigt(new_v)
|
aykol/pymatgen
|
pymatgen/analysis/elasticity/elastic.py
|
Python
|
mit
| 13,916
|
[
"VASP",
"pymatgen"
] |
505a6739ccbf123ae789ac87c0d2ba46f68b6d1fad986ec83058e20bdfbca732
|
# Hidden Markov Model Implementation
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import ghmm
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_HMM/Variable_Stiffness_Variable_Velocity/')
from data_variable_hshv2 import Fmat_original_hshv
from data_variable_hslv2 import Fmat_original_hslv
from data_variable_lshv2 import Fmat_original_lshv
from data_variable_lslv2 import Fmat_original_lslv
# Returns mu,sigma for 10 hidden-states from feature-vectors(123,35) for RF,SF,RM,SM models
def feature_to_mu_sigma(fvec):
    """Summarize a feature matrix into per-state Gaussian parameters.

    The rows of ``fvec`` are split into 10 equal consecutive chunks (one
    per hidden state); the mean and standard deviation over each chunk
    (all columns pooled) become that state's emission parameters.

    Args:
        fvec: (m, n) array-like feature matrix; m should be divisible by
            10 (any leftover rows at the end are ignored).

    Returns:
        (mu, sigma): two (10, 1) numpy matrices holding the per-state mean
        and (population, ddof=0) standard deviation.
    """
    m, n = np.shape(fvec)
    mu = np.matrix(np.zeros((10, 1)))
    sigma = np.matrix(np.zeros((10, 1)))
    # '//' keeps the chunk size an int on both Python 2 and 3; the
    # original 'm/10' yields a float on Python 3 and breaks slicing.
    divs = m // 10
    for index in range(10):
        m_init = index * divs
        chunk = fvec[m_init:(m_init + divs), 0:]
        # np.mean/np.std replace the deprecated-and-removed scipy aliases
        # scp.mean/scp.std (identical behavior).
        mu[index] = np.mean(chunk)
        sigma[index] = np.std(chunk)
    return mu, sigma
# Returns sequence given raw data
def create_seq(fvec):
    """Collapse a feature matrix into 10-observation sequences.

    Each column of ``fvec`` is one trial; its rows are split into 10
    equal consecutive chunks and each chunk is replaced by its mean,
    yielding a 10-observation sequence per trial.

    Args:
        fvec: (m, n) array-like feature matrix; m should be divisible by
            10 (any leftover rows at the end are ignored).

    Returns:
        (10, n) numpy matrix of chunk means, one column per trial.
    """
    m, n = np.shape(fvec)
    seq = np.matrix(np.zeros((10, n)))
    # '//' keeps the chunk size an int on both Python 2 and 3; the
    # original 'm/10' yields a float on Python 3 and breaks slicing.
    divs = m // 10
    for col in range(n):
        for index in range(10):
            m_init = index * divs
            chunk = fvec[m_init:(m_init + divs), col]
            # np.mean replaces the deprecated-and-removed scipy alias
            # scp.mean (identical behavior).
            seq[index, col] = np.mean(chunk)
    return seq
if __name__ == '__main__':
# HMM - Implementation:
F = ghmm.Float() # emission domain of this model
# A - Transition Matrix
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.20, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.2, 0.30, 0.30, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.2, 0.50, 0.30],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.4, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
# pi - initial probabilities per state
pi = [0.1] * 10
# Confusion Matrix
cmat = np.zeros((4,4))
#############################################################################################################################################
# HSHV as testing set and Rest as training set
# Checking the Data-Matrix
mu_rf_hshv,sigma_rf_hshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hslv[0:81,0:15], Fmat_original_lshv[0:81,0:15], Fmat_original_lslv[0:81,0:15]))))
mu_rm_hshv,sigma_rm_hshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hslv[0:81,15:30], Fmat_original_lshv[0:81,15:16], Fmat_original_lslv[0:81,15:28]))))
mu_sf_hshv,sigma_sf_hshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hslv[0:81,30:45], Fmat_original_lshv[0:81,16:23], Fmat_original_lslv[0:81,28:37]))))
mu_sm_hshv,sigma_sm_hshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hslv[0:81,45:56], Fmat_original_lshv[0:81,23:32], Fmat_original_lslv[0:81,37:45]))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_hshv = np.zeros((10,2))
B_rm_hshv = np.zeros((10,2))
B_sf_hshv = np.zeros((10,2))
B_sm_hshv = np.zeros((10,2))
for num_states in range(10):
B_rf_hshv[num_states,0] = mu_rf_hshv[num_states]
B_rf_hshv[num_states,1] = sigma_rf_hshv[num_states]
B_rm_hshv[num_states,0] = mu_rm_hshv[num_states]
B_rm_hshv[num_states,1] = sigma_rm_hshv[num_states]
B_sf_hshv[num_states,0] = mu_sf_hshv[num_states]
B_sf_hshv[num_states,1] = sigma_sf_hshv[num_states]
B_sm_hshv[num_states,0] = mu_sm_hshv[num_states]
B_sm_hshv[num_states,1] = sigma_sm_hshv[num_states]
B_rf_hshv = B_rf_hshv.tolist()
B_rm_hshv = B_rm_hshv.tolist()
B_sf_hshv = B_sf_hshv.tolist()
B_sm_hshv = B_sm_hshv.tolist()
# generate RF, RM, SF, SM models from parameters
model_rf_hshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rf_hshv, pi) # Will be Trained
model_rm_hshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rm_hshv, pi) # Will be Trained
model_sf_hshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sf_hshv, pi) # Will be Trained
model_sm_hshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sm_hshv, pi) # Will be Trained
# For Training
total_seq_rf_hshv = np.matrix(np.column_stack((Fmat_original_hslv[0:81,0:15], Fmat_original_lshv[0:81,0:15], Fmat_original_lslv[0:81,0:15])))
total_seq_rm_hshv = np.matrix(np.column_stack((Fmat_original_hslv[0:81,15:30], Fmat_original_lshv[0:81,15:16], Fmat_original_lslv[0:81,15:28])))
total_seq_sf_hshv = np.matrix(np.column_stack((Fmat_original_hslv[0:81,30:45], Fmat_original_lshv[0:81,16:23], Fmat_original_lslv[0:81,28:37])))
total_seq_sm_hshv = np.matrix(np.column_stack((Fmat_original_hslv[0:81,45:56], Fmat_original_lshv[0:81,23:32], Fmat_original_lslv[0:81,37:45])))
train_seq_rf_hshv = (np.array(total_seq_rf_hshv).T).tolist()
train_seq_rm_hshv = (np.array(total_seq_rm_hshv).T).tolist()
train_seq_sf_hshv = (np.array(total_seq_sf_hshv).T).tolist()
train_seq_sm_hshv = (np.array(total_seq_sm_hshv).T).tolist()
#print train_seq_rf_hshv
final_ts_rf_hshv = ghmm.SequenceSet(F,train_seq_rf_hshv)
final_ts_rm_hshv = ghmm.SequenceSet(F,train_seq_rm_hshv)
final_ts_sf_hshv = ghmm.SequenceSet(F,train_seq_sf_hshv)
final_ts_sm_hshv = ghmm.SequenceSet(F,train_seq_sm_hshv)
model_rf_hshv.baumWelch(final_ts_rf_hshv)
model_rm_hshv.baumWelch(final_ts_rm_hshv)
model_sf_hshv.baumWelch(final_ts_sf_hshv)
model_sm_hshv.baumWelch(final_ts_sm_hshv)
# For Testing
total_seq_obj_hshv = Fmat_original_hshv[0:81,:]
rf_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
rm_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
sf_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
sm_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
k = 0
while (k < np.size(total_seq_obj_hshv,1)):
test_seq_obj_hshv = (np.array(total_seq_obj_hshv[0:81,k]).T).tolist()
new_test_seq_obj_hshv = np.array(sum(test_seq_obj_hshv,[]))
#print new_test_seq_obj_hshv
ts_obj_hshv = new_test_seq_obj_hshv
#print np.shape(ts_obj_hshv)
final_ts_obj_hshv = ghmm.EmissionSequence(F,ts_obj_hshv.tolist())
# Find Viterbi Path
path_rf_obj_hshv = model_rf_hshv.viterbi(final_ts_obj_hshv)
path_rm_obj_hshv = model_rm_hshv.viterbi(final_ts_obj_hshv)
path_sf_obj_hshv = model_sf_hshv.viterbi(final_ts_obj_hshv)
path_sm_obj_hshv = model_sm_hshv.viterbi(final_ts_obj_hshv)
obj_hshv = max(path_rf_obj_hshv[1],path_rm_obj_hshv[1],path_sf_obj_hshv[1],path_sm_obj_hshv[1])
if obj_hshv == path_rf_obj_hshv[1]:
rf_hshv[0,k] = 1
elif obj_hshv == path_rm_obj_hshv[1]:
rm_hshv[0,k] = 1
elif obj_hshv == path_sf_obj_hshv[1]:
sf_hshv[0,k] = 1
else:
sm_hshv[0,k] = 1
k = k+1
#print rf_hshv.T
cmat[0][0] = cmat[0][0] + np.sum(rf_hshv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_hshv[0,15:15])
cmat[0][2] = cmat[0][2] + np.sum(rf_hshv[0,15:26])
cmat[0][3] = cmat[0][3] + np.sum(rf_hshv[0,26:33])
cmat[1][0] = cmat[1][0] + np.sum(rm_hshv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_hshv[0,15:15])
cmat[1][2] = cmat[1][2] + np.sum(rm_hshv[0,15:26])
cmat[1][3] = cmat[1][3] + np.sum(rm_hshv[0,26:33])
cmat[2][0] = cmat[2][0] + np.sum(sf_hshv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_hshv[0,15:15])
cmat[2][2] = cmat[2][2] + np.sum(sf_hshv[0,15:26])
cmat[2][3] = cmat[2][3] + np.sum(sf_hshv[0,26:33])
cmat[3][0] = cmat[3][0] + np.sum(sm_hshv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_hshv[0,15:15])
cmat[3][2] = cmat[3][2] + np.sum(sm_hshv[0,15:26])
cmat[3][3] = cmat[3][3] + np.sum(sm_hshv[0,26:33])
#print cmat
#############################################################################################################################################
# HSLV as testing set and Rest as training set
mu_rf_hslv,sigma_rf_hslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:81,0:15], Fmat_original_lshv[0:81,0:15], Fmat_original_lslv[0:81,0:15]))))
mu_rm_hslv,sigma_rm_hslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:81,15:15], Fmat_original_lshv[0:81,15:16], Fmat_original_lslv[0:81,15:28]))))
mu_sf_hslv,sigma_sf_hslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:81,15:26], Fmat_original_lshv[0:81,16:23], Fmat_original_lslv[0:81,28:37]))))
mu_sm_hslv,sigma_sm_hslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:81,26:33], Fmat_original_lshv[0:81,23:32], Fmat_original_lslv[0:81,37:45]))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_hslv = np.zeros((10,2))
B_rm_hslv = np.zeros((10,2))
B_sf_hslv = np.zeros((10,2))
B_sm_hslv = np.zeros((10,2))
for num_states in range(10):
B_rf_hslv[num_states,0] = mu_rf_hslv[num_states]
B_rf_hslv[num_states,1] = sigma_rf_hslv[num_states]
B_rm_hslv[num_states,0] = mu_rm_hslv[num_states]
B_rm_hslv[num_states,1] = sigma_rm_hslv[num_states]
B_sf_hslv[num_states,0] = mu_sf_hslv[num_states]
B_sf_hslv[num_states,1] = sigma_sf_hslv[num_states]
B_sm_hslv[num_states,0] = mu_sm_hslv[num_states]
B_sm_hslv[num_states,1] = sigma_sm_hslv[num_states]
B_rf_hslv = B_rf_hslv.tolist()
B_rm_hslv = B_rm_hslv.tolist()
B_sf_hslv = B_sf_hslv.tolist()
B_sm_hslv = B_sm_hslv.tolist()
model_rf_hslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rf_hslv, pi) # Will be Trained
model_rm_hslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rm_hslv, pi) # Will be Trained
model_sf_hslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sf_hslv, pi) # Will be Trained
model_sm_hslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sm_hslv, pi) # Will be Trained
# For Training
total_seq_rf_hslv = np.matrix(np.column_stack((Fmat_original_hshv[0:81,0:15], Fmat_original_lshv[0:81,0:15], Fmat_original_lslv[0:81,0:15])))
total_seq_rm_hslv = np.matrix(np.column_stack((Fmat_original_hshv[0:81,15:15], Fmat_original_lshv[0:81,15:16], Fmat_original_lslv[0:81,15:28])))
total_seq_sf_hslv = np.matrix(np.column_stack((Fmat_original_hshv[0:81,15:26], Fmat_original_lshv[0:81,16:23], Fmat_original_lslv[0:81,28:37])))
total_seq_sm_hslv = np.matrix(np.column_stack((Fmat_original_hshv[0:81,26:33], Fmat_original_lshv[0:81,23:32], Fmat_original_lslv[0:81,37:45])))
train_seq_rf_hslv = (np.array(total_seq_rf_hslv).T).tolist()
train_seq_rm_hslv = (np.array(total_seq_rm_hslv).T).tolist()
train_seq_sf_hslv = (np.array(total_seq_sf_hslv).T).tolist()
train_seq_sm_hslv = (np.array(total_seq_sm_hslv).T).tolist()
#print train_seq_rf_hslv
final_ts_rf_hslv = ghmm.SequenceSet(F,train_seq_rf_hslv)
final_ts_rm_hslv = ghmm.SequenceSet(F,train_seq_rm_hslv)
final_ts_sf_hslv = ghmm.SequenceSet(F,train_seq_sf_hslv)
final_ts_sm_hslv = ghmm.SequenceSet(F,train_seq_sm_hslv)
model_rf_hslv.baumWelch(final_ts_rf_hslv)
model_rm_hslv.baumWelch(final_ts_rm_hslv)
model_sf_hslv.baumWelch(final_ts_sf_hslv)
model_sm_hslv.baumWelch(final_ts_sm_hslv)
# For Testing
total_seq_obj_hslv = Fmat_original_hslv[0:81,:]
rf_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
rm_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
sf_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
sm_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
k = 0
while (k < np.size(total_seq_obj_hslv,1)):
test_seq_obj_hslv = (np.array(total_seq_obj_hslv[0:81,k]).T).tolist()
new_test_seq_obj_hslv = np.array(sum(test_seq_obj_hslv,[]))
#print new_test_seq_obj_hslv
ts_obj_hslv = new_test_seq_obj_hslv
#print np.shape(ts_obj_hslv)
final_ts_obj_hslv = ghmm.EmissionSequence(F,ts_obj_hslv.tolist())
# Find Viterbi Path
path_rf_obj_hslv = model_rf_hslv.viterbi(final_ts_obj_hslv)
path_rm_obj_hslv = model_rm_hslv.viterbi(final_ts_obj_hslv)
path_sf_obj_hslv = model_sf_hslv.viterbi(final_ts_obj_hslv)
path_sm_obj_hslv = model_sm_hslv.viterbi(final_ts_obj_hslv)
obj_hslv = max(path_rf_obj_hslv[1],path_rm_obj_hslv[1],path_sf_obj_hslv[1],path_sm_obj_hslv[1])
if obj_hslv == path_rf_obj_hslv[1]:
rf_hslv[0,k] = 1
elif obj_hslv == path_rm_obj_hslv[1]:
rm_hslv[0,k] = 1
elif obj_hslv == path_sf_obj_hslv[1]:
sf_hslv[0,k] = 1
else:
sm_hslv[0,k] = 1
k = k+1
#print rf_hslv.T
cmat[0][0] = cmat[0][0] + np.sum(rf_hslv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_hslv[0,15:30])
cmat[0][2] = cmat[0][2] + np.sum(rf_hslv[0,30:45])
cmat[0][3] = cmat[0][3] + np.sum(rf_hslv[0,45:56])
cmat[1][0] = cmat[1][0] + np.sum(rm_hslv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_hslv[0,15:30])
cmat[1][2] = cmat[1][2] + np.sum(rm_hslv[0,30:45])
cmat[1][3] = cmat[1][3] + np.sum(rm_hslv[0,45:56])
cmat[2][0] = cmat[2][0] + np.sum(sf_hslv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_hslv[0,15:30])
cmat[2][2] = cmat[2][2] + np.sum(sf_hslv[0,30:45])
cmat[2][3] = cmat[2][3] + np.sum(sf_hslv[0,45:56])
cmat[3][0] = cmat[3][0] + np.sum(sm_hslv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_hslv[0,15:30])
cmat[3][2] = cmat[3][2] + np.sum(sm_hslv[0,30:45])
cmat[3][3] = cmat[3][3] + np.sum(sm_hslv[0,45:56])
#print cmat
############################################################################################################################################
# LSHV as testing set and Rest as training set
mu_rf_lshv,sigma_rf_lshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:81,0:15], Fmat_original_hslv[0:81,0:15], Fmat_original_lslv[0:81,0:15]))))
mu_rm_lshv,sigma_rm_lshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:81,15:15], Fmat_original_hslv[0:81,15:30], Fmat_original_lslv[0:81,15:28]))))
mu_sf_lshv,sigma_sf_lshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:81,15:26], Fmat_original_hslv[0:81,30:45], Fmat_original_lslv[0:81,28:37]))))
mu_sm_lshv,sigma_sm_lshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:81,26:33], Fmat_original_hslv[0:81,45:56], Fmat_original_lslv[0:81,37:45]))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_lshv = np.zeros((10,2))
B_rm_lshv = np.zeros((10,2))
B_sf_lshv = np.zeros((10,2))
B_sm_lshv = np.zeros((10,2))
for num_states in range(10):
B_rf_lshv[num_states,0] = mu_rf_lshv[num_states]
B_rf_lshv[num_states,1] = sigma_rf_lshv[num_states]
B_rm_lshv[num_states,0] = mu_rm_lshv[num_states]
B_rm_lshv[num_states,1] = sigma_rm_lshv[num_states]
B_sf_lshv[num_states,0] = mu_sf_lshv[num_states]
B_sf_lshv[num_states,1] = sigma_sf_lshv[num_states]
B_sm_lshv[num_states,0] = mu_sm_lshv[num_states]
B_sm_lshv[num_states,1] = sigma_sm_lshv[num_states]
B_rf_lshv = B_rf_lshv.tolist()
B_rm_lshv = B_rm_lshv.tolist()
B_sf_lshv = B_sf_lshv.tolist()
B_sm_lshv = B_sm_lshv.tolist()
model_rf_lshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rf_lshv, pi) # Will be Trained
model_rm_lshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rm_lshv, pi) # Will be Trained
model_sf_lshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sf_lshv, pi) # Will be Trained
model_sm_lshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sm_lshv, pi) # Will be Trained
# For Training
total_seq_rf_lshv = np.matrix(np.column_stack((Fmat_original_hshv[0:81,0:15], Fmat_original_hslv[0:81,0:15], Fmat_original_lslv[0:81,0:15])))
total_seq_rm_lshv = np.matrix(np.column_stack((Fmat_original_hshv[0:81,15:15], Fmat_original_hslv[0:81,15:30], Fmat_original_lslv[0:81,15:28])))
total_seq_sf_lshv = np.matrix(np.column_stack((Fmat_original_hshv[0:81,15:26], Fmat_original_hslv[0:81,30:45], Fmat_original_lslv[0:81,28:37])))
total_seq_sm_lshv = np.matrix(np.column_stack((Fmat_original_hshv[0:81,26:33], Fmat_original_hslv[0:81,45:56], Fmat_original_lslv[0:81,37:45])))
train_seq_rf_lshv = (np.array(total_seq_rf_lshv).T).tolist()
train_seq_rm_lshv = (np.array(total_seq_rm_lshv).T).tolist()
train_seq_sf_lshv = (np.array(total_seq_sf_lshv).T).tolist()
train_seq_sm_lshv = (np.array(total_seq_sm_lshv).T).tolist()
#print train_seq_rf_lshv
final_ts_rf_lshv = ghmm.SequenceSet(F,train_seq_rf_lshv)
final_ts_rm_lshv = ghmm.SequenceSet(F,train_seq_rm_lshv)
final_ts_sf_lshv = ghmm.SequenceSet(F,train_seq_sf_lshv)
final_ts_sm_lshv = ghmm.SequenceSet(F,train_seq_sm_lshv)
model_rf_lshv.baumWelch(final_ts_rf_lshv)
model_rm_lshv.baumWelch(final_ts_rm_lshv)
model_sf_lshv.baumWelch(final_ts_sf_lshv)
model_sm_lshv.baumWelch(final_ts_sm_lshv)
# For Testing
total_seq_obj_lshv = Fmat_original_lshv[0:81,:]
rf_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
rm_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
sf_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
sm_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
k = 0
while (k < np.size(total_seq_obj_lshv,1)):
test_seq_obj_lshv = (np.array(total_seq_obj_lshv[0:81,k]).T).tolist()
new_test_seq_obj_lshv = np.array(sum(test_seq_obj_lshv,[]))
#print new_test_seq_obj_lshv
ts_obj_lshv = new_test_seq_obj_lshv
#print np.shape(ts_obj_lshv)
final_ts_obj_lshv = ghmm.EmissionSequence(F,ts_obj_lshv.tolist())
# Find Viterbi Path
path_rf_obj_lshv = model_rf_lshv.viterbi(final_ts_obj_lshv)
path_rm_obj_lshv = model_rm_lshv.viterbi(final_ts_obj_lshv)
path_sf_obj_lshv = model_sf_lshv.viterbi(final_ts_obj_lshv)
path_sm_obj_lshv = model_sm_lshv.viterbi(final_ts_obj_lshv)
obj_lshv = max(path_rf_obj_lshv[1],path_rm_obj_lshv[1],path_sf_obj_lshv[1],path_sm_obj_lshv[1])
if obj_lshv == path_rf_obj_lshv[1]:
rf_lshv[0,k] = 1
elif obj_lshv == path_rm_obj_lshv[1]:
rm_lshv[0,k] = 1
elif obj_lshv == path_sf_obj_lshv[1]:
sf_lshv[0,k] = 1
else:
sm_lshv[0,k] = 1
k = k+1
#print rf_lshv.T
cmat[0][0] = cmat[0][0] + np.sum(rf_lshv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_lshv[0,15:16])
cmat[0][2] = cmat[0][2] + np.sum(rf_lshv[0,16:23])
cmat[0][3] = cmat[0][3] + np.sum(rf_lshv[0,23:32])
cmat[1][0] = cmat[1][0] + np.sum(rm_lshv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_lshv[0,15:16])
cmat[1][2] = cmat[1][2] + np.sum(rm_lshv[0,16:23])
cmat[1][3] = cmat[1][3] + np.sum(rm_lshv[0,23:32])
cmat[2][0] = cmat[2][0] + np.sum(sf_lshv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_lshv[0,15:16])
cmat[2][2] = cmat[2][2] + np.sum(sf_lshv[0,16:23])
cmat[2][3] = cmat[2][3] + np.sum(sf_lshv[0,23:32])
cmat[3][0] = cmat[3][0] + np.sum(sm_lshv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_lshv[0,15:16])
cmat[3][2] = cmat[3][2] + np.sum(sm_lshv[0,16:23])
cmat[3][3] = cmat[3][3] + np.sum(sm_lshv[0,23:32])
#print cmat
#############################################################################################################################################
# LSLV as testing set and Rest as training set
mu_rf_lslv,sigma_rf_lslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:81,0:15], Fmat_original_hslv[0:81,0:15], Fmat_original_lshv[0:81,0:15]))))
mu_rm_lslv,sigma_rm_lslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:81,15:15], Fmat_original_hslv[0:81,15:30], Fmat_original_lshv[0:81,15:16]))))
mu_sf_lslv,sigma_sf_lslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:81,15:26], Fmat_original_hslv[0:81,30:45], Fmat_original_lshv[0:81,16:23]))))
mu_sm_lslv,sigma_sm_lslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:81,26:33], Fmat_original_hslv[0:81,45:56], Fmat_original_lshv[0:81,23:32]))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
# One 10-state Gaussian emission table per class; rf/rm/sf/sm are presumably
# {rigid, soft} x {fixed, movable} and the *_lslv / *_hshv / ... suffixes the
# stiffness/velocity condition of the dataset -- TODO confirm naming.
# mu_*_lslv / sigma_*_lslv are per-state means and std-devs computed earlier in this script.
B_rf_lslv = np.zeros((10,2))
B_rm_lslv = np.zeros((10,2))
B_sf_lslv = np.zeros((10,2))
B_sm_lslv = np.zeros((10,2))
for num_states in range(10):
    # column 0 = mean, column 1 = std-dev for this hidden state
    B_rf_lslv[num_states,0] = mu_rf_lslv[num_states]
    B_rf_lslv[num_states,1] = sigma_rf_lslv[num_states]
    B_rm_lslv[num_states,0] = mu_rm_lslv[num_states]
    B_rm_lslv[num_states,1] = sigma_rm_lslv[num_states]
    B_sf_lslv[num_states,0] = mu_sf_lslv[num_states]
    B_sf_lslv[num_states,1] = sigma_sf_lslv[num_states]
    B_sm_lslv[num_states,0] = mu_sm_lslv[num_states]
    B_sm_lslv[num_states,1] = sigma_sm_lslv[num_states]
# ghmm expects plain nested lists rather than numpy arrays
B_rf_lslv = B_rf_lslv.tolist()
B_rm_lslv = B_rm_lslv.tolist()
B_sf_lslv = B_sf_lslv.tolist()
B_sm_lslv = B_sm_lslv.tolist()
# One HMM per class, all sharing the transition matrix A and initial distribution pi
model_rf_lslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rf_lslv, pi) # Will be Trained
model_rm_lslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rm_lslv, pi) # Will be Trained
model_sf_lslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sf_lslv, pi) # Will be Trained
model_sm_lslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sm_lslv, pi) # Will be Trained
# For Training
# Training data: first 81 force samples per trial, columns drawn from the other
# three condition datasets (hshv, hslv, lshv); the lslv set is held out for testing.
total_seq_rf_lslv = np.matrix(np.column_stack((Fmat_original_hshv[0:81,0:15], Fmat_original_hslv[0:81,0:15], Fmat_original_lshv[0:81,0:15])))
# NOTE(review): Fmat_original_hshv[0:81,15:15] is an EMPTY column slice (contributes
# no trials) and lshv[0:81,15:16] is a single column, unlike the rf/sf/sm rows --
# this looks like a typo; confirm the intended column ranges against the data layout.
total_seq_rm_lslv = np.matrix(np.column_stack((Fmat_original_hshv[0:81,15:15], Fmat_original_hslv[0:81,15:30], Fmat_original_lshv[0:81,15:16])))
total_seq_sf_lslv = np.matrix(np.column_stack((Fmat_original_hshv[0:81,15:26], Fmat_original_hslv[0:81,30:45], Fmat_original_lshv[0:81,16:23])))
total_seq_sm_lslv = np.matrix(np.column_stack((Fmat_original_hshv[0:81,26:33], Fmat_original_hslv[0:81,45:56], Fmat_original_lshv[0:81,23:32])))
# Transpose so each row is one observation sequence, then convert to nested lists
train_seq_rf_lslv = (np.array(total_seq_rf_lslv).T).tolist()
train_seq_rm_lslv = (np.array(total_seq_rm_lslv).T).tolist()
train_seq_sf_lslv = (np.array(total_seq_sf_lslv).T).tolist()
train_seq_sm_lslv = (np.array(total_seq_sm_lslv).T).tolist()
#print train_seq_rf_lslv
final_ts_rf_lslv = ghmm.SequenceSet(F,train_seq_rf_lslv)
final_ts_rm_lslv = ghmm.SequenceSet(F,train_seq_rm_lslv)
final_ts_sf_lslv = ghmm.SequenceSet(F,train_seq_sf_lslv)
final_ts_sm_lslv = ghmm.SequenceSet(F,train_seq_sm_lslv)
# Baum-Welch re-estimation of each class model on its own training set
model_rf_lslv.baumWelch(final_ts_rf_lslv)
model_rm_lslv.baumWelch(final_ts_rm_lslv)
model_sf_lslv.baumWelch(final_ts_sf_lslv)
model_sm_lslv.baumWelch(final_ts_sm_lslv)
# For Testing
# Hold-out set: every lslv trial, first 81 force samples each.
total_seq_obj_lslv = Fmat_original_lslv[0:81,:]
# One-hot indicator rows: <class>_lslv[0,k] == 1 iff trial k was classified as that class
rf_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
rm_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
sf_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
sm_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
k = 0
while (k < np.size(total_seq_obj_lslv,1)):
    # Flatten trial k's column of 81 samples into a single flat list for ghmm
    test_seq_obj_lslv = (np.array(total_seq_obj_lslv[0:81,k]).T).tolist()
    new_test_seq_obj_lslv = np.array(sum(test_seq_obj_lslv,[]))
    #print new_test_seq_obj_lslv
    ts_obj_lslv = new_test_seq_obj_lslv
    #print np.shape(ts_obj_lslv)
    final_ts_obj_lslv = ghmm.EmissionSequence(F,ts_obj_lslv.tolist())
    # Find Viterbi Path
    # viterbi() returns (state path, log-likelihood); classify by the model
    # giving the highest Viterbi log-likelihood for this sequence.
    path_rf_obj_lslv = model_rf_lslv.viterbi(final_ts_obj_lslv)
    path_rm_obj_lslv = model_rm_lslv.viterbi(final_ts_obj_lslv)
    path_sf_obj_lslv = model_sf_lslv.viterbi(final_ts_obj_lslv)
    path_sm_obj_lslv = model_sm_lslv.viterbi(final_ts_obj_lslv)
    obj_lslv = max(path_rf_obj_lslv[1],path_rm_obj_lslv[1],path_sf_obj_lslv[1],path_sm_obj_lslv[1])
    # Ties resolve in rf > rm > sf > sm order because of the elif chain
    if obj_lslv == path_rf_obj_lslv[1]:
        rf_lslv[0,k] = 1
    elif obj_lslv == path_rm_obj_lslv[1]:
        rm_lslv[0,k] = 1
    elif obj_lslv == path_sf_obj_lslv[1]:
        sf_lslv[0,k] = 1
    else:
        sm_lslv[0,k] = 1
    k = k+1
#print rf_lslv.T
# Accumulate the confusion matrix: rows of cmat = predicted class (rf, rm, sf, sm),
# columns = ground-truth class.  Ground truth is encoded by column position in the
# lslv test set: columns 0:15 rf, 15:28 rm, 28:37 sf, 37:45 sm (45 trials total)
# -- NOTE(review): these boundaries must match how Fmat_original_lslv was assembled; confirm.
cmat[0][0] = cmat[0][0] + np.sum(rf_lslv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_lslv[0,15:28])
cmat[0][2] = cmat[0][2] + np.sum(rf_lslv[0,28:37])
cmat[0][3] = cmat[0][3] + np.sum(rf_lslv[0,37:45])
cmat[1][0] = cmat[1][0] + np.sum(rm_lslv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_lslv[0,15:28])
cmat[1][2] = cmat[1][2] + np.sum(rm_lslv[0,28:37])
cmat[1][3] = cmat[1][3] + np.sum(rm_lslv[0,37:45])
cmat[2][0] = cmat[2][0] + np.sum(sf_lslv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_lslv[0,15:28])
cmat[2][2] = cmat[2][2] + np.sum(sf_lslv[0,28:37])
cmat[2][3] = cmat[2][3] + np.sum(sf_lslv[0,37:45])
cmat[3][0] = cmat[3][0] + np.sum(sm_lslv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_lslv[0,15:28])
cmat[3][2] = cmat[3][2] + np.sum(sm_lslv[0,28:37])
cmat[3][3] = cmat[3][3] + np.sum(sm_lslv[0,37:45])
#print cmat
############################################################################################################################################
# Plot Confusion Matrix
# Render the accumulated confusion matrix `cmat` as an annotated heat map,
# write it to disk and display it.
Nlabels = 4
class_names = ['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable']
fig = pp.figure()
ax = fig.add_subplot(111)
heatmap = ax.matshow(cmat, interpolation='nearest', origin='upper', extent=[0, Nlabels, 0, Nlabels])
ax.set_title('Performance of HMM Models')
pp.xlabel("Targets")
pp.ylabel("Predictions")
# Center the tick labels on the cells; y runs top-to-bottom (origin='upper').
ax.set_xticks([0.5, 1.5, 2.5, 3.5])
ax.set_xticklabels(class_names)
ax.set_yticks([3.5, 2.5, 1.5, 0.5])
ax.set_yticklabels(class_names)
fig.colorbar(heatmap)
# Overlay the raw counts, one per cell (row 0 is drawn at the top).
for row in range(4):
    for col in range(4):
        pp.text(col + 0.5, 3.5 - row, cmat[row][col])
pp.savefig('results_force_10_states.png')
pp.show()
|
tapomayukh/projects_in_python
|
classification/Classification_with_HMM/Single_Contact_Classification/Variable_Stiffness_Variable_Velocity/HMM/with 0.8s/hmm_crossvalidation_force_10_states.py
|
Python
|
mit
| 27,332
|
[
"Mayavi"
] |
4e19a8057eebd53dc96b0b6253590b331da54438b1bc1bdd750873a114c21400
|
from behave import when, given, then
from custom.django import DjangoStep
@given('I am a new user')
def impl(context):
    """Step: a brand-new (anonymous) user requires no setup."""
    pass
@given('I am an authenticated user')
def impl(context):
    """Step: an authenticated user.

    NOTE(review): no login is actually performed here -- presumably a stub
    awaiting implementation; confirm before relying on this step.
    """
    pass
@when('I visit the home page')
def impl(context):
    """Navigate the browser (presumably a Selenium WebDriver) to the test server root."""
    context.browser.get(context.server_url)
@then('I will see the text "{text}"')
class impl(DjangoStep):
    # NOTE(review): behave steps are normally plain functions.  This class-based
    # form exposes a __code__ attribute (copied from the method below) so the
    # registered object looks function-like to behave's introspection --
    # confirm DjangoStep's contract before refactoring this.
    def impl(self, context, text):
        """Assert that *text* appears in the page <body>."""
        page_text = context.browser.find_element_by_tag_name('body').text
        self.assertIn(text, page_text)
    __code__= impl.__code__
|
contracode/libreshop
|
libreshop/bdd_tests/features/steps/storefront.py
|
Python
|
gpl-3.0
| 533
|
[
"VisIt"
] |
027c47420d5cef2af18e3080c68d2f80b74fd8d684a8675004b95d34822c5a9c
|
# -*- coding: utf-8 -*-
"""
Unit tests for instructor.api methods.
"""
import datetime
import ddt
import functools
import random
import pytz
import io
import json
import shutil
import tempfile
from django.conf import settings
from django.contrib.auth.models import User
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse as django_reverse
from django.http import HttpRequest, HttpResponse
from django.test import RequestFactory, TestCase
from django.test.utils import override_settings
from django.utils.timezone import utc
from django.utils.translation import ugettext as _
from mock import Mock, patch
from nose.tools import raises
from nose.plugins.attrib import attr
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.locator import UsageKey
from xmodule.modulestore import ModuleStoreEnum
from bulk_email.models import BulkEmailFlag
from course_modes.models import CourseMode
from courseware.models import StudentModule
from courseware.tests.factories import (
BetaTesterFactory, GlobalStaffFactory, InstructorFactory, StaffFactory, UserProfileFactory
)
from courseware.tests.helpers import LoginEnrollmentTestCase
from django_comment_common.models import FORUM_ROLE_COMMUNITY_TA
from django_comment_common.utils import seed_permissions_roles
from shoppingcart.models import (
RegistrationCodeRedemption, Order, CouponRedemption,
PaidCourseRegistration, Coupon, Invoice, CourseRegistrationCode, CourseRegistrationCodeInvoiceItem,
InvoiceTransaction)
from shoppingcart.pdf import PDFInvoice
from student.models import (
CourseEnrollment, CourseEnrollmentAllowed, NonExistentCourseError,
ManualEnrollmentAudit, UNENROLLED_TO_ENROLLED, ENROLLED_TO_UNENROLLED,
ALLOWEDTOENROLL_TO_UNENROLLED, ENROLLED_TO_ENROLLED, UNENROLLED_TO_ALLOWEDTOENROLL,
UNENROLLED_TO_UNENROLLED, ALLOWEDTOENROLL_TO_ENROLLED
)
from student.tests.factories import UserFactory, CourseModeFactory, AdminFactory
from student.roles import CourseBetaTesterRole, CourseSalesAdminRole, CourseFinanceAdminRole, CourseInstructorRole
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase, ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.fields import Date
from courseware.models import StudentFieldOverride
import lms.djangoapps.instructor_task.api
import lms.djangoapps.instructor.views.api
from lms.djangoapps.instructor.views.api import require_finance_admin
from lms.djangoapps.instructor.tests.utils import FakeContentTask, FakeEmail, FakeEmailInfo
from lms.djangoapps.instructor.views.api import _split_input_list, common_exceptions_400, generate_unique_password
from lms.djangoapps.instructor_task.api_helper import AlreadyRunningError
from certificates.tests.factories import GeneratedCertificateFactory
from certificates.models import CertificateStatuses
from openedx.core.djangoapps.course_groups.cohorts import set_course_cohort_settings
from openedx.core.lib.xblock_utils import grade_histogram
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from .test_tools import msk_from_problem_urlname
# Shared Date descriptor used by due-date tests to (de)serialize dates.
DATE_FIELD = Date()
# Expected CSV header row for registration-code report downloads.
EXPECTED_CSV_HEADER = (
    '"code","redeem_code_url","course_id","company_name","created_by","redeemed_by","invoice_id","purchaser",'
    '"customer_reference_number","internal_reference"'
)
# Expected CSV header row for coupon report downloads.
EXPECTED_COUPON_CSV_HEADER = '"Coupon Code","Course Id","% Discount","Description","Expiration Date",' \
                             '"Is Active","Code Redeemed Count","Total Discounted Seats","Total Discounted Amount"'
# ddt data for test cases involving reports
# Each entry maps a dashboard report endpoint to the instructor_task API call
# it is expected to submit.
REPORTS_DATA = (
    {
        'report_type': 'grade',
        'instructor_api_endpoint': 'calculate_grades_csv',
        'task_api_endpoint': 'lms.djangoapps.instructor_task.api.submit_calculate_grades_csv',
        'extra_instructor_api_kwargs': {}
    },
    {
        'report_type': 'enrolled learner profile',
        'instructor_api_endpoint': 'get_students_features',
        'task_api_endpoint': 'lms.djangoapps.instructor_task.api.submit_calculate_students_features_csv',
        'extra_instructor_api_kwargs': {'csv': '/csv'}
    },
    {
        'report_type': 'detailed enrollment',
        'instructor_api_endpoint': 'get_enrollment_report',
        'task_api_endpoint': 'lms.djangoapps.instructor_task.api.submit_detailed_enrollment_features_csv',
        'extra_instructor_api_kwargs': {}
    },
    {
        'report_type': 'enrollment',
        'instructor_api_endpoint': 'get_students_who_may_enroll',
        'task_api_endpoint': 'lms.djangoapps.instructor_task.api.submit_calculate_may_enroll_csv',
        'extra_instructor_api_kwargs': {},
    },
    {
        'report_type': 'proctored exam results',
        'instructor_api_endpoint': 'get_proctored_exam_results',
        'task_api_endpoint': 'lms.djangoapps.instructor_task.api.submit_proctored_exam_results_report',
        'extra_instructor_api_kwargs': {},
    },
    {
        'report_type': 'problem responses',
        'instructor_api_endpoint': 'get_problem_responses',
        'task_api_endpoint': 'lms.djangoapps.instructor_task.api.submit_calculate_problem_responses_csv',
        'extra_instructor_api_kwargs': {},
    }
)
# ddt data for test cases involving executive summary report
EXECUTIVE_SUMMARY_DATA = (
    {
        'report_type': 'executive summary',
        'instructor_api_endpoint': 'get_exec_summary_report',
        'task_api_endpoint': 'lms.djangoapps.instructor_task.api.submit_executive_summary_report',
        'extra_instructor_api_kwargs': {}
    },
)
# Instructor-dashboard endpoints reachable via HTTP GET.  Every endpoint used
# through the local reverse() wrapper must appear in exactly one of these sets.
INSTRUCTOR_GET_ENDPOINTS = set([
    'get_anon_ids',
    'get_coupon_codes',
    'get_issued_certificates',
    'get_sale_order_records',
    'get_sale_records',
])
# Instructor-dashboard endpoints that only accept HTTP POST.
INSTRUCTOR_POST_ENDPOINTS = set([
    'active_registration_codes',
    'add_users_to_cohorts',
    'bulk_beta_modify_access',
    'calculate_grades_csv',
    'change_due_date',
    'export_ora2_data',
    'generate_registration_codes',
    'get_enrollment_report',
    'get_exec_summary_report',
    'get_grading_config',
    'get_problem_responses',
    'get_proctored_exam_results',
    'get_registration_codes',
    'get_student_progress_url',
    'get_students_features',
    'get_students_who_may_enroll',
    'get_user_invoice_preference',
    'list_background_email_tasks',
    'list_course_role_members',
    'list_email_content',
    'list_entrance_exam_instructor_tasks',
    'list_financial_report_downloads',
    'list_forum_members',
    'list_instructor_tasks',
    'list_report_downloads',
    'mark_student_can_skip_entrance_exam',
    'modify_access',
    'register_and_enroll_students',
    'rescore_entrance_exam',
    'rescore_problem',
    'reset_due_date',
    'reset_student_attempts',
    'reset_student_attempts_for_entrance_exam',
    'sale_validation',
    'show_student_extensions',
    'show_unit_extensions',
    'send_email',
    'spent_registration_codes',
    'students_update_enrollment',
    'update_forum_role_membership',
])
def reverse(endpoint, args=None, kwargs=None, is_dashboard_endpoint=True):
    """
    Simple wrapper of Django's reverse that first ensures that we have declared
    each endpoint under test.
    Arguments:
        args: The args to be passed through to reverse.
        endpoint: The endpoint to be passed through to reverse.
        kwargs: The kwargs to be passed through to reverse.
        is_dashboard_endpoint: True if this is an instructor dashboard endpoint
            that must be declared in the INSTRUCTOR_GET_ENDPOINTS or
            INSTRUCTOR_POST_ENDPOINTS sets, or False otherwise.
    Returns:
        The return of Django's reverse function
    """
    is_endpoint_declared = endpoint in INSTRUCTOR_GET_ENDPOINTS or endpoint in INSTRUCTOR_POST_ENDPOINTS
    if is_dashboard_endpoint and is_endpoint_declared is False:
        # Verify that all endpoints are declared so we can ensure they are
        # properly validated elsewhere.
        raise ValueError("The endpoint {} must be declared in ENDPOINTS before use.".format(endpoint))
    return django_reverse(endpoint, args=args, kwargs=kwargs)
@common_exceptions_400
def view_success(request):  # pylint: disable=unused-argument
    """A dummy view for testing that returns a simple HTTP response."""
    return HttpResponse('success')
@common_exceptions_400
def view_user_doesnotexist(request):  # pylint: disable=unused-argument
    """A dummy view that raises a User.DoesNotExist exception."""
    raise User.DoesNotExist()
@common_exceptions_400
def view_alreadyrunningerror(request):  # pylint: disable=unused-argument
    """A dummy view that raises an AlreadyRunningError exception."""
    raise AlreadyRunningError()
@attr(shard=1)
class TestCommonExceptions400(TestCase):
    """
    Exercise the common_exceptions_400 decorator through the dummy views above:
    a clean view passes through, while known exceptions become 400 responses
    (plain text normally, JSON for AJAX requests).
    """
    def setUp(self):
        super(TestCommonExceptions400, self).setUp()
        self.request = Mock(spec=HttpRequest)
        self.request.META = {}

    def test_happy_path(self):
        # A view that raises nothing is returned untouched.
        response = view_success(self.request)
        self.assertEqual(200, response.status_code)

    def test_user_doesnotexist(self):
        self.request.is_ajax.return_value = False
        response = view_user_doesnotexist(self.request)  # pylint: disable=assignment-from-no-return
        self.assertEqual(400, response.status_code)
        self.assertIn("User does not exist", response.content)

    def test_user_doesnotexist_ajax(self):
        self.request.is_ajax.return_value = True
        response = view_user_doesnotexist(self.request)  # pylint: disable=assignment-from-no-return
        self.assertEqual(400, response.status_code)
        payload = json.loads(response.content)
        self.assertIn("User does not exist", payload["error"])

    def test_alreadyrunningerror(self):
        self.request.is_ajax.return_value = False
        response = view_alreadyrunningerror(self.request)  # pylint: disable=assignment-from-no-return
        self.assertEqual(400, response.status_code)
        self.assertIn("Task is already running", response.content)

    def test_alreadyrunningerror_ajax(self):
        self.request.is_ajax.return_value = True
        response = view_alreadyrunningerror(self.request)  # pylint: disable=assignment-from-no-return
        self.assertEqual(400, response.status_code)
        payload = json.loads(response.content)
        self.assertIn("Task is already running", payload["error"])
@attr(shard=1)
@ddt.ddt
class TestEndpointHttpMethods(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Ensure that users can make GET requests against endpoints that allow GET,
    and not against those that don't allow GET.
    """
    @classmethod
    def setUpClass(cls):
        """
        Set up test course.
        """
        super(TestEndpointHttpMethods, cls).setUpClass()
        cls.course = CourseFactory.create()
    def setUp(self):
        """
        Set up global staff role so authorization will not fail.
        """
        super(TestEndpointHttpMethods, self).setUp()
        global_user = GlobalStaffFactory()
        self.client.login(username=global_user.username, password='test')
    # ddt injects each endpoint name from the module-level sets as `data`.
    @ddt.data(*INSTRUCTOR_POST_ENDPOINTS)
    def test_endpoints_reject_get(self, data):
        """
        Tests that POST endpoints are rejected with 405 when using GET.
        """
        url = reverse(data, kwargs={'course_id': unicode(self.course.id)})
        response = self.client.get(url)
        self.assertEqual(
            response.status_code, 405,
            "Endpoint {} returned status code {} instead of a 405. It should not allow GET.".format(
                data, response.status_code
            )
        )
    @ddt.data(*INSTRUCTOR_GET_ENDPOINTS)
    def test_endpoints_accept_get(self, data):
        """
        Tests that GET endpoints are not rejected with 405 when using GET.
        """
        url = reverse(data, kwargs={'course_id': unicode(self.course.id)})
        response = self.client.get(url)
        # Only assert "not 405": the endpoint may still return 403/400 etc.
        # depending on auth and parameters; method support is all that's tested.
        self.assertNotEqual(
            response.status_code, 405,
            "Endpoint {} returned status code 405 where it shouldn't, since it should allow GET.".format(
                data
            )
        )
@attr(shard=1)
@patch('bulk_email.models.html_to_text', Mock(return_value='Mocking CourseEmail.text_message', autospec=True))
class TestInstructorAPIDenyLevels(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Ensure that users cannot access endpoints they shouldn't be able to.

    Verifies the instructor-dashboard API at three privilege levels:
    enrolled student (everything denied), course staff (staff endpoints
    allowed, instructor-only endpoints denied) and instructor (all allowed).
    """
    @classmethod
    def setUpClass(cls):
        super(TestInstructorAPIDenyLevels, cls).setUpClass()
        cls.course = CourseFactory.create()
        cls.problem_location = msk_from_problem_urlname(
            cls.course.id,
            'robot-some-problem-urlname'
        )
        cls.problem_urlname = cls.problem_location.to_deprecated_string()
        BulkEmailFlag.objects.create(enabled=True, require_course_email_auth=False)
    @classmethod
    def tearDownClass(cls):
        super(TestInstructorAPIDenyLevels, cls).tearDownClass()
        BulkEmailFlag.objects.all().delete()
    def setUp(self):
        super(TestInstructorAPIDenyLevels, self).setUp()
        self.user = UserFactory.create()
        CourseEnrollment.enroll(self.user, self.course.id)
        # Seed a StudentModule row so the attempt-reset/rescore endpoints have
        # state to operate on; the created object itself is not needed afterwards.
        StudentModule.objects.create(
            student=self.user,
            course_id=self.course.id,
            module_state_key=self.problem_location,
            state=json.dumps({'attempts': 10}),
        )
        # Endpoints that only Staff or Instructors can access
        # (duplicate 'get_students_features' entry removed -- it was listed twice).
        self.staff_level_endpoints = [
            ('students_update_enrollment',
             {'identifiers': 'foo@example.org', 'action': 'enroll'}),
            ('get_grading_config', {}),
            ('get_students_features', {}),
            ('get_student_progress_url', {'unique_student_identifier': self.user.username}),
            ('reset_student_attempts',
             {'problem_to_reset': self.problem_urlname, 'unique_student_identifier': self.user.email}),
            ('update_forum_role_membership',
             {'unique_student_identifier': self.user.email, 'rolename': 'Moderator', 'action': 'allow'}),
            ('list_forum_members', {'rolename': FORUM_ROLE_COMMUNITY_TA}),
            ('send_email', {'send_to': '["staff"]', 'subject': 'test', 'message': 'asdf'}),
            ('list_instructor_tasks', {}),
            ('list_background_email_tasks', {}),
            ('list_report_downloads', {}),
            ('list_financial_report_downloads', {}),
            ('calculate_grades_csv', {}),
            ('get_enrollment_report', {}),
            ('get_students_who_may_enroll', {}),
            ('get_exec_summary_report', {}),
            ('get_proctored_exam_results', {}),
            ('get_problem_responses', {}),
            ('export_ora2_data', {}),
        ]
        # Endpoints that only Instructors can access
        self.instructor_level_endpoints = [
            ('bulk_beta_modify_access', {'identifiers': 'foo@example.org', 'action': 'add'}),
            ('modify_access', {'unique_student_identifier': self.user.email, 'rolename': 'beta', 'action': 'allow'}),
            ('list_course_role_members', {'rolename': 'beta'}),
            ('rescore_problem',
             {'problem_to_reset': self.problem_urlname, 'unique_student_identifier': self.user.email}),
        ]
    def _access_endpoint(self, endpoint, args, status_code, msg):
        """
        Asserts that accessing the given `endpoint` gets a response of `status_code`.
        endpoint: string, endpoint for instructor dash API
        args: dict, kwargs for `reverse` call
        status_code: expected HTTP status code response
        msg: message to display if assertion fails.
        """
        url = reverse(endpoint, kwargs={'course_id': self.course.id.to_deprecated_string()})
        # Use the HTTP verb the endpoint is declared for (see the module-level sets).
        if endpoint in INSTRUCTOR_GET_ENDPOINTS:
            response = self.client.get(url, args)
        else:
            response = self.client.post(url, args)
        self.assertEqual(
            response.status_code,
            status_code,
            msg=msg
        )
    def test_student_level(self):
        """
        Ensure that an enrolled student can't access staff or instructor endpoints.
        """
        self.client.login(username=self.user.username, password='test')
        for endpoint, args in self.staff_level_endpoints:
            self._access_endpoint(
                endpoint,
                args,
                403,
                "Student should not be allowed to access endpoint " + endpoint
            )
        for endpoint, args in self.instructor_level_endpoints:
            self._access_endpoint(
                endpoint,
                args,
                403,
                "Student should not be allowed to access endpoint " + endpoint
            )
    def _access_problem_responses_endpoint(self, msg):
        """
        Access endpoint for problem responses report, ensuring that
        UsageKey.from_string returns a problem key that the endpoint
        can work with.
        msg: message to display if assertion fails.
        """
        mock_problem_key = Mock(return_value=u'')
        mock_problem_key.course_key = self.course.id
        with patch.object(UsageKey, 'from_string') as patched_method:
            patched_method.return_value = mock_problem_key
            self._access_endpoint('get_problem_responses', {}, 200, msg)
    def test_staff_level(self):
        """
        Ensure that a staff member can't access instructor endpoints.
        """
        staff_member = StaffFactory(course_key=self.course.id)
        CourseEnrollment.enroll(staff_member, self.course.id)
        CourseFinanceAdminRole(self.course.id).add_users(staff_member)
        self.client.login(username=staff_member.username, password='test')
        # Try to promote to forums admin - not working
        # update_forum_role(self.course.id, staff_member, FORUM_ROLE_ADMINISTRATOR, 'allow')
        for endpoint, args in self.staff_level_endpoints:
            expected_status = 200
            # TODO: make these work
            if endpoint in ['update_forum_role_membership', 'list_forum_members']:
                continue
            elif endpoint == 'get_problem_responses':
                self._access_problem_responses_endpoint(
                    "Staff member should be allowed to access endpoint " + endpoint
                )
                continue
            self._access_endpoint(
                endpoint,
                args,
                expected_status,
                "Staff member should be allowed to access endpoint " + endpoint
            )
        for endpoint, args in self.instructor_level_endpoints:
            self._access_endpoint(
                endpoint,
                args,
                403,
                "Staff member should not be allowed to access endpoint " + endpoint
            )
    def test_instructor_level(self):
        """
        Ensure that an instructor member can access all endpoints.
        """
        inst = InstructorFactory(course_key=self.course.id)
        CourseEnrollment.enroll(inst, self.course.id)
        CourseFinanceAdminRole(self.course.id).add_users(inst)
        self.client.login(username=inst.username, password='test')
        for endpoint, args in self.staff_level_endpoints:
            expected_status = 200
            # TODO: make these work
            if endpoint in ['update_forum_role_membership']:
                continue
            elif endpoint == 'get_problem_responses':
                self._access_problem_responses_endpoint(
                    "Instructor should be allowed to access endpoint " + endpoint
                )
                continue
            self._access_endpoint(
                endpoint,
                args,
                expected_status,
                "Instructor should be allowed to access endpoint " + endpoint
            )
        for endpoint, args in self.instructor_level_endpoints:
            expected_status = 200
            # TODO: make this work
            if endpoint in ['rescore_problem']:
                continue
            self._access_endpoint(
                endpoint,
                args,
                expected_status,
                "Instructor should be allowed to access endpoint " + endpoint
            )
@attr(shard=1)
@patch.dict(settings.FEATURES, {'ALLOW_AUTOMATED_SIGNUPS': True})
class TestInstructorAPIBulkAccountCreationAndEnrollment(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test Bulk account creation and enrollment from csv file
"""
    @classmethod
    def setUpClass(cls):
        """Create the default and audit-mode courses plus their bulk-registration URLs."""
        super(TestInstructorAPIBulkAccountCreationAndEnrollment, cls).setUpClass()
        cls.course = CourseFactory.create()
        # Create a course with mode 'audit'
        cls.audit_course = CourseFactory.create()
        CourseModeFactory.create(course_id=cls.audit_course.id, mode_slug=CourseMode.AUDIT)
        cls.url = reverse(
            'register_and_enroll_students', kwargs={'course_id': unicode(cls.course.id)}
        )
        cls.audit_course_url = reverse(
            'register_and_enroll_students', kwargs={'course_id': unicode(cls.audit_course.id)}
        )
    def setUp(self):
        """Create a paid 'white label' course, per-course instructors, and a
        pre-existing user who is not enrolled anywhere."""
        super(TestInstructorAPIBulkAccountCreationAndEnrollment, self).setUp()
        # Create a course with mode 'honor' and with price
        self.white_label_course = CourseFactory.create()
        self.white_label_course_mode = CourseModeFactory.create(
            course_id=self.white_label_course.id,
            mode_slug=CourseMode.HONOR,
            min_price=10,
            suggested_prices='10',
        )
        self.white_label_course_url = reverse(
            'register_and_enroll_students', kwargs={'course_id': unicode(self.white_label_course.id)}
        )
        self.request = RequestFactory().request()
        # Each course gets its own instructor; tests log in as the default one.
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.audit_course_instructor = InstructorFactory(course_key=self.audit_course.id)
        self.white_label_course_instructor = InstructorFactory(course_key=self.white_label_course.id)
        self.client.login(username=self.instructor.username, password='test')
        self.not_enrolled_student = UserFactory(
            username='NotEnrolledStudent',
            email='nonenrolled@test.com',
            first_name='NotEnrolled',
            last_name='Student'
        )
    @patch('lms.djangoapps.instructor.views.api.log.info')
    def test_account_creation_and_enrollment_with_csv(self, info_log):
        """
        Happy path test to create a single new user
        """
        # CSV columns: email, username, full name, country
        csv_content = "test_student@example.com,test_student_1,tester1,USA"
        uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
        response = self.client.post(self.url, {'students_list': uploaded_file})
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        self.assertEquals(len(data['row_errors']), 0)
        self.assertEquals(len(data['warnings']), 0)
        self.assertEquals(len(data['general_errors']), 0)
        manual_enrollments = ManualEnrollmentAudit.objects.all()
        self.assertEqual(manual_enrollments.count(), 1)
        self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
        # test the log for email that's send to new created user.
        info_log.assert_called_with('email sent to new created user at %s', 'test_student@example.com')
    @patch('lms.djangoapps.instructor.views.api.log.info')
    def test_account_creation_and_enrollment_with_csv_with_blank_lines(self, info_log):
        """
        Blank lines in the uploaded CSV are skipped; otherwise identical to the
        single-user happy path.
        """
        csv_content = "\ntest_student@example.com,test_student_1,tester1,USA\n\n"
        uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
        response = self.client.post(self.url, {'students_list': uploaded_file})
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        self.assertEquals(len(data['row_errors']), 0)
        self.assertEquals(len(data['warnings']), 0)
        self.assertEquals(len(data['general_errors']), 0)
        manual_enrollments = ManualEnrollmentAudit.objects.all()
        self.assertEqual(manual_enrollments.count(), 1)
        self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
        # test the log for email that's send to new created user.
        info_log.assert_called_with('email sent to new created user at %s', 'test_student@example.com')
    @patch('lms.djangoapps.instructor.views.api.log.info')
    def test_email_and_username_already_exist(self, info_log):
        """
        If the email address and username already exists
        and the user is enrolled in the course, do nothing (including no email gets sent out)
        """
        # Second row repeats the same email+username (only name/country differ).
        csv_content = "test_student@example.com,test_student_1,tester1,USA\n" \
                      "test_student@example.com,test_student_1,tester2,US"
        uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
        response = self.client.post(self.url, {'students_list': uploaded_file})
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        self.assertEquals(len(data['row_errors']), 0)
        self.assertEquals(len(data['warnings']), 0)
        self.assertEquals(len(data['general_errors']), 0)
        # Only the first row should have produced an enrollment.
        manual_enrollments = ManualEnrollmentAudit.objects.all()
        self.assertEqual(manual_enrollments.count(), 1)
        self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
        # test the log for email that's send to new created user.
        info_log.assert_called_with(
            u"user already exists with username '%s' and email '%s'",
            'test_student_1',
            'test_student@example.com'
        )
    def test_file_upload_type_not_csv(self):
        """
        Try uploading a file whose name does not end in .csv and verify that it is rejected
        """
        uploaded_file = SimpleUploadedFile("temp.jpg", io.BytesIO(b"some initial binary data: \x00\x01").read())
        response = self.client.post(self.url, {'students_list': uploaded_file})
        # The view reports per-file problems in the JSON body, not via HTTP status.
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        self.assertNotEquals(len(data['general_errors']), 0)
        self.assertEquals(data['general_errors'][0]['response'], 'Make sure that the file you upload is in CSV format with no extraneous characters or rows.')
        manual_enrollments = ManualEnrollmentAudit.objects.all()
        self.assertEqual(manual_enrollments.count(), 0)
    def test_bad_file_upload_type(self):
        """
        Try uploading a file with a .csv name but unreadable binary content and
        verify that it is rejected
        """
        uploaded_file = SimpleUploadedFile("temp.csv", io.BytesIO(b"some initial binary data: \x00\x01").read())
        response = self.client.post(self.url, {'students_list': uploaded_file})
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        self.assertNotEquals(len(data['general_errors']), 0)
        self.assertEquals(data['general_errors'][0]['response'], 'Could not read uploaded file.')
        manual_enrollments = ManualEnrollmentAudit.objects.all()
        self.assertEqual(manual_enrollments.count(), 0)
    def test_insufficient_data(self):
        """
        Try uploading a CSV file which does not have the exact four columns of data
        """
        # Only two of the required four columns (email, username, full name, country).
        csv_content = "test_student@example.com,test_student_1\n"
        uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
        response = self.client.post(self.url, {'students_list': uploaded_file})
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        self.assertEquals(len(data['row_errors']), 0)
        self.assertEquals(len(data['warnings']), 0)
        self.assertEquals(len(data['general_errors']), 1)
        self.assertEquals(data['general_errors'][0]['response'], 'Data in row #1 must have exactly four columns: email, username, full name, and country')
        manual_enrollments = ManualEnrollmentAudit.objects.all()
        self.assertEqual(manual_enrollments.count(), 0)
    def test_invalid_email_in_csv(self):
        """
        Test failure case of a poorly formatted email field
        """
        # Missing '@' makes the first column an invalid email address.
        csv_content = "test_student.example.com,test_student_1,tester1,USA"
        uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
        response = self.client.post(self.url, {'students_list': uploaded_file})
        data = json.loads(response.content)
        self.assertEqual(response.status_code, 200)
        self.assertNotEquals(len(data['row_errors']), 0)
        self.assertEquals(len(data['warnings']), 0)
        self.assertEquals(len(data['general_errors']), 0)
        self.assertEquals(data['row_errors'][0]['response'], 'Invalid email {0}.'.format('test_student.example.com'))
        manual_enrollments = ManualEnrollmentAudit.objects.all()
        self.assertEqual(manual_enrollments.count(), 0)
@patch('lms.djangoapps.instructor.views.api.log.info')
def test_csv_user_exist_and_not_enrolled(self, info_log):
"""
If the email address and username already exists
and the user is not enrolled in the course, enrolled him/her and iterate to next one.
"""
csv_content = "nonenrolled@test.com,NotEnrolledStudent,tester1,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
info_log.assert_called_with(
u'user %s enrolled in the course %s',
u'NotEnrolledStudent',
self.course.id
)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertTrue(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
def test_user_with_already_existing_email_in_csv(self):
"""
If the email address already exists, but the username is different,
assume it is the correct user and just register the user in the course.
"""
csv_content = "test_student@example.com,test_student_1,tester1,USA\n" \
"test_student@example.com,test_student_2,tester2,US"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
warning_message = 'An account with email {email} exists but the provided username {username} ' \
'is different. Enrolling anyway with {email}.'.format(email='test_student@example.com', username='test_student_2')
self.assertNotEquals(len(data['warnings']), 0)
self.assertEquals(data['warnings'][0]['response'], warning_message)
user = User.objects.get(email='test_student@example.com')
self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertTrue(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
    def test_user_with_already_existing_username_in_csv(self):
        """
        If the username already exists (but not the email),
        assume it is a different user and fail to create the new account.
        """
        # Same username on both rows, different emails.
        csv_content = "test_student1@example.com,test_student_1,tester1,USA\n" \
                      "test_student2@example.com,test_student_1,tester2,US"
        uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
        response = self.client.post(self.url, {'students_list': uploaded_file})
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        self.assertNotEquals(len(data['row_errors']), 0)
        self.assertEquals(data['row_errors'][0]['response'], 'Username {user} already exists.'.format(user='test_student_1'))
def test_csv_file_not_attached(self):
"""
Test when the user does not attach a file
"""
csv_content = "test_student1@example.com,test_student_1,tester1,USA\n" \
"test_student2@example.com,test_student_1,tester2,US"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'file_not_found': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertNotEquals(len(data['general_errors']), 0)
self.assertEquals(data['general_errors'][0]['response'], 'File is not attached.')
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
    def test_raising_exception_in_auto_registration_and_enrollment_case(self):
        """
        Test that exceptions are handled well
        """
        csv_content = "test_student1@example.com,test_student_1,tester1,USA\n" \
                      "test_student2@example.com,test_student_1,tester2,US"
        uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
        # Force every enrollment attempt to raise; the view must report the
        # failure per-row instead of propagating a 500.
        with patch('lms.djangoapps.instructor.views.api.create_manual_course_enrollment') as mock:
            mock.side_effect = NonExistentCourseError()
            response = self.client.post(self.url, {'students_list': uploaded_file})

        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        self.assertNotEquals(len(data['row_errors']), 0)
        self.assertEquals(data['row_errors'][0]['response'], 'NonExistentCourseError')
        # No audit record should be written when enrollment failed.
        manual_enrollments = ManualEnrollmentAudit.objects.all()
        self.assertEqual(manual_enrollments.count(), 0)
def test_generate_unique_password(self):
"""
generate_unique_password should generate a unique password string that excludes certain characters.
"""
password = generate_unique_password([], 12)
self.assertEquals(len(password), 12)
for letter in password:
self.assertNotIn(letter, 'aAeEiIoOuU1l')
    def test_users_created_and_enrolled_successfully_if_others_fail(self):
        """
        A row that fails (duplicate username) must not block the valid rows
        from being registered and enrolled.
        """
        # Row 2 reuses row 1's username and should be rejected; rows 1 and 3 are valid.
        csv_content = "test_student1@example.com,test_student_1,tester1,USA\n" \
                      "test_student3@example.com,test_student_1,tester3,CA\n" \
                      "test_student2@example.com,test_student_2,tester2,USA"

        uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
        response = self.client.post(self.url, {'students_list': uploaded_file})
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        self.assertNotEquals(len(data['row_errors']), 0)
        self.assertEquals(data['row_errors'][0]['response'], 'Username {user} already exists.'.format(user='test_student_1'))
        # The two valid rows produced accounts; the duplicate-username row did not.
        self.assertTrue(User.objects.filter(username='test_student_1', email='test_student1@example.com').exists())
        self.assertTrue(User.objects.filter(username='test_student_2', email='test_student2@example.com').exists())
        self.assertFalse(User.objects.filter(email='test_student3@example.com').exists())

        manual_enrollments = ManualEnrollmentAudit.objects.all()
        self.assertEqual(manual_enrollments.count(), 2)
    @patch.object(lms.djangoapps.instructor.views.api, 'generate_random_string',
                  Mock(side_effect=['first', 'first', 'second']))
    def test_generate_unique_password_no_reuse(self):
        """
        generate_unique_password should generate a unique password string that hasn't been generated before.
        """
        # The patched generator yields 'first' twice; with 'first' already in
        # the history the helper must keep drawing until it gets a fresh value.
        generated_password = ['first']
        password = generate_unique_password(generated_password, 12)
        self.assertNotEquals(password, 'first')
@patch.dict(settings.FEATURES, {'ALLOW_AUTOMATED_SIGNUPS': False})
def test_allow_automated_signups_flag_not_set(self):
csv_content = "test_student1@example.com,test_student_1,tester1,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEquals(response.status_code, 403)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
    @patch.dict(settings.FEATURES, {'ALLOW_AUTOMATED_SIGNUPS': True})
    def test_audit_enrollment_mode(self):
        """
        Test that enrollment mode for audit courses (paid courses) is 'audit'.
        """
        # Login Audit Course instructor
        self.client.login(username=self.audit_course_instructor.username, password='test')

        csv_content = "test_student_wl@example.com,test_student_wl,Test Student,USA"
        uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
        response = self.client.post(self.audit_course_url, {'students_list': uploaded_file})

        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        # The upload should succeed cleanly: no errors or warnings of any kind.
        self.assertEquals(len(data['row_errors']), 0)
        self.assertEquals(len(data['warnings']), 0)
        self.assertEquals(len(data['general_errors']), 0)

        manual_enrollments = ManualEnrollmentAudit.objects.all()
        self.assertEqual(manual_enrollments.count(), 1)
        self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)

        # Verify enrollment modes to be 'audit'
        for enrollment in manual_enrollments:
            self.assertEqual(enrollment.enrollment.mode, CourseMode.AUDIT)
    @patch.dict(settings.FEATURES, {'ALLOW_AUTOMATED_SIGNUPS': True})
    def test_honor_enrollment_mode(self):
        """
        Test that enrollment mode for unpaid honor courses is 'honor'.
        """
        # Remove white label course price so the honor mode becomes free.
        self.white_label_course_mode.min_price = 0
        self.white_label_course_mode.suggested_prices = ''
        self.white_label_course_mode.save()  # pylint: disable=no-member

        # Login Audit Course instructor
        self.client.login(username=self.white_label_course_instructor.username, password='test')

        csv_content = "test_student_wl@example.com,test_student_wl,Test Student,USA"
        uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
        response = self.client.post(self.white_label_course_url, {'students_list': uploaded_file})

        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        # Clean upload: no per-row errors, warnings, or general errors.
        self.assertEquals(len(data['row_errors']), 0)
        self.assertEquals(len(data['warnings']), 0)
        self.assertEquals(len(data['general_errors']), 0)

        manual_enrollments = ManualEnrollmentAudit.objects.all()
        self.assertEqual(manual_enrollments.count(), 1)
        self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)

        # Verify enrollment modes to be 'honor'
        for enrollment in manual_enrollments:
            self.assertEqual(enrollment.enrollment.mode, CourseMode.HONOR)
    @patch.dict(settings.FEATURES, {'ALLOW_AUTOMATED_SIGNUPS': True})
    def test_default_shopping_cart_enrollment_mode_for_white_label(self):
        """
        Test that enrollment mode for white label courses (paid courses) is DEFAULT_SHOPPINGCART_MODE_SLUG.
        """
        # Login white label course instructor
        self.client.login(username=self.white_label_course_instructor.username, password='test')

        csv_content = "test_student_wl@example.com,test_student_wl,Test Student,USA"
        uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
        response = self.client.post(self.white_label_course_url, {'students_list': uploaded_file})

        self.assertEqual(response.status_code, 200)
        data = json.loads(response.content)
        # Clean upload: no per-row errors, warnings, or general errors.
        self.assertEquals(len(data['row_errors']), 0)
        self.assertEquals(len(data['warnings']), 0)
        self.assertEquals(len(data['general_errors']), 0)

        manual_enrollments = ManualEnrollmentAudit.objects.all()
        self.assertEqual(manual_enrollments.count(), 1)
        self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)

        # Verify enrollment modes to be CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG
        for enrollment in manual_enrollments:
            self.assertEqual(enrollment.enrollment.mode, CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG)
@attr(shard=1)
@ddt.ddt
class TestInstructorAPIEnrollment(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test enrollment modification endpoint.
This test does NOT exhaustively test state changes, that is the
job of test_enrollment. This tests the response and action switch.
"""
    @classmethod
    def setUpClass(cls):
        """Create the shared test course and precompute email URL fragments."""
        super(TestInstructorAPIEnrollment, cls).setUpClass()
        cls.course = CourseFactory.create()

        # Email URL values used to build the expected notification bodies.
        cls.site_name = configuration_helpers.get_value(
            'SITE_NAME',
            settings.SITE_NAME
        )
        cls.about_path = '/courses/{}/about'.format(cls.course.id)
        cls.course_path = '/courses/{}/'.format(cls.course.id)
    def setUp(self):
        """Log in as instructor and set up enrolled/unenrolled/allowed users."""
        super(TestInstructorAPIEnrollment, self).setUp()

        self.request = RequestFactory().request()
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')

        # A user already enrolled in the course.
        self.enrolled_student = UserFactory(username='EnrolledStudent', first_name='Enrolled', last_name='Student')
        CourseEnrollment.enroll(
            self.enrolled_student,
            self.course.id
        )
        # A registered user who is not enrolled.
        self.notenrolled_student = UserFactory(username='NotEnrolledStudent', first_name='NotEnrolled',
                                               last_name='Student')

        # Create invited, but not registered, user
        cea = CourseEnrollmentAllowed(email='robot-allowed@robot.org', course_id=self.course.id)
        cea.save()
        self.allowed_email = 'robot-allowed@robot.org'

        # An email with no matching account at all.
        self.notregistered_email = 'robot-not-an-email-yet@robot.org'
        self.assertEqual(User.objects.filter(email=self.notregistered_email).count(), 0)

        # uncomment to enable enable printing of large diffs
        # from failed assertions in the event of a test failure.
        # (comment because pylint C0103(invalid-name))
        # self.maxDiff = None
def test_missing_params(self):
""" Test missing all query parameters. """
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url)
self.assertEqual(response.status_code, 400)
def test_bad_action(self):
""" Test with an invalid action. """
action = 'robot-not-an-action'
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.enrolled_student.email, 'action': action})
self.assertEqual(response.status_code, 400)
    def test_invalid_email(self):
        """A malformed email identifier is reported as invalid, not enrolled."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': 'percivaloctavius@', 'action': 'enroll', 'email_students': False})
        self.assertEqual(response.status_code, 200)

        # test the response data
        expected = {
            "action": "enroll",
            'auto_enroll': False,
            "results": [
                {
                    "identifier": 'percivaloctavius@',
                    "invalidIdentifier": True,
                }
            ]
        }

        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)
    def test_invalid_username(self):
        """An identifier matching no user is reported as invalid."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url,
                                    {'identifiers': 'percivaloctavius', 'action': 'enroll', 'email_students': False})
        self.assertEqual(response.status_code, 200)

        # test the response data
        expected = {
            "action": "enroll",
            'auto_enroll': False,
            "results": [
                {
                    "identifier": 'percivaloctavius',
                    "invalidIdentifier": True,
                }
            ]
        }

        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)
    def test_enroll_with_username(self):
        """Students can be identified by username as well as by email."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': self.notenrolled_student.username, 'action': 'enroll',
                                          'email_students': False})
        self.assertEqual(response.status_code, 200)

        # test the response data
        expected = {
            "action": "enroll",
            'auto_enroll': False,
            "results": [
                {
                    "identifier": self.notenrolled_student.username,
                    "before": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    },
                    "after": {
                        "enrollment": True,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    }
                }
            ]
        }

        # One audit record is written for the manual enrollment.
        manual_enrollments = ManualEnrollmentAudit.objects.all()
        self.assertEqual(manual_enrollments.count(), 1)
        self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)
    def test_enroll_without_email(self):
        """Enrolling with email_students=False enrolls but sends no email."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'enroll',
                                          'email_students': False})
        print "type(self.notenrolled_student.email): {}".format(type(self.notenrolled_student.email))
        self.assertEqual(response.status_code, 200)

        # test that the user is now enrolled
        user = User.objects.get(email=self.notenrolled_student.email)
        self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))

        # test the response data
        expected = {
            "action": "enroll",
            "auto_enroll": False,
            "results": [
                {
                    "identifier": self.notenrolled_student.email,
                    "before": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    },
                    "after": {
                        "enrollment": True,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    }
                }
            ]
        }

        manual_enrollments = ManualEnrollmentAudit.objects.all()
        self.assertEqual(manual_enrollments.count(), 1)
        self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox: email_students=False means no notification is sent.
        self.assertEqual(len(mail.outbox), 0)
    @ddt.data('http', 'https')
    def test_enroll_with_email(self, protocol):
        """Enrolling with email_students=True also sends a notification email."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        params = {'identifiers': self.notenrolled_student.email, 'action': 'enroll', 'email_students': True}
        # ``protocol`` drives the scheme used in the links inside the email body.
        environ = {'wsgi.url_scheme': protocol}
        response = self.client.post(url, params, **environ)

        print "type(self.notenrolled_student.email): {}".format(type(self.notenrolled_student.email))
        self.assertEqual(response.status_code, 200)

        # test that the user is now enrolled
        user = User.objects.get(email=self.notenrolled_student.email)
        self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))

        # test the response data
        expected = {
            "action": "enroll",
            "auto_enroll": False,
            "results": [
                {
                    "identifier": self.notenrolled_student.email,
                    "before": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    },
                    "after": {
                        "enrollment": True,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    }
                }
            ]
        }

        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox: one "you have been enrolled" notification.
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            u'You have been enrolled in {}'.format(self.course.display_name)
        )
        self.assertEqual(
            mail.outbox[0].body,
            "Dear NotEnrolled Student\n\nYou have been enrolled in {} "
            "at edx.org by a member of the course staff. "
            "The course should now appear on your edx.org dashboard.\n\n"
            "To start accessing course materials, please visit "
            "{proto}://{site}{course_path}\n\n----\n"
            "This email was automatically sent from edx.org to NotEnrolled Student".format(
                self.course.display_name,
                proto=protocol, site=self.site_name, course_path=self.course_path
            )
        )
    @ddt.data('http', 'https')
    def test_enroll_with_email_not_registered(self, protocol):
        """An unregistered email is allowed-to-enroll and invited to register."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True}
        environ = {'wsgi.url_scheme': protocol}
        response = self.client.post(url, params, **environ)
        # No account exists yet, so the audit records "allowed to enroll".
        manual_enrollments = ManualEnrollmentAudit.objects.all()
        self.assertEqual(manual_enrollments.count(), 1)
        self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ALLOWEDTOENROLL)
        self.assertEqual(response.status_code, 200)

        # Check the outbox: the invitation email includes a registration link.
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            u'You have been invited to register for {}'.format(self.course.display_name)
        )
        self.assertEqual(
            mail.outbox[0].body,
            "Dear student,\n\nYou have been invited to join {} at edx.org by a member of the course staff.\n\n"
            "To finish your registration, please visit {proto}://{site}/register and fill out the "
            "registration form making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
            "Once you have registered and activated your account, "
            "visit {proto}://{site}{about_path} to join the course.\n\n----\n"
            "This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
                self.course.display_name, proto=protocol, site=self.site_name, about_path=self.about_path
            )
        )
    @ddt.data('http', 'https')
    @patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
    def test_enroll_email_not_registered_mktgsite(self, protocol):
        """With the marketing site enabled, the invite omits the course about link."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True}
        environ = {'wsgi.url_scheme': protocol}
        response = self.client.post(url, params, **environ)
        manual_enrollments = ManualEnrollmentAudit.objects.all()
        self.assertEqual(manual_enrollments.count(), 1)
        self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ALLOWEDTOENROLL)
        self.assertEqual(response.status_code, 200)

        self.assertEqual(
            mail.outbox[0].body,
            "Dear student,\n\nYou have been invited to join {display_name}"
            " at edx.org by a member of the course staff.\n\n"
            "To finish your registration, please visit {proto}://{site}/register and fill out the registration form "
            "making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
            "You can then enroll in {display_name}.\n\n----\n"
            "This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
                display_name=self.course.display_name, proto=protocol, site=self.site_name
            )
        )
    @ddt.data('http', 'https')
    def test_enroll_with_email_not_registered_autoenroll(self, protocol):
        """auto_enroll for an unregistered email promises dashboard visibility after signup."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True,
                  'auto_enroll': True}
        environ = {'wsgi.url_scheme': protocol}
        response = self.client.post(url, params, **environ)
        print "type(self.notregistered_email): {}".format(type(self.notregistered_email))
        self.assertEqual(response.status_code, 200)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            u'You have been invited to register for {}'.format(self.course.display_name)
        )
        # Still "allowed to enroll" until the account actually exists.
        manual_enrollments = ManualEnrollmentAudit.objects.all()
        self.assertEqual(manual_enrollments.count(), 1)
        self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ALLOWEDTOENROLL)
        self.assertEqual(
            mail.outbox[0].body,
            "Dear student,\n\nYou have been invited to join {display_name}"
            " at edx.org by a member of the course staff.\n\n"
            "To finish your registration, please visit {proto}://{site}/register and fill out the registration form "
            "making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
            "Once you have registered and activated your account,"
            " you will see {display_name} listed on your dashboard.\n\n----\n"
            "This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
                proto=protocol, site=self.site_name, display_name=self.course.display_name
            )
        )
    def test_unenroll_without_email(self):
        """Unenrolling with email_students=False unenrolls but sends no email."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': self.enrolled_student.email, 'action': 'unenroll',
                                          'email_students': False})
        print "type(self.enrolled_student.email): {}".format(type(self.enrolled_student.email))
        self.assertEqual(response.status_code, 200)

        # test that the user is now unenrolled
        user = User.objects.get(email=self.enrolled_student.email)
        self.assertFalse(CourseEnrollment.is_enrolled(user, self.course.id))

        # test the response data
        expected = {
            "action": "unenroll",
            "auto_enroll": False,
            "results": [
                {
                    "identifier": self.enrolled_student.email,
                    "before": {
                        "enrollment": True,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    },
                    "after": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    }
                }
            ]
        }

        manual_enrollments = ManualEnrollmentAudit.objects.all()
        self.assertEqual(manual_enrollments.count(), 1)
        self.assertEqual(manual_enrollments[0].state_transition, ENROLLED_TO_UNENROLLED)
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox: email_students=False means no notification is sent.
        self.assertEqual(len(mail.outbox), 0)
    def test_unenroll_with_email(self):
        """Unenrolling with email_students=True sends an un-enrollment email."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': self.enrolled_student.email, 'action': 'unenroll',
                                          'email_students': True})
        print "type(self.enrolled_student.email): {}".format(type(self.enrolled_student.email))
        self.assertEqual(response.status_code, 200)

        # test that the user is now unenrolled
        user = User.objects.get(email=self.enrolled_student.email)
        self.assertFalse(CourseEnrollment.is_enrolled(user, self.course.id))

        # test the response data
        expected = {
            "action": "unenroll",
            "auto_enroll": False,
            "results": [
                {
                    "identifier": self.enrolled_student.email,
                    "before": {
                        "enrollment": True,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    },
                    "after": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": False,
                    }
                }
            ]
        }

        manual_enrollments = ManualEnrollmentAudit.objects.all()
        self.assertEqual(manual_enrollments.count(), 1)
        self.assertEqual(manual_enrollments[0].state_transition, ENROLLED_TO_UNENROLLED)
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox: one un-enrollment notification.
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            'You have been un-enrolled from {display_name}'.format(display_name=self.course.display_name,)
        )
        self.assertEqual(
            mail.outbox[0].body,
            "Dear Enrolled Student\n\nYou have been un-enrolled in {display_name} "
            "at edx.org by a member of the course staff. "
            "The course will no longer appear on your edx.org dashboard.\n\n"
            "Your other courses have not been affected.\n\n----\n"
            "This email was automatically sent from edx.org to Enrolled Student".format(
                display_name=self.course.display_name,
            )
        )
    def test_unenroll_with_email_allowed_student(self):
        """Unenrolling an invited-but-unregistered email revokes the invitation."""
        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url,
                                    {'identifiers': self.allowed_email, 'action': 'unenroll', 'email_students': True})
        print "type(self.allowed_email): {}".format(type(self.allowed_email))
        self.assertEqual(response.status_code, 200)

        # test the response data: "allowed" flips to False, no user involved.
        expected = {
            "action": "unenroll",
            "auto_enroll": False,
            "results": [
                {
                    "identifier": self.allowed_email,
                    "before": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": False,
                        "allowed": True,
                    },
                    "after": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": False,
                        "allowed": False,
                    }
                }
            ]
        }

        manual_enrollments = ManualEnrollmentAudit.objects.all()
        self.assertEqual(manual_enrollments.count(), 1)
        self.assertEqual(manual_enrollments[0].state_transition, ALLOWEDTOENROLL_TO_UNENROLLED)
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox: the invitee is told to disregard the invitation.
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            'You have been un-enrolled from {display_name}'.format(display_name=self.course.display_name,)
        )
        self.assertEqual(
            mail.outbox[0].body,
            "Dear Student,\n\nYou have been un-enrolled from course {display_name} by a member of the course staff. "
            "Please disregard the invitation previously sent.\n\n----\n"
            "This email was automatically sent from edx.org to robot-allowed@robot.org".format(
                display_name=self.course.display_name,
            )
        )
    @ddt.data('http', 'https')
    @patch('lms.djangoapps.instructor.enrollment.uses_shib')
    def test_enroll_with_email_not_registered_with_shib(self, protocol, mock_uses_shib):
        """With shib auth, the invite points at the course about page, not /register."""
        mock_uses_shib.return_value = True

        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True}
        environ = {'wsgi.url_scheme': protocol}
        response = self.client.post(url, params, **environ)
        self.assertEqual(response.status_code, 200)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            'You have been invited to register for {display_name}'.format(display_name=self.course.display_name,)
        )
        self.assertEqual(
            mail.outbox[0].body,
            "Dear student,\n\nYou have been invited to join {display_name} at edx.org by a member of the course staff.\n\n"
            "To access the course visit {proto}://{site}{about_path} and register for the course.\n\n----\n"
            "This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
                proto=protocol, site=self.site_name, about_path=self.about_path,
                display_name=self.course.display_name,
            )
        )
@patch('lms.djangoapps.instructor.enrollment.uses_shib')
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_enroll_email_not_registered_shib_mktgsite(self, mock_uses_shib):
# Try with marketing site enabled and shib on
mock_uses_shib.return_value = True
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
# Try with marketing site enabled
with patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
response = self.client.post(url, {'identifiers': self.notregistered_email, 'action': 'enroll',
'email_students': True})
self.assertEqual(response.status_code, 200)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join {} at edx.org by a member of the course staff.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
self.course.display_name,
)
)
    @ddt.data('http', 'https')
    @patch('lms.djangoapps.instructor.enrollment.uses_shib')
    def test_enroll_with_email_not_registered_with_shib_autoenroll(self, protocol, mock_uses_shib):
        """With shib and auto_enroll, the invite links straight to the course page."""
        mock_uses_shib.return_value = True

        url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
        params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True,
                  'auto_enroll': True}
        environ = {'wsgi.url_scheme': protocol}
        response = self.client.post(url, params, **environ)
        print "type(self.notregistered_email): {}".format(type(self.notregistered_email))
        self.assertEqual(response.status_code, 200)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            'You have been invited to register for {display_name}'.format(display_name=self.course.display_name,)
        )
        self.assertEqual(
            mail.outbox[0].body,
            "Dear student,\n\nYou have been invited to join {display_name}"
            " at edx.org by a member of the course staff.\n\n"
            "To access the course visit {proto}://{site}{course_path} and login.\n\n----\n"
            "This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
                display_name=self.course.display_name,
                proto=protocol, site=self.site_name, course_path=self.course_path
            )
        )
    def test_enroll_already_enrolled_student(self):
        """
        Ensure that already enrolled "verified" students cannot be downgraded
        to "honor"
        """
        course_enrollment = CourseEnrollment.objects.get(
            user=self.enrolled_student, course_id=self.course.id
        )
        # make this enrollment "verified"
        course_enrollment.mode = u'verified'
        course_enrollment.save()
        self.assertEqual(course_enrollment.mode, u'verified')

        # now re-enroll the student through the instructor dash
        self._change_student_enrollment(self.enrolled_student, self.course, 'enroll')

        # affirm that the student is still in "verified" mode
        course_enrollment = CourseEnrollment.objects.get(
            user=self.enrolled_student, course_id=self.course.id
        )
        # The re-enrollment is audited as ENROLLED -> ENROLLED (no-op transition).
        manual_enrollments = ManualEnrollmentAudit.objects.all()
        self.assertEqual(manual_enrollments.count(), 1)
        self.assertEqual(manual_enrollments[0].state_transition, ENROLLED_TO_ENROLLED)
        self.assertEqual(course_enrollment.mode, u"verified")
    def create_paid_course(self):
        """
        create paid course mode.

        Returns the new course; ``min_price=50`` makes its honor mode paid,
        and ``self.instructor`` is granted the instructor role on it.
        """
        paid_course = CourseFactory.create()
        CourseModeFactory.create(course_id=paid_course.id, min_price=50, mode_slug=CourseMode.HONOR)
        CourseInstructorRole(paid_course.id).add_users(self.instructor)
        return paid_course
    def test_reason_field_should_not_be_empty(self):
        """
        test to check that reason field should not be empty when
        manually enrolling the students for the paid courses.
        """
        paid_course = self.create_paid_course()
        url = reverse('students_update_enrollment', kwargs={'course_id': paid_course.id.to_deprecated_string()})
        # No 'reason' supplied -> the row is rejected for a paid course.
        params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': False,
                  'auto_enroll': False}
        response = self.client.post(url, params)
        manual_enrollments = ManualEnrollmentAudit.objects.all()
        self.assertEqual(manual_enrollments.count(), 0)

        # test the response data
        expected = {
            "action": "enroll",
            "auto_enroll": False,
            "results": [
                {
                    "error": True
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)
    def test_unenrolled_allowed_to_enroll_user(self):
        """
        test to unenroll allow to enroll user.
        """
        paid_course = self.create_paid_course()
        url = reverse('students_update_enrollment', kwargs={'course_id': paid_course.id.to_deprecated_string()})
        params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': False,
                  'auto_enroll': False, 'reason': 'testing..'}
        response = self.client.post(url, params)
        # Unregistered email -> only "allowed to enroll" is recorded.
        manual_enrollments = ManualEnrollmentAudit.objects.all()
        self.assertEqual(manual_enrollments.count(), 1)
        self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ALLOWEDTOENROLL)
        self.assertEqual(response.status_code, 200)

        # now registered the user
        UserFactory(email=self.notregistered_email)
        url = reverse('students_update_enrollment', kwargs={'course_id': paid_course.id.to_deprecated_string()})
        params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': False,
                  'auto_enroll': False, 'reason': 'testing'}
        response = self.client.post(url, params)
        # Re-enrolling after registration converts the allowance into an enrollment.
        manual_enrollments = ManualEnrollmentAudit.objects.all()
        self.assertEqual(manual_enrollments.count(), 2)
        self.assertEqual(manual_enrollments[1].state_transition, ALLOWEDTOENROLL_TO_ENROLLED)
        self.assertEqual(response.status_code, 200)

        # test the response data
        expected = {
            "action": "enroll",
            "auto_enroll": False,
            "results": [
                {
                    "identifier": self.notregistered_email,
                    "before": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": True,
                    },
                    "after": {
                        "enrollment": True,
                        "auto_enroll": False,
                        "user": True,
                        "allowed": True,
                    }
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)
    def test_unenrolled_already_not_enrolled_user(self):
        """
        test unenrolled user already not enrolled in a course.
        """
        paid_course = self.create_paid_course()
        # Sanity check: the email has no enrollment in this course.
        course_enrollment = CourseEnrollment.objects.filter(
            user__email=self.notregistered_email, course_id=paid_course.id
        )
        self.assertEqual(course_enrollment.count(), 0)

        url = reverse('students_update_enrollment', kwargs={'course_id': paid_course.id.to_deprecated_string()})
        params = {'identifiers': self.notregistered_email, 'action': 'unenroll', 'email_students': False,
                  'auto_enroll': False, 'reason': 'testing'}

        response = self.client.post(url, params)
        self.assertEqual(response.status_code, 200)

        # test the response data: nothing changes, but the no-op is still audited.
        expected = {
            "action": "unenroll",
            "auto_enroll": False,
            "results": [
                {
                    "identifier": self.notregistered_email,
                    "before": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": False,
                        "allowed": False,
                    },
                    "after": {
                        "enrollment": False,
                        "auto_enroll": False,
                        "user": False,
                        "allowed": False,
                    }
                }
            ]
        }
        manual_enrollments = ManualEnrollmentAudit.objects.all()
        self.assertEqual(manual_enrollments.count(), 1)
        self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_UNENROLLED)
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)
    def test_unenroll_and_enroll_verified(self):
        """
        Test that unenrolling and enrolling a student from a verified track
        results in that student being in the default track
        """
        course_enrollment = CourseEnrollment.objects.get(
            user=self.enrolled_student, course_id=self.course.id
        )
        # upgrade enrollment
        course_enrollment.mode = u'verified'
        course_enrollment.save()
        self.assertEqual(course_enrollment.mode, u'verified')

        self._change_student_enrollment(self.enrolled_student, self.course, 'unenroll')
        self._change_student_enrollment(self.enrolled_student, self.course, 'enroll')

        # Re-enrollment lands in the default mode, not the old 'verified' mode.
        course_enrollment = CourseEnrollment.objects.get(
            user=self.enrolled_student, course_id=self.course.id
        )
        self.assertEqual(course_enrollment.mode, CourseMode.DEFAULT_MODE_SLUG)
def _change_student_enrollment(self, user, course, action):
"""
Helper function that posts to 'students_update_enrollment' to change
a student's enrollment
"""
url = reverse(
'students_update_enrollment',
kwargs={'course_id': course.id.to_deprecated_string()},
)
params = {
'identifiers': user.email,
'action': action,
'email_students': True,
'reason': 'change user enrollment'
}
response = self.client.post(url, params)
self.assertEqual(response.status_code, 200)
return response
@attr(shard=1)
@ddt.ddt
class TestInstructorAPIBulkBetaEnrollment(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test bulk beta modify access endpoint.

    Exercises the 'bulk_beta_modify_access' view: adding/removing beta
    testers by email or username, with and without notification emails,
    and with and without auto-enrollment.
    """
    @classmethod
    def setUpClass(cls):
        super(TestInstructorAPIBulkBetaEnrollment, cls).setUpClass()
        cls.course = CourseFactory.create()

        # Email URL values used to verify the notification email bodies below.
        cls.site_name = configuration_helpers.get_value(
            'SITE_NAME',
            settings.SITE_NAME
        )
        cls.about_path = '/courses/{}/about'.format(cls.course.id)
        cls.course_path = '/courses/{}/'.format(cls.course.id)

    def setUp(self):
        """Log in an instructor and set up one beta tester plus users who are not (yet) involved."""
        super(TestInstructorAPIBulkBetaEnrollment, self).setUp()
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')

        self.beta_tester = BetaTesterFactory(course_key=self.course.id)
        CourseEnrollment.enroll(
            self.beta_tester,
            self.course.id
        )
        self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.beta_tester))

        self.notenrolled_student = UserFactory(username='NotEnrolledStudent')
        self.notregistered_email = 'robot-not-an-email-yet@robot.org'
        self.assertEqual(User.objects.filter(email=self.notregistered_email).count(), 0)
        self.request = RequestFactory().request()

        # uncomment to enable printing of large diffs
        # from failed assertions in the event of a test failure.
        # (commented out because pylint C0103(invalid-name))
        # self.maxDiff = None

    def test_missing_params(self):
        """ Test missing all query parameters. """
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url)
        self.assertEqual(response.status_code, 400)

    def test_bad_action(self):
        """ Test with an invalid action. """
        action = 'robot-not-an-action'
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': self.beta_tester.email, 'action': action})
        self.assertEqual(response.status_code, 400)

    def add_notenrolled(self, response, identifier):
        """
        Test Helper Method (not a test, called by other tests)

        Takes a client response from a call to bulk_beta_modify_access with 'email_students': False,
        and the student identifier (email or username) given as 'identifiers' in the request.

        Asserts the response returns cleanly, that the student was added as a beta tester, and the
        response properly contains their identifier, 'error': False, and 'userDoesNotExist': False.
        Additionally asserts no email was sent.
        """
        self.assertEqual(response.status_code, 200)
        self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student))
        # test the response data
        expected = {
            "action": "add",
            "results": [
                {
                    "identifier": identifier,
                    "error": False,
                    "userDoesNotExist": False
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox: 'email_students': False means no mail goes out.
        self.assertEqual(len(mail.outbox), 0)

    def test_add_notenrolled_email(self):
        """Add a beta tester by email; no auto-enroll, so no enrollment is created."""
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': False})
        self.add_notenrolled(response, self.notenrolled_student.email)
        self.assertFalse(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))

    def test_add_notenrolled_email_autoenroll(self):
        """Add a beta tester by email with auto_enroll, which also enrolls them."""
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': False, 'auto_enroll': True})
        self.add_notenrolled(response, self.notenrolled_student.email)
        self.assertTrue(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))

    def test_add_notenrolled_username(self):
        """Add a beta tester by username; no auto-enroll, so no enrollment is created."""
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': self.notenrolled_student.username, 'action': 'add', 'email_students': False})
        self.add_notenrolled(response, self.notenrolled_student.username)
        self.assertFalse(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))

    def test_add_notenrolled_username_autoenroll(self):
        """Add a beta tester by username with auto_enroll, which also enrolls them."""
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': self.notenrolled_student.username, 'action': 'add', 'email_students': False, 'auto_enroll': True})
        self.add_notenrolled(response, self.notenrolled_student.username)
        self.assertTrue(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))

    @ddt.data('http', 'https')
    def test_add_notenrolled_with_email(self, protocol):
        """Adding with email_students=True sends an invitation linking to the course's about page."""
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        params = {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True}
        # The request scheme must be reflected in the link inside the email.
        environ = {'wsgi.url_scheme': protocol}
        response = self.client.post(url, params, **environ)

        self.assertEqual(response.status_code, 200)
        self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student))
        # test the response data
        expected = {
            "action": "add",
            "results": [
                {
                    "identifier": self.notenrolled_student.email,
                    "error": False,
                    "userDoesNotExist": False
                }
            ]
        }

        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            'You have been invited to a beta test for {display_name}'.format(display_name=self.course.display_name,)
        )
        self.assertEqual(
            mail.outbox[0].body,
            u"Dear {student_name}\n\nYou have been invited to be a beta tester "
            "for {display_name} at edx.org by a member of the course staff.\n\n"
            "Visit {proto}://{site}{about_path} to join "
            "the course and begin the beta test.\n\n----\n"
            "This email was automatically sent from edx.org to {student_email}".format(
                display_name=self.course.display_name,
                student_name=self.notenrolled_student.profile.name,
                student_email=self.notenrolled_student.email,
                proto=protocol,
                site=self.site_name,
                about_path=self.about_path
            )
        )

    @ddt.data('http', 'https')
    def test_add_notenrolled_with_email_autoenroll(self, protocol):
        """With auto_enroll the invitation email links straight to the course content instead."""
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        params = {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True,
                  'auto_enroll': True}
        environ = {'wsgi.url_scheme': protocol}
        response = self.client.post(url, params, **environ)

        self.assertEqual(response.status_code, 200)
        self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student))
        # test the response data
        expected = {
            "action": "add",
            "results": [
                {
                    "identifier": self.notenrolled_student.email,
                    "error": False,
                    "userDoesNotExist": False
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            'You have been invited to a beta test for {display_name}'.format(display_name=self.course.display_name)
        )
        self.assertEqual(
            mail.outbox[0].body,
            u"Dear {student_name}\n\nYou have been invited to be a beta tester "
            "for {display_name} at edx.org by a member of the course staff.\n\n"
            "To start accessing course materials, please visit "
            "{proto}://{site}{course_path}\n\n----\n"
            "This email was automatically sent from edx.org to {student_email}".format(
                display_name=self.course.display_name,
                student_name=self.notenrolled_student.profile.name,
                student_email=self.notenrolled_student.email,
                proto=protocol,
                site=self.site_name,
                course_path=self.course_path
            )
        )

    @patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
    def test_add_notenrolled_email_mktgsite(self):
        # Try with marketing site enabled: the email points at edx.org generically.
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True})

        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            mail.outbox[0].body,
            u"Dear {}\n\nYou have been invited to be a beta tester "
            "for {} at edx.org by a member of the course staff.\n\n"
            "Visit edx.org to enroll in the course and begin the beta test.\n\n----\n"
            "This email was automatically sent from edx.org to {}".format(
                self.notenrolled_student.profile.name,
                self.course.display_name,
                self.notenrolled_student.email,
            )
        )

    def test_enroll_with_email_not_registered(self):
        # User doesn't exist: the endpoint reports the error per-identifier and sends no mail.
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url,
                                    {'identifiers': self.notregistered_email, 'action': 'add', 'email_students': True,
                                     'reason': 'testing'})
        self.assertEqual(response.status_code, 200)
        # test the response data
        expected = {
            "action": "add",
            "results": [
                {
                    "identifier": self.notregistered_email,
                    "error": True,
                    "userDoesNotExist": True
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 0)

    def test_remove_without_email(self):
        """Remove a beta tester with email_students=False; no notification is sent."""
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url,
                                    {'identifiers': self.beta_tester.email, 'action': 'remove', 'email_students': False,
                                     'reason': 'testing'})
        self.assertEqual(response.status_code, 200)

        # Works around a caching bug which supposedly can't happen in prod. The instance here is not ==
        # the instance fetched from the email above which had its cache cleared
        if hasattr(self.beta_tester, '_roles'):
            del self.beta_tester._roles
        self.assertFalse(CourseBetaTesterRole(self.course.id).has_user(self.beta_tester))

        # test the response data
        expected = {
            "action": "remove",
            "results": [
                {
                    "identifier": self.beta_tester.email,
                    "error": False,
                    "userDoesNotExist": False
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 0)

    def test_remove_with_email(self):
        """Remove a beta tester with email_students=True; a removal notification is sent."""
        url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url,
                                    {'identifiers': self.beta_tester.email, 'action': 'remove', 'email_students': True,
                                     'reason': 'testing'})
        self.assertEqual(response.status_code, 200)

        # Works around a caching bug which supposedly can't happen in prod. The instance here is not ==
        # the instance fetched from the email above which had its cache cleared
        if hasattr(self.beta_tester, '_roles'):
            del self.beta_tester._roles
        self.assertFalse(CourseBetaTesterRole(self.course.id).has_user(self.beta_tester))

        # test the response data
        expected = {
            "action": "remove",
            "results": [
                {
                    "identifier": self.beta_tester.email,
                    "error": False,
                    "userDoesNotExist": False
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

        # Check the outbox
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(
            mail.outbox[0].subject,
            u'You have been removed from a beta test for {display_name}'.format(display_name=self.course.display_name,)
        )
        self.assertEqual(
            mail.outbox[0].body,
            "Dear {full_name}\n\nYou have been removed as a beta tester for "
            "{display_name} at edx.org by a member of the course staff. "
            "The course will remain on your dashboard, but you will no longer "
            "be part of the beta testing group.\n\n"
            "Your other courses have not been affected.\n\n----\n"
            "This email was automatically sent from edx.org to {email_address}".format(
                display_name=self.course.display_name,
                full_name=self.beta_tester.profile.name,
                email_address=self.beta_tester.email
            )
        )
@attr(shard=1)
class TestInstructorAPILevelsAccess(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test endpoints whereby instructors can change permissions
    of other users.

    This test does NOT test whether the actions had an effect on the
    database, that is the job of test_access.
    This tests the response and action switch.
    Actually, modify_access does not have a very meaningful
    response yet, so only the status code is tested.
    """
    @classmethod
    def setUpClass(cls):
        super(TestInstructorAPILevelsAccess, cls).setUpClass()
        cls.course = CourseFactory.create()

    def setUp(self):
        """Log in one instructor and create users of each role to act upon."""
        super(TestInstructorAPILevelsAccess, self).setUp()
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')

        self.other_instructor = InstructorFactory(course_key=self.course.id)
        self.other_staff = StaffFactory(course_key=self.course.id)
        self.other_user = UserFactory()

    def test_modify_access_noparams(self):
        """ Test missing all query parameters. """
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url)
        self.assertEqual(response.status_code, 400)

    def test_modify_access_bad_action(self):
        """ Test with an invalid action parameter. """
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'unique_student_identifier': self.other_staff.email,
            'rolename': 'staff',
            'action': 'robot-not-an-action',
        })
        self.assertEqual(response.status_code, 400)

    def test_modify_access_bad_role(self):
        """ Test with an invalid rolename parameter. """
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'unique_student_identifier': self.other_staff.email,
            'rolename': 'robot-not-a-roll',
            'action': 'revoke',
        })
        self.assertEqual(response.status_code, 400)

    def test_modify_access_allow(self):
        """Grant staff access to a user identified by email."""
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'unique_student_identifier': self.other_user.email,
            'rolename': 'staff',
            'action': 'allow',
        })
        self.assertEqual(response.status_code, 200)

    def test_modify_access_allow_with_uname(self):
        """Grant staff access to a user identified by username."""
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'unique_student_identifier': self.other_instructor.username,
            'rolename': 'staff',
            'action': 'allow',
        })
        self.assertEqual(response.status_code, 200)

    def test_modify_access_revoke(self):
        """Revoke staff access from a user identified by email."""
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'unique_student_identifier': self.other_staff.email,
            'rolename': 'staff',
            'action': 'revoke',
        })
        self.assertEqual(response.status_code, 200)

    def test_modify_access_revoke_with_username(self):
        """Revoke staff access from a user identified by username."""
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'unique_student_identifier': self.other_staff.username,
            'rolename': 'staff',
            'action': 'revoke',
        })
        self.assertEqual(response.status_code, 200)

    def test_modify_access_with_fake_user(self):
        """An unknown identifier answers 200 but flags userDoesNotExist."""
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'unique_student_identifier': 'GandalfTheGrey',
            'rolename': 'staff',
            'action': 'revoke',
        })
        self.assertEqual(response.status_code, 200)
        expected = {
            'unique_student_identifier': 'GandalfTheGrey',
            'userDoesNotExist': True,
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

    def test_modify_access_with_inactive_user(self):
        """An inactive user answers 200 but flags inactiveUser."""
        self.other_user.is_active = False
        self.other_user.save()  # pylint: disable=no-member
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'unique_student_identifier': self.other_user.username,
            'rolename': 'beta',
            'action': 'allow',
        })
        self.assertEqual(response.status_code, 200)
        expected = {
            'unique_student_identifier': self.other_user.username,
            'inactiveUser': True,
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

    def test_modify_access_revoke_not_allowed(self):
        """ Test revoking access that a user does not have. """
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'unique_student_identifier': self.other_staff.email,
            'rolename': 'instructor',
            'action': 'revoke',
        })
        self.assertEqual(response.status_code, 200)

    def test_modify_access_revoke_self(self):
        """
        Test that an instructor cannot remove instructor privileges from themself.
        """
        url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'unique_student_identifier': self.instructor.email,
            'rolename': 'instructor',
            'action': 'revoke',
        })
        self.assertEqual(response.status_code, 200)
        # check response content: the request is refused with a dedicated flag.
        expected = {
            'unique_student_identifier': self.instructor.username,
            'rolename': 'instructor',
            'action': 'revoke',
            'removingSelfAsInstructor': True,
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

    def test_list_course_role_members_noparams(self):
        """ Test missing all query parameters. """
        url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url)
        self.assertEqual(response.status_code, 400)

    def test_list_course_role_members_bad_rolename(self):
        """ Test with an invalid rolename parameter. """
        url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'rolename': 'robot-not-a-rolename',
        })
        self.assertEqual(response.status_code, 400)

    def test_list_course_role_members_staff(self):
        """Listing the 'staff' role returns the one staff user created in setUp."""
        url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'rolename': 'staff',
        })
        self.assertEqual(response.status_code, 200)

        # check response content
        expected = {
            'course_id': self.course.id.to_deprecated_string(),
            'staff': [
                {
                    'username': self.other_staff.username,
                    'email': self.other_staff.email,
                    'first_name': self.other_staff.first_name,
                    'last_name': self.other_staff.last_name,
                }
            ]
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

    def test_list_course_role_members_beta(self):
        """Listing the 'beta' role returns an empty list (no beta testers created)."""
        url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'rolename': 'beta',
        })
        self.assertEqual(response.status_code, 200)

        # check response content
        expected = {
            'course_id': self.course.id.to_deprecated_string(),
            'beta': []
        }
        res_json = json.loads(response.content)
        self.assertEqual(res_json, expected)

    def test_update_forum_role_membership(self):
        """
        Test update forum role membership with user's email and username.
        """
        # Seed forum roles for course.
        seed_permissions_roles(self.course.id)

        # Exercise every (user, identifier, rolename, action) combination.
        for user in [self.instructor, self.other_user]:
            for identifier_attr in [user.email, user.username]:
                for rolename in ["Administrator", "Moderator", "Community TA"]:
                    for action in ["allow", "revoke"]:
                        self.assert_update_forum_role_membership(user, identifier_attr, rolename, action)

    def assert_update_forum_role_membership(self, current_user, identifier, rolename, action):
        """
        Test update forum role membership.

        Get unique_student_identifier, rolename and action and update forum role.
        """
        url = reverse('update_forum_role_membership', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(
            url,
            {
                'unique_student_identifier': identifier,
                'rolename': rolename,
                'action': action,
            }
        )

        # Status code should be 200.
        self.assertEqual(response.status_code, 200)

        # Verify the role set actually changed in the requested direction.
        user_roles = current_user.roles.filter(course_id=self.course.id).values_list("name", flat=True)
        if action == 'allow':
            self.assertIn(rolename, user_roles)
        elif action == 'revoke':
            self.assertNotIn(rolename, user_roles)
@attr(shard=1)
@ddt.ddt
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_PAID_COURSE_REGISTRATION': True})
class TestInstructorAPILevelsDataDump(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints that show data without side effects.
"""
    @classmethod
    def setUpClass(cls):
        # One course shared by every test in this class (SharedModuleStoreTestCase).
        super(TestInstructorAPILevelsDataDump, cls).setUpClass()
        cls.course = CourseFactory.create()
def setUp(self):
super(TestInstructorAPILevelsDataDump, self).setUp()
self.course_mode = CourseMode(course_id=self.course.id,
mode_slug="honor",
mode_display_name="honor cert",
min_price=40)
self.course_mode.save()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.cart = Order.get_cart_for_user(self.instructor)
self.coupon_code = 'abcde'
self.coupon = Coupon(code=self.coupon_code, description='testing code', course_id=self.course.id,
percentage_discount=10, created_by=self.instructor, is_active=True)
self.coupon.save()
# Create testing invoice 1
self.sale_invoice_1 = Invoice.objects.create(
total_amount=1234.32, company_name='Test1', company_contact_name='TestName', company_contact_email='Test@company.com',
recipient_name='Testw', recipient_email='test1@test.com', customer_reference_number='2Fwe23S',
internal_reference="A", course_id=self.course.id, is_valid=True
)
self.invoice_item = CourseRegistrationCodeInvoiceItem.objects.create(
invoice=self.sale_invoice_1,
qty=1,
unit_price=1234.32,
course_id=self.course.id
)
self.students = [UserFactory() for _ in xrange(6)]
for student in self.students:
CourseEnrollment.enroll(student, self.course.id)
self.students_who_may_enroll = self.students + [UserFactory() for _ in range(5)]
for student in self.students_who_may_enroll:
CourseEnrollmentAllowed.objects.create(
email=student.email, course_id=self.course.id
)
def register_with_redemption_code(self, user, code):
"""
enroll user using a registration code
"""
redeem_url = reverse('shoppingcart.views.register_code_redemption', args=[code], is_dashboard_endpoint=False)
self.client.login(username=user.username, password='test')
response = self.client.get(redeem_url)
self.assertEquals(response.status_code, 200)
# check button text
self.assertIn('Activate Course Enrollment', response.content)
response = self.client.post(redeem_url)
self.assertEquals(response.status_code, 200)
    def test_invalidate_sale_record(self):
        """
        Exercise the sale_validation endpoint through its full state machine:
        invalidate, double-invalidate (error), re-validate, double-re-validate
        (error), missing/invalid parameters (errors).
        """
        for i in range(2):
            course_registration_code = CourseRegistrationCode(
                code='sale_invoice{}'.format(i),
                course_id=self.course.id.to_deprecated_string(),
                created_by=self.instructor,
                invoice=self.sale_invoice_1,
                invoice_item=self.invoice_item,
                mode_slug='honor'
            )
            course_registration_code.save()

        data = {'invoice_number': self.sale_invoice_1.id, 'event_type': "invalidate"}
        url = reverse('sale_validation', kwargs={'course_id': self.course.id.to_deprecated_string()})
        self.assert_request_status_code(200, url, method="POST", data=data)

        # Now try to fetch data against a non-existing invoice number
        test_data_1 = {'invoice_number': 100, 'event_type': "invalidate"}
        self.assert_request_status_code(404, url, method="POST", data=test_data_1)

        # Now invalidate the same invoice number again and expect a Bad Request
        response = self.assert_request_status_code(400, url, method="POST", data=data)
        self.assertIn("The sale associated with this invoice has already been invalidated.", response.content)

        # now re_validate the invoice number
        data['event_type'] = "re_validate"
        self.assert_request_status_code(200, url, method="POST", data=data)

        # Now re_validate the same active invoice number and expect a Bad Request
        response = self.assert_request_status_code(400, url, method="POST", data=data)
        self.assertIn("This invoice is already active.", response.content)

        # Missing event_type parameter
        test_data_2 = {'invoice_number': self.sale_invoice_1.id}
        response = self.assert_request_status_code(400, url, method="POST", data=test_data_2)
        self.assertIn("Missing required event_type parameter", response.content)

        # Missing invoice_number parameter
        test_data_3 = {'event_type': "re_validate"}
        response = self.assert_request_status_code(400, url, method="POST", data=test_data_3)
        self.assertIn("Missing required invoice_number parameter", response.content)

        # submitting a non-integer invoice number
        data['invoice_number'] = 'testing'
        response = self.assert_request_status_code(400, url, method="POST", data=data)
        self.assertIn("invoice_number must be an integer, {value} provided".format(value=data['invoice_number']), response.content)
    def test_get_sale_order_records_features_csv(self):
        """
        Test that the response from get_sale_order_records is in csv format.
        """
        # add the coupon code for the course
        coupon = Coupon(
            code='test_code', description='test_description', course_id=self.course.id,
            percentage_discount='10', created_by=self.instructor, is_active=True
        )
        coupon.save()

        # Make it a business order so billing details apply.
        self.cart.order_type = 'business'
        self.cart.save()
        self.cart.add_billing_details(company_name='Test Company', company_contact_name='Test',
                                      company_contact_email='test@123', recipient_name='R1',
                                      recipient_email='', customer_reference_number='PO#23')
        paid_course_reg_item = PaidCourseRegistration.add_to_order(
            self.cart,
            self.course.id,
            mode_slug=CourseMode.HONOR
        )

        # update the quantity of the cart item paid_course_reg_item
        resp = self.client.post(
            reverse('shoppingcart.views.update_user_cart', is_dashboard_endpoint=False),
            {'ItemId': paid_course_reg_item.id, 'qty': '4'}
        )
        self.assertEqual(resp.status_code, 200)

        # apply the coupon code to the item in the cart
        resp = self.client.post(
            reverse('shoppingcart.views.use_code', is_dashboard_endpoint=False),
            {'code': coupon.code}
        )
        self.assertEqual(resp.status_code, 200)
        self.cart.purchase()

        # get the updated item
        item = self.cart.orderitem_set.all().select_subclasses()[0]
        # get the redeemed coupon information
        coupon_redemption = CouponRedemption.objects.select_related('coupon').filter(order=self.cart)

        sale_order_url = reverse('get_sale_order_records', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(sale_order_url)
        self.assertEqual(response['Content-Type'], 'text/csv')
        # '36' is the discounted unit price: $40 honor price minus the 10%
        # coupon (see setUp's CourseMode min_price) -- confirm if pricing changes.
        self.assertIn('36', response.content.split('\r\n')[1])
        self.assertIn(str(item.unit_cost), response.content.split('\r\n')[1],)
        self.assertIn(str(item.list_price), response.content.split('\r\n')[1],)
        self.assertIn(item.status, response.content.split('\r\n')[1],)
        self.assertIn(coupon_redemption[0].coupon.code, response.content.split('\r\n')[1],)
    def test_coupon_redeem_count_in_ecommerce_section(self):
        """
        Test that checks the redeem count in the instructor_dashboard coupon section
        """
        # add the coupon code for the course
        coupon = Coupon(
            code='test_code', description='test_description', course_id=self.course.id,
            percentage_discount='10', created_by=self.instructor, is_active=True
        )
        coupon.save()

        # Coupon Redeem Count only visible for Financial Admins.
        CourseFinanceAdminRole(self.course.id).add_users(self.instructor)

        PaidCourseRegistration.add_to_order(self.cart, self.course.id)
        # apply the coupon code to the item in the cart
        resp = self.client.post(
            reverse('shoppingcart.views.use_code', is_dashboard_endpoint=False),
            {'code': coupon.code}
        )
        self.assertEqual(resp.status_code, 200)

        # URL for instructor dashboard
        instructor_dashboard = reverse(
            'instructor_dashboard',
            kwargs={'course_id': self.course.id.to_deprecated_string()},
            is_dashboard_endpoint=False
        )
        # visit the instructor dashboard page and
        # check that the coupon redeem count should be 0 (applied but not purchased)
        resp = self.client.get(instructor_dashboard)
        self.assertEqual(resp.status_code, 200)
        self.assertIn('Number Redeemed', resp.content)
        self.assertIn('<td>0</td>', resp.content)

        # now make the payment of your cart items
        self.cart.purchase()
        # visit the instructor dashboard page and
        # check that the coupon redeem count should be 1
        resp = self.client.get(instructor_dashboard)
        self.assertEqual(resp.status_code, 200)
        self.assertIn('Number Redeemed', resp.content)
        self.assertIn('<td>1</td>', resp.content)
def test_get_sale_records_features_csv(self):
"""
Test that the response from get_sale_records is in csv format.
"""
for i in range(2):
course_registration_code = CourseRegistrationCode(
code='sale_invoice{}'.format(i),
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
course_registration_code.save()
url = reverse(
'get_sale_records',
kwargs={'course_id': self.course.id.to_deprecated_string()}
)
response = self.client.post(url + '/csv', {})
self.assertEqual(response['Content-Type'], 'text/csv')
    def test_get_sale_records_features_json(self):
        """
        Test that the response from get_sale_records is in json format.
        """
        for i in range(5):
            course_registration_code = CourseRegistrationCode(
                code='sale_invoice{}'.format(i),
                course_id=self.course.id.to_deprecated_string(),
                created_by=self.instructor,
                invoice=self.sale_invoice_1,
                invoice_item=self.invoice_item,
                mode_slug='honor'
            )
            course_registration_code.save()

        url = reverse('get_sale_records', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {})
        res_json = json.loads(response.content)
        self.assertIn('sale', res_json)

        # Every row should reflect invoice 1's attributes with no codes used yet.
        for res in res_json['sale']:
            self.validate_sale_records_response(
                res,
                course_registration_code,
                self.sale_invoice_1,
                0,
                invoice_item=self.invoice_item
            )
    def test_get_sale_records_features_with_multiple_invoices(self):
        """
        Test that the response from get_sale_records is in json format for multiple invoices
        """
        for i in range(5):
            course_registration_code = CourseRegistrationCode(
                code='qwerty{}'.format(i),
                course_id=self.course.id.to_deprecated_string(),
                created_by=self.instructor,
                invoice=self.sale_invoice_1,
                invoice_item=self.invoice_item,
                mode_slug='honor'
            )
            course_registration_code.save()

        # Create test invoice 2
        sale_invoice_2 = Invoice.objects.create(
            total_amount=1234.32, company_name='Test1', company_contact_name='TestName', company_contact_email='Test@company.com',
            recipient_name='Testw_2', recipient_email='test2@test.com', customer_reference_number='2Fwe23S',
            internal_reference="B", course_id=self.course.id
        )

        invoice_item_2 = CourseRegistrationCodeInvoiceItem.objects.create(
            invoice=sale_invoice_2,
            qty=1,
            unit_price=1234.32,
            course_id=self.course.id
        )

        for i in range(5):
            course_registration_code = CourseRegistrationCode(
                code='xyzmn{}'.format(i), course_id=self.course.id.to_deprecated_string(),
                created_by=self.instructor, invoice=sale_invoice_2, invoice_item=invoice_item_2, mode_slug='honor'
            )
            course_registration_code.save()

        url = reverse('get_sale_records', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {})
        res_json = json.loads(response.content)
        self.assertIn('sale', res_json)

        # NOTE(review): after the loops, course_registration_code is the last
        # 'xyzmn' code (belonging to invoice 2) but is passed for both
        # invoices' validation; the validator only reads created_by.username,
        # which is the same instructor, so the check still passes -- confirm
        # this is intentional.
        self.validate_sale_records_response(
            res_json['sale'][0],
            course_registration_code,
            self.sale_invoice_1,
            0,
            invoice_item=self.invoice_item
        )
        self.validate_sale_records_response(
            res_json['sale'][1],
            course_registration_code,
            sale_invoice_2,
            0,
            invoice_item=invoice_item_2
        )
def validate_sale_records_response(self, res, course_registration_code, invoice, used_codes, invoice_item):
"""
validate sale records attribute values with the response object
"""
self.assertEqual(res['total_amount'], invoice.total_amount)
self.assertEqual(res['recipient_email'], invoice.recipient_email)
self.assertEqual(res['recipient_name'], invoice.recipient_name)
self.assertEqual(res['company_name'], invoice.company_name)
self.assertEqual(res['company_contact_name'], invoice.company_contact_name)
self.assertEqual(res['company_contact_email'], invoice.company_contact_email)
self.assertEqual(res['internal_reference'], invoice.internal_reference)
self.assertEqual(res['customer_reference_number'], invoice.customer_reference_number)
self.assertEqual(res['invoice_number'], invoice.id)
self.assertEqual(res['created_by'], course_registration_code.created_by.username)
self.assertEqual(res['course_id'], invoice_item.course_id.to_deprecated_string())
self.assertEqual(res['total_used_codes'], used_codes)
self.assertEqual(res['total_codes'], 5)
def test_get_problem_responses_invalid_location(self):
"""
Test whether get_problem_responses returns an appropriate status
message when users submit an invalid problem location.
"""
url = reverse(
'get_problem_responses',
kwargs={'course_id': unicode(self.course.id)}
)
problem_location = ''
response = self.client.post(url, {'problem_location': problem_location})
res_json = json.loads(response.content)
self.assertEqual(res_json, 'Could not find problem with this location.')
def valid_problem_location(test): # pylint: disable=no-self-argument
"""
Decorator for tests that target get_problem_responses endpoint and
need to pretend user submitted a valid problem location.
"""
@functools.wraps(test)
def wrapper(self, *args, **kwargs):
"""
Run `test` method, ensuring that UsageKey.from_string returns a
problem key that the get_problem_responses endpoint can
work with.
"""
mock_problem_key = Mock(return_value=u'')
mock_problem_key.course_key = self.course.id
with patch.object(UsageKey, 'from_string') as patched_method:
patched_method.return_value = mock_problem_key
test(self, *args, **kwargs)
return wrapper
@valid_problem_location
def test_get_problem_responses_successful(self):
"""
Test whether get_problem_responses returns an appropriate status
message if CSV generation was started successfully.
"""
url = reverse(
'get_problem_responses',
kwargs={'course_id': unicode(self.course.id)}
)
problem_location = ''
response = self.client.post(url, {'problem_location': problem_location})
res_json = json.loads(response.content)
self.assertIn('status', res_json)
status = res_json['status']
self.assertIn('is being created', status)
self.assertNotIn('already in progress', status)
@valid_problem_location
def test_get_problem_responses_already_running(self):
"""
Test whether get_problem_responses returns an appropriate status
message if CSV generation is already in progress.
"""
url = reverse(
'get_problem_responses',
kwargs={'course_id': unicode(self.course.id)}
)
with patch('lms.djangoapps.instructor_task.api.submit_calculate_problem_responses_csv') as submit_task_function:
error = AlreadyRunningError()
submit_task_function.side_effect = error
response = self.client.post(url, {})
res_json = json.loads(response.content)
self.assertIn('status', res_json)
self.assertIn('already in progress', res_json['status'])
def test_get_students_features(self):
"""
Test that some minimum of information is formatted
correctly in the response to get_students_features.
"""
for student in self.students:
student.profile.city = "Mos Eisley {}".format(student.id)
student.profile.save()
url = reverse('get_students_features', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {})
res_json = json.loads(response.content)
self.assertIn('students', res_json)
for student in self.students:
student_json = [
x for x in res_json['students']
if x['username'] == student.username
][0]
self.assertEqual(student_json['username'], student.username)
self.assertEqual(student_json['email'], student.email)
self.assertEqual(student_json['city'], student.profile.city)
self.assertEqual(student_json['country'], "")
@ddt.data(True, False)
def test_get_students_features_cohorted(self, is_cohorted):
"""
Test that get_students_features includes cohort info when the course is
cohorted, and does not when the course is not cohorted.
"""
url = reverse('get_students_features', kwargs={'course_id': unicode(self.course.id)})
set_course_cohort_settings(self.course.id, is_cohorted=is_cohorted)
response = self.client.post(url, {})
res_json = json.loads(response.content)
self.assertEqual('cohort' in res_json['feature_names'], is_cohorted)
@ddt.data(True, False)
def test_get_students_features_teams(self, has_teams):
"""
Test that get_students_features includes team info when the course is
has teams enabled, and does not when the course does not have teams enabled
"""
if has_teams:
self.course = CourseFactory.create(teams_configuration={
'max_size': 2, 'topics': [{'topic-id': 'topic', 'name': 'Topic', 'description': 'A Topic'}]
})
course_instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=course_instructor.username, password='test')
url = reverse('get_students_features', kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {})
res_json = json.loads(response.content)
self.assertEqual('team' in res_json['feature_names'], has_teams)
def test_get_students_who_may_enroll(self):
"""
Test whether get_students_who_may_enroll returns an appropriate
status message when users request a CSV file of students who
may enroll in a course.
"""
url = reverse(
'get_students_who_may_enroll',
kwargs={'course_id': unicode(self.course.id)}
)
# Successful case:
response = self.client.post(url, {})
res_json = json.loads(response.content)
self.assertIn('status', res_json)
self.assertNotIn('currently being created', res_json['status'])
# CSV generation already in progress:
with patch('lms.djangoapps.instructor_task.api.submit_calculate_may_enroll_csv') as submit_task_function:
error = AlreadyRunningError()
submit_task_function.side_effect = error
response = self.client.post(url, {})
res_json = json.loads(response.content)
self.assertIn('status', res_json)
self.assertIn('currently being created', res_json['status'])
def test_get_student_exam_results(self):
"""
Test whether get_proctored_exam_results returns an appropriate
status message when users request a CSV file.
"""
url = reverse(
'get_proctored_exam_results',
kwargs={'course_id': unicode(self.course.id)}
)
# Successful case:
response = self.client.post(url, {})
res_json = json.loads(response.content)
self.assertIn('status', res_json)
self.assertNotIn('currently being created', res_json['status'])
# CSV generation already in progress:
with patch('lms.djangoapps.instructor_task.api.submit_proctored_exam_results_report') as submit_task_function:
error = AlreadyRunningError()
submit_task_function.side_effect = error
response = self.client.post(url, {})
res_json = json.loads(response.content)
self.assertIn('status', res_json)
self.assertIn('currently being created', res_json['status'])
def test_access_course_finance_admin_with_invalid_course_key(self):
"""
Test assert require_course fiance_admin before generating
a detailed enrollment report
"""
func = Mock()
decorated_func = require_finance_admin(func)
request = self.mock_request()
response = decorated_func(request, 'invalid_course_key')
self.assertEqual(response.status_code, 404)
self.assertFalse(func.called)
def mock_request(self):
"""
mock request
"""
request = Mock()
request.user = self.instructor
return request
def test_access_course_finance_admin_with_valid_course_key(self):
"""
Test to check the course_finance_admin role with valid key
but doesn't have access to the function
"""
func = Mock()
decorated_func = require_finance_admin(func)
request = self.mock_request()
response = decorated_func(request, 'valid/course/key')
self.assertEqual(response.status_code, 403)
self.assertFalse(func.called)
def test_add_user_to_fiance_admin_role_with_valid_course(self):
"""
test to check that a function is called using a fiance_admin
rights.
"""
func = Mock()
decorated_func = require_finance_admin(func)
request = self.mock_request()
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
decorated_func(request, self.course.id.to_deprecated_string())
self.assertTrue(func.called)
    def test_enrollment_report_features_csv(self):
        """
        test to generate enrollment report.
        enroll users, admin staff using registration codes.
        """
        # Mark the invoice as fully paid so its registration code is usable.
        InvoiceTransaction.objects.create(
            invoice=self.sale_invoice_1,
            amount=self.sale_invoice_1.total_amount,
            status='completed',
            created_by=self.instructor,
            last_modified_by=self.instructor
        )
        # Registration code backed by the paid invoice above.
        course_registration_code = CourseRegistrationCode.objects.create(
            code='abcde',
            course_id=self.course.id.to_deprecated_string(),
            created_by=self.instructor,
            invoice=self.sale_invoice_1,
            invoice_item=self.invoice_item,
            mode_slug='honor'
        )
        # Enroll an admin through the normal purchased-cart flow, so the
        # report covers more than one enrollment path.
        admin_user = AdminFactory()
        admin_cart = Order.get_cart_for_user(admin_user)
        PaidCourseRegistration.add_to_order(admin_cart, self.course.id)
        admin_cart.purchase()
        # create a new user/student and enroll
        # in the course using a registration code
        # and then validates the generated detailed enrollment report
        test_user = UserFactory()
        self.register_with_redemption_code(test_user, course_registration_code.code)
        CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
        # Profile meta exercises the report's extra-info columns.
        UserProfileFactory.create(user=self.students[0], meta='{"company": "asdasda"}')
        self.client.login(username=self.instructor.username, password='test')
        url = reverse('get_enrollment_report', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {})
        self.assertIn('The detailed enrollment report is being created.', response.content)
    def test_bulk_purchase_detailed_report(self):
        """
        test to generate detailed enrollment report.
        1 Purchase registration codes.
        2 Enroll users via registration code.
        3 Validate generated enrollment report.
        """
        paid_course_reg_item = PaidCourseRegistration.add_to_order(self.cart, self.course.id)
        # update the quantity of the cart item paid_course_reg_item
        resp = self.client.post(
            reverse('shoppingcart.views.update_user_cart', is_dashboard_endpoint=False),
            {'ItemId': paid_course_reg_item.id, 'qty': '4'}
        )
        self.assertEqual(resp.status_code, 200)
        # apply the coupon code to the item in the cart
        resp = self.client.post(
            reverse('shoppingcart.views.use_code', is_dashboard_endpoint=False),
            {'code': self.coupon_code}
        )
        self.assertEqual(resp.status_code, 200)
        # Purchasing a multi-seat order generates registration codes tied to
        # this cart; redeem one for the instructor.
        self.cart.purchase()
        course_reg_codes = CourseRegistrationCode.objects.filter(order=self.cart)
        self.register_with_redemption_code(self.instructor, course_reg_codes[0].code)
        # A second learner enrolls through an ordinary paid cart instead.
        test_user = UserFactory()
        test_user_cart = Order.get_cart_for_user(test_user)
        PaidCourseRegistration.add_to_order(test_user_cart, self.course.id)
        test_user_cart.purchase()
        # Record a refund against the standing invoice so the report also
        # sees a negative transaction.
        InvoiceTransaction.objects.create(
            invoice=self.sale_invoice_1,
            amount=-self.sale_invoice_1.total_amount,
            status='refunded',
            created_by=self.instructor,
            last_modified_by=self.instructor
        )
        # Invoice-backed registration code redeemed by a third learner.
        course_registration_code = CourseRegistrationCode.objects.create(
            code='abcde',
            course_id=self.course.id.to_deprecated_string(),
            created_by=self.instructor,
            invoice=self.sale_invoice_1,
            invoice_item=self.invoice_item,
            mode_slug='honor'
        )
        test_user1 = UserFactory()
        self.register_with_redemption_code(test_user1, course_registration_code.code)
        CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
        self.client.login(username=self.instructor.username, password='test')
        url = reverse('get_enrollment_report', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {})
        self.assertIn('The detailed enrollment report is being created.', response.content)
def test_create_registration_code_without_invoice_and_order(self):
"""
test generate detailed enrollment report,
used a registration codes which has been created via invoice or bulk
purchase scenario.
"""
course_registration_code = CourseRegistrationCode.objects.create(
code='abcde',
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
mode_slug='honor'
)
test_user1 = UserFactory()
self.register_with_redemption_code(test_user1, course_registration_code.code)
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
self.client.login(username=self.instructor.username, password='test')
url = reverse('get_enrollment_report', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {})
self.assertIn('The detailed enrollment report is being created.', response.content)
    def test_invoice_payment_is_still_pending_for_registration_codes(self):
        """
        test generate enrollment report
        enroll a user in a course using registration code
        whose invoice has not been paid yet
        """
        # NOTE: unlike the sibling tests, no InvoiceTransaction is created
        # here, so the invoice backing this code is still unpaid when the
        # report is generated.
        course_registration_code = CourseRegistrationCode.objects.create(
            code='abcde',
            course_id=self.course.id.to_deprecated_string(),
            created_by=self.instructor,
            invoice=self.sale_invoice_1,
            invoice_item=self.invoice_item,
            mode_slug='honor'
        )
        test_user1 = UserFactory()
        self.register_with_redemption_code(test_user1, course_registration_code.code)
        CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
        self.client.login(username=self.instructor.username, password='test')
        url = reverse('get_enrollment_report', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {})
        self.assertIn('The detailed enrollment report is being created.', response.content)
    @patch.object(lms.djangoapps.instructor.views.api, 'anonymous_id_for_user', Mock(return_value='42'))
    @patch.object(lms.djangoapps.instructor.views.api, 'unique_id_for_user', Mock(return_value='41'))
    def test_get_anon_ids(self):
        """
        Test the CSV output for the anonymized user ids.
        """
        url = reverse('get_anon_ids', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {})
        # The endpoint streams CSV directly, not JSON.
        self.assertEqual(response['Content-Type'], 'text/csv')
        # Strip carriage returns so the assertions are line-ending agnostic.
        body = response.content.replace('\r', '')
        # With both ID helpers patched, every row is "<user_id>","41","42".
        self.assertTrue(body.startswith(
            '"User ID","Anonymized User ID","Course Specific Anonymized User ID"'
            '\n"{user_id}","41","42"\n'.format(user_id=self.students[0].id)
        ))
        self.assertTrue(
            body.endswith('"{user_id}","41","42"\n'.format(user_id=self.students[-1].id))
        )
def test_list_report_downloads(self):
url = reverse('list_report_downloads', kwargs={'course_id': self.course.id.to_deprecated_string()})
with patch('lms.djangoapps.instructor_task.models.DjangoStorageReportStore.links_for') as mock_links_for:
mock_links_for.return_value = [
('mock_file_name_1', 'https://1.mock.url'),
('mock_file_name_2', 'https://2.mock.url'),
]
response = self.client.post(url, {})
expected_response = {
"downloads": [
{
"url": "https://1.mock.url",
"link": "<a href=\"https://1.mock.url\">mock_file_name_1</a>",
"name": "mock_file_name_1"
},
{
"url": "https://2.mock.url",
"link": "<a href=\"https://2.mock.url\">mock_file_name_2</a>",
"name": "mock_file_name_2"
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected_response)
    @ddt.data(*REPORTS_DATA)
    @ddt.unpack
    @valid_problem_location
    def test_calculate_report_csv_success(self, report_type, instructor_api_endpoint, task_api_endpoint, extra_instructor_api_kwargs):
        """Hitting each report endpoint queues the task and reports success."""
        # Each REPORTS_DATA row supplies an endpoint name plus the task-API
        # path it submits to; the task itself is patched out.
        kwargs = {'course_id': unicode(self.course.id)}
        kwargs.update(extra_instructor_api_kwargs)
        url = reverse(instructor_api_endpoint, kwargs=kwargs)
        success_status = "The {report_type} report is being created.".format(report_type=report_type)
        if report_type == 'problem responses':
            # This endpoint also requires a problem location (made valid by
            # the @valid_problem_location decorator above).
            with patch(task_api_endpoint):
                response = self.client.post(url, {'problem_location': ''})
            self.assertIn(success_status, response.content)
        else:
            # The financial reports additionally require the finance-admin role.
            CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
            with patch(task_api_endpoint):
                response = self.client.post(url, {})
            self.assertIn(success_status, response.content)
@ddt.data(*EXECUTIVE_SUMMARY_DATA)
@ddt.unpack
def test_executive_summary_report_success(
self,
report_type,
instructor_api_endpoint,
task_api_endpoint,
extra_instructor_api_kwargs
):
kwargs = {'course_id': unicode(self.course.id)}
kwargs.update(extra_instructor_api_kwargs)
url = reverse(instructor_api_endpoint, kwargs=kwargs)
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
with patch(task_api_endpoint):
response = self.client.post(url, {})
success_status = "The {report_type} report is being created." \
" To view the status of the report, see Pending" \
" Tasks below".format(report_type=report_type)
self.assertIn(success_status, response.content)
@ddt.data(*EXECUTIVE_SUMMARY_DATA)
@ddt.unpack
def test_executive_summary_report_already_running(
self,
report_type,
instructor_api_endpoint,
task_api_endpoint,
extra_instructor_api_kwargs
):
kwargs = {'course_id': unicode(self.course.id)}
kwargs.update(extra_instructor_api_kwargs)
url = reverse(instructor_api_endpoint, kwargs=kwargs)
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
with patch(task_api_endpoint) as mock:
mock.side_effect = AlreadyRunningError()
response = self.client.post(url, {})
already_running_status = "The {report_type} report is currently being created." \
" To view the status of the report, see Pending Tasks below." \
" You will be able to download the report" \
" when it is" \
" complete.".format(report_type=report_type)
self.assertIn(already_running_status, response.content)
def test_get_ora2_responses_success(self):
url = reverse('export_ora2_data', kwargs={'course_id': unicode(self.course.id)})
with patch('lms.djangoapps.instructor_task.api.submit_export_ora2_data') as mock_submit_ora2_task:
mock_submit_ora2_task.return_value = True
response = self.client.post(url, {})
success_status = "The ORA data report is being generated."
self.assertIn(success_status, response.content)
def test_get_ora2_responses_already_running(self):
url = reverse('export_ora2_data', kwargs={'course_id': unicode(self.course.id)})
with patch('lms.djangoapps.instructor_task.api.submit_export_ora2_data') as mock_submit_ora2_task:
mock_submit_ora2_task.side_effect = AlreadyRunningError()
response = self.client.post(url, {})
already_running_status = "An ORA data report generation task is already in progress."
self.assertIn(already_running_status, response.content)
def test_get_student_progress_url(self):
""" Test that progress_url is in the successful response. """
url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {'unique_student_identifier': self.students[0].email.encode("utf-8")}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200)
res_json = json.loads(response.content)
self.assertIn('progress_url', res_json)
def test_get_student_progress_url_from_uname(self):
""" Test that progress_url is in the successful response. """
url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {'unique_student_identifier': self.students[0].username.encode("utf-8")}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200)
res_json = json.loads(response.content)
self.assertIn('progress_url', res_json)
def test_get_student_progress_url_noparams(self):
""" Test that the endpoint 404's without the required query params. """
url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url)
self.assertEqual(response.status_code, 400)
def test_get_student_progress_url_nostudent(self):
""" Test that the endpoint 400's when requesting an unknown email. """
url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url)
self.assertEqual(response.status_code, 400)
@attr(shard=1)
class TestInstructorAPIRegradeTask(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test endpoints whereby instructors can change student grades.
    This includes resetting attempts and starting rescore tasks.
    This test does NOT test whether the actions had an effect on the
    database, that is the job of task tests and test_enrollment.
    """
    @classmethod
    def setUpClass(cls):
        # Shared course with one known problem used by every test below.
        super(TestInstructorAPIRegradeTask, cls).setUpClass()
        cls.course = CourseFactory.create()
        cls.problem_location = msk_from_problem_urlname(
            cls.course.id,
            'robot-some-problem-urlname'
        )
        cls.problem_urlname = cls.problem_location.to_deprecated_string()
    def setUp(self):
        super(TestInstructorAPIRegradeTask, self).setUp()
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')
        # Enrolled student with 10 recorded attempts on the problem, so a
        # reset or deletion has visible effect to assert on.
        self.student = UserFactory()
        CourseEnrollment.enroll(self.student, self.course.id)
        self.module_to_reset = StudentModule.objects.create(
            student=self.student,
            course_id=self.course.id,
            module_state_key=self.problem_location,
            state=json.dumps({'attempts': 10}),
        )
    def test_reset_student_attempts_deletall(self):
        """ Make sure no one can delete all students state on a problem. """
        url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
        # all_students + delete_module together is explicitly forbidden.
        response = self.client.post(url, {
            'problem_to_reset': self.problem_urlname,
            'all_students': True,
            'delete_module': True,
        })
        self.assertEqual(response.status_code, 400)
    def test_reset_student_attempts_single(self):
        """ Test reset single student attempts. """
        url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'problem_to_reset': self.problem_urlname,
            'unique_student_identifier': self.student.email,
        })
        self.assertEqual(response.status_code, 200)
        # make sure problem attempts have been reset.
        changed_module = StudentModule.objects.get(pk=self.module_to_reset.pk)
        self.assertEqual(
            json.loads(changed_module.state)['attempts'],
            0
        )
    # mock out the function which should be called to execute the action.
    @patch.object(lms.djangoapps.instructor_task.api, 'submit_reset_problem_attempts_for_all_students')
    def test_reset_student_attempts_all(self, act):
        """ Test reset all student attempts. """
        url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'problem_to_reset': self.problem_urlname,
            'all_students': True,
        })
        self.assertEqual(response.status_code, 200)
        # The bulk reset must be delegated to the instructor task API.
        self.assertTrue(act.called)
    def test_reset_student_attempts_missingmodule(self):
        """ Test reset for non-existant problem. """
        url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'problem_to_reset': 'robot-not-a-real-module',
            'unique_student_identifier': self.student.email,
        })
        self.assertEqual(response.status_code, 400)
    @patch('lms.djangoapps.grades.signals.handlers.PROBLEM_WEIGHTED_SCORE_CHANGED.send')
    def test_reset_student_attempts_delete(self, _mock_signal):
        """ Test delete single student state. """
        url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'problem_to_reset': self.problem_urlname,
            'unique_student_identifier': self.student.email,
            'delete_module': True,
        })
        self.assertEqual(response.status_code, 200)
        # make sure the module has been deleted
        self.assertEqual(
            StudentModule.objects.filter(
                student=self.module_to_reset.student,
                course_id=self.module_to_reset.course_id,
                # module_id=self.module_to_reset.module_id,
            ).count(),
            0
        )
    def test_reset_student_attempts_nonsense(self):
        """ Test failure with both unique_student_identifier and all_students. """
        url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
        # Targeting one student AND all students at once is contradictory.
        response = self.client.post(url, {
            'problem_to_reset': self.problem_urlname,
            'unique_student_identifier': self.student.email,
            'all_students': True,
        })
        self.assertEqual(response.status_code, 400)
    @patch.object(lms.djangoapps.instructor_task.api, 'submit_rescore_problem_for_student')
    def test_rescore_problem_single(self, act):
        """ Test rescoring of a single student. """
        url = reverse('rescore_problem', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'problem_to_reset': self.problem_urlname,
            'unique_student_identifier': self.student.email,
        })
        self.assertEqual(response.status_code, 200)
        self.assertTrue(act.called)
    @patch.object(lms.djangoapps.instructor_task.api, 'submit_rescore_problem_for_student')
    def test_rescore_problem_single_from_uname(self, act):
        """ Test rescoring of a single student. """
        # Same as above, but the student is identified by username.
        url = reverse('rescore_problem', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'problem_to_reset': self.problem_urlname,
            'unique_student_identifier': self.student.username,
        })
        self.assertEqual(response.status_code, 200)
        self.assertTrue(act.called)
    @patch.object(lms.djangoapps.instructor_task.api, 'submit_rescore_problem_for_all_students')
    def test_rescore_problem_all(self, act):
        """ Test rescoring for all students. """
        url = reverse('rescore_problem', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'problem_to_reset': self.problem_urlname,
            'all_students': True,
        })
        self.assertEqual(response.status_code, 200)
        self.assertTrue(act.called)
    @patch.dict(settings.FEATURES, {'ENTRANCE_EXAMS': True})
    def test_course_has_entrance_exam_in_student_attempts_reset(self):
        """ Test course has entrance exam id set while resetting attempts"""
        # This course has no entrance exam configured, so the request fails.
        url = reverse('reset_student_attempts_for_entrance_exam',
                      kwargs={'course_id': unicode(self.course.id)})
        response = self.client.post(url, {
            'all_students': True,
            'delete_module': False,
        })
        self.assertEqual(response.status_code, 400)
    @patch.dict(settings.FEATURES, {'ENTRANCE_EXAMS': True})
    def test_rescore_entrance_exam_with_invalid_exam(self):
        """ Test course has entrance exam id set while re-scoring. """
        # Likewise: rescoring an entrance exam on a course without one fails.
        url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
        response = self.client.post(url, {
            'unique_student_identifier': self.student.email,
        })
        self.assertEqual(response.status_code, 400)
@attr(shard=1)
@patch.dict(settings.FEATURES, {'ENTRANCE_EXAMS': True})
@ddt.ddt
class TestEntranceExamInstructorAPIRegradeTask(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints whereby instructors can rescore student grades,
reset student attempts and delete state for entrance exam.
"""
    @classmethod
    def setUpClass(cls):
        """Create a course with a real entrance exam and one with a bad exam id."""
        super(TestEntranceExamInstructorAPIRegradeTask, cls).setUpClass()
        cls.course = CourseFactory.create(
            org='test_org',
            course='test_course',
            run='test_run',
            entrance_exam_id='i4x://{}/{}/chapter/Entrance_exam'.format('test_org', 'test_course')
        )
        # Course whose entrance_exam_id points at nothing; used by the
        # "invalid exam" failure tests.
        cls.course_with_invalid_ee = CourseFactory.create(entrance_exam_id='invalid_exam')
        # Build chapter -> sequential -> vertical -> two problems so the
        # entrance exam has real gradable content.
        with cls.store.bulk_operations(cls.course.id, emit_signals=False):
            cls.entrance_exam = ItemFactory.create(
                parent=cls.course,
                category='chapter',
                display_name='Entrance exam'
            )
            subsection = ItemFactory.create(
                parent=cls.entrance_exam,
                category='sequential',
                display_name='Subsection 1'
            )
            vertical = ItemFactory.create(
                parent=subsection,
                category='vertical',
                display_name='Vertical 1'
            )
            cls.ee_problem_1 = ItemFactory.create(
                parent=vertical,
                category="problem",
                display_name="Exam Problem - Problem 1"
            )
            cls.ee_problem_2 = ItemFactory.create(
                parent=vertical,
                category="problem",
                display_name="Exam Problem - Problem 2"
            )
    def setUp(self):
        """Log in an instructor and seed entrance-exam state for one student."""
        super(TestEntranceExamInstructorAPIRegradeTask, self).setUp()
        self.instructor = InstructorFactory(course_key=self.course.id)
        # Add instructor to invalid ee course
        CourseInstructorRole(self.course_with_invalid_ee.id).add_users(self.instructor)
        self.client.login(username=self.instructor.username, password='test')
        self.student = UserFactory()
        CourseEnrollment.enroll(self.student, self.course.id)
        # Pre-populate state (10 attempts, done) on both entrance-exam
        # problems so resets and deletions have visible effect.
        ee_module_to_reset1 = StudentModule.objects.create(
            student=self.student,
            course_id=self.course.id,
            module_state_key=self.ee_problem_1.location,
            state=json.dumps({'attempts': 10, 'done': True}),
        )
        ee_module_to_reset2 = StudentModule.objects.create(
            student=self.student,
            course_id=self.course.id,
            module_state_key=self.ee_problem_2.location,
            state=json.dumps({'attempts': 10, 'done': True}),
        )
        # Usage keys of both modules, used by the assertions in the tests.
        self.ee_modules = [ee_module_to_reset1.module_state_key, ee_module_to_reset2.module_state_key]
@ddt.data(ModuleStoreEnum.Type.split, ModuleStoreEnum.Type.mongo)
def test_grade_histogram(self, store):
"""
Verify that a histogram has been created.
"""
course = CourseFactory.create(default_store=store)
usage_key = course.id.make_usage_key('problem', 'first_problem')
StudentModule.objects.create(
student_id=1,
grade=100,
module_state_key=usage_key
)
StudentModule.objects.create(
student_id=2,
grade=50,
module_state_key=usage_key
)
grades = grade_histogram(usage_key)
self.assertEqual(grades[0], (50.0, 1))
self.assertEqual(grades[1], (100.0, 1))
def test_reset_entrance_exam_student_attempts_delete_all(self):
""" Make sure no one can delete all students state on entrance exam. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'all_students': True,
'delete_module': True,
})
self.assertEqual(response.status_code, 400)
def test_reset_entrance_exam_student_attempts_single(self):
""" Test reset single student attempts for entrance exam. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
# make sure problem attempts have been reset.
changed_modules = StudentModule.objects.filter(module_state_key__in=self.ee_modules)
for changed_module in changed_modules:
self.assertEqual(
json.loads(changed_module.state)['attempts'],
0
)
# mock out the function which should be called to execute the action.
@patch.object(lms.djangoapps.instructor_task.api, 'submit_reset_problem_attempts_in_entrance_exam')
def test_reset_entrance_exam_all_student_attempts(self, act):
""" Test reset all student attempts for entrance exam. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'all_students': True,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
def test_reset_student_attempts_invalid_entrance_exam(self):
""" Test reset for invalid entrance exam. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course_with_invalid_ee.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 400)
def test_entrance_exam_student_delete_state(self):
""" Test delete single student entrance exam state. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
'delete_module': True,
})
self.assertEqual(response.status_code, 200)
# make sure the module has been deleted
changed_modules = StudentModule.objects.filter(module_state_key__in=self.ee_modules)
self.assertEqual(changed_modules.count(), 0)
def test_entrance_exam_delete_state_with_staff(self):
""" Test entrance exam delete state failure with staff access. """
self.client.logout()
staff_user = StaffFactory(course_key=self.course.id)
self.client.login(username=staff_user.username, password='test')
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
'delete_module': True,
})
self.assertEqual(response.status_code, 403)
def test_entrance_exam_reset_student_attempts_nonsense(self):
""" Test failure with both unique_student_identifier and all_students. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
'all_students': True,
})
self.assertEqual(response.status_code, 400)
@patch.object(lms.djangoapps.instructor_task.api, 'submit_rescore_entrance_exam_for_student')
def test_rescore_entrance_exam_single_student(self, act):
""" Test re-scoring of entrance exam for single student. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
def test_rescore_entrance_exam_all_student(self):
""" Test rescoring for all students. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'all_students': True,
})
self.assertEqual(response.status_code, 200)
def test_rescore_entrance_exam_if_higher_all_student(self):
""" Test rescoring for all students only if higher. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'all_students': True,
'only_if_higher': True,
})
self.assertEqual(response.status_code, 200)
def test_rescore_entrance_exam_all_student_and_single(self):
""" Test re-scoring with both all students and single student parameters. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
'all_students': True,
})
self.assertEqual(response.status_code, 400)
def test_rescore_entrance_exam_with_invalid_exam(self):
""" Test re-scoring of entrance exam with invalid exam. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course_with_invalid_ee.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 400)
def test_list_entrance_exam_instructor_tasks_student(self):
""" Test list task history for entrance exam AND student. """
# create a re-score entrance exam task
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
url = reverse('list_entrance_exam_instructor_tasks', kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
# check response
tasks = json.loads(response.content)['tasks']
self.assertEqual(len(tasks), 1)
self.assertEqual(tasks[0]['status'], _('Complete'))
def test_list_entrance_exam_instructor_tasks_all_student(self):
""" Test list task history for entrance exam AND all student. """
url = reverse('list_entrance_exam_instructor_tasks', kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {})
self.assertEqual(response.status_code, 200)
# check response
tasks = json.loads(response.content)['tasks']
self.assertEqual(len(tasks), 0)
def test_list_entrance_exam_instructor_with_invalid_exam_key(self):
""" Test list task history for entrance exam failure if course has invalid exam. """
url = reverse('list_entrance_exam_instructor_tasks',
kwargs={'course_id': unicode(self.course_with_invalid_ee.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 400)
def test_skip_entrance_exam_student(self):
""" Test skip entrance exam api for student. """
# create a re-score entrance exam task
url = reverse('mark_student_can_skip_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
# check response
message = _('This student (%s) will skip the entrance exam.') % self.student.email
self.assertContains(response, message)
# post again with same student
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
# This time response message should be different
message = _('This student (%s) is already allowed to skip the entrance exam.') % self.student.email
self.assertContains(response, message)
@attr(shard=1)
@patch('bulk_email.models.html_to_text', Mock(return_value='Mocking CourseEmail.text_message', autospec=True))
class TestInstructorSendEmail(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Checks that only instructors have access to email endpoints, and that
    these endpoints are only accessible with courses that actually exist,
    only with valid email messages.
    """
    @classmethod
    def setUpClass(cls):
        # Build one shared course and a canonical, fully-populated payload for
        # the send_email endpoint.  The non-ASCII subject/message exercise
        # unicode handling in the bulk-email pipeline.
        super(TestInstructorSendEmail, cls).setUpClass()
        cls.course = CourseFactory.create()
        test_subject = u'\u1234 test subject'
        test_message = u'\u6824 test message'
        cls.full_test_message = {
            'send_to': '["myself", "staff"]',
            'subject': test_subject,
            'message': test_message,
        }
        # Enable bulk email globally without per-course authorization.
        BulkEmailFlag.objects.create(enabled=True, require_course_email_auth=False)
    @classmethod
    def tearDownClass(cls):
        super(TestInstructorSendEmail, cls).tearDownClass()
        BulkEmailFlag.objects.all().delete()
    def setUp(self):
        # Each test starts logged in as the course instructor.
        super(TestInstructorSendEmail, self).setUp()
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')
    def test_send_email_as_logged_in_instructor(self):
        # Happy path: an instructor with a complete payload succeeds.
        url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, self.full_test_message)
        self.assertEqual(response.status_code, 200)
    def test_send_email_but_not_logged_in(self):
        # Anonymous users are forbidden.
        self.client.logout()
        url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, self.full_test_message)
        self.assertEqual(response.status_code, 403)
    def test_send_email_but_not_staff(self):
        # A regular (non-staff) user is forbidden.
        self.client.logout()
        student = UserFactory()
        self.client.login(username=student.username, password='test')
        url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, self.full_test_message)
        self.assertEqual(response.status_code, 403)
    def test_send_email_but_course_not_exist(self):
        # A syntactically valid but nonexistent course id must not succeed.
        url = reverse('send_email', kwargs={'course_id': 'GarbageCourse/DNE/NoTerm'})
        response = self.client.post(url, self.full_test_message)
        self.assertNotEqual(response.status_code, 200)
    def test_send_email_no_sendto(self):
        # Missing 'send_to' is a client error.
        url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'subject': 'test subject',
            'message': 'test message',
        })
        self.assertEqual(response.status_code, 400)
    def test_send_email_invalid_sendto(self):
        # An unrecognized send_to target is a client error.
        url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'send_to': '["invalid_target", "staff"]',
            'subject': 'test subject',
            'message': 'test message',
        })
        self.assertEqual(response.status_code, 400)
    def test_send_email_no_subject(self):
        # Missing 'subject' is a client error.
        url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'send_to': '["staff"]',
            'message': 'test message',
        })
        self.assertEqual(response.status_code, 400)
    def test_send_email_no_message(self):
        # Missing 'message' is a client error.
        url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'send_to': '["staff"]',
            'subject': 'test subject',
        })
        self.assertEqual(response.status_code, 400)
class MockCompletionInfo(object):
    """
    Stand-in for get_task_completion_info.

    Alternates between a failure and a success result on successive calls:
    odd-numbered calls report an errored task, even-numbered calls report a
    completed one.
    """
    # Per-class starting point; each instance gets its own counter on the
    # first increment (augmented assignment creates an instance attribute).
    times_called = 0

    def mock_get_task_completion_info(self, *args):  # pylint: disable=unused-argument
        """Mock for get_task_completion_info"""
        self.times_called += 1
        if self.times_called % 2:
            return False, 'Task Errored In Some Way'
        return True, 'Task Completed'
@attr(shard=1)
class TestInstructorAPITaskLists(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test instructor task list endpoint.
    """
    class FakeTask(object):
        """ Fake task object """
        # Attribute names the endpoint serializes for each task.
        FEATURES = [
            'task_type',
            'task_input',
            'task_id',
            'requester',
            'task_state',
            'created',
            'status',
            'task_message',
            'duration_sec'
        ]
        def __init__(self, completion):
            # Give every serialized feature a recognizable placeholder value.
            for feature in self.FEATURES:
                setattr(self, feature, 'expected')
            # created needs to be a datetime
            self.created = datetime.datetime(2013, 10, 25, 11, 42, 35)
            # set 'status' and 'task_message' attrs
            success, task_message = completion()
            if success:
                self.status = "Complete"
            else:
                self.status = "Incomplete"
            self.task_message = task_message
            # Set 'task_output' attr, which will be parsed to the 'duration_sec' attr.
            self.task_output = '{"duration_ms": 1035000}'
            self.duration_sec = 1035000 / 1000.0
        def make_invalid_output(self):
            """Munge task_output to be invalid json"""
            self.task_output = 'HI MY NAME IS INVALID JSON'
            # This should be given the value of 'unknown' if the task output
            # can't be properly parsed
            self.duration_sec = 'unknown'
        def to_dict(self):
            """ Convert fake task to dictionary representation. """
            attr_dict = {key: getattr(self, key) for key in self.FEATURES}
            attr_dict['created'] = attr_dict['created'].isoformat()
            return attr_dict
    @classmethod
    def setUpClass(cls):
        super(TestInstructorAPITaskLists, cls).setUpClass()
        cls.course = CourseFactory.create(
            entrance_exam_id='i4x://{}/{}/chapter/Entrance_exam'.format('test_org', 'test_course')
        )
        cls.problem_location = msk_from_problem_urlname(
            cls.course.id,
            'robot-some-problem-urlname'
        )
        cls.problem_urlname = cls.problem_location.to_deprecated_string()
    def setUp(self):
        super(TestInstructorAPITaskLists, self).setUp()
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')
        # An enrolled student with existing problem state (10 attempts).
        self.student = UserFactory()
        CourseEnrollment.enroll(self.student, self.course.id)
        self.module = StudentModule.objects.create(
            student=self.student,
            course_id=self.course.id,
            module_state_key=self.problem_location,
            state=json.dumps({'attempts': 10}),
        )
        # Seven fake tasks alternating errored/completed; the last one gets
        # unparseable task_output to exercise the error-handling path.
        mock_factory = MockCompletionInfo()
        self.tasks = [self.FakeTask(mock_factory.mock_get_task_completion_info) for _ in xrange(7)]
        self.tasks[-1].make_invalid_output()
    @patch.object(lms.djangoapps.instructor_task.api, 'get_running_instructor_tasks')
    def test_list_instructor_tasks_running(self, act):
        """ Test list of all running tasks. """
        act.return_value = self.tasks
        url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
        # Use a fresh MockCompletionInfo so the status sequence matches the
        # one used to build self.tasks.
        mock_factory = MockCompletionInfo()
        with patch(
            'lms.djangoapps.instructor.views.instructor_task_helpers.get_task_completion_info'
        ) as mock_completion_info:
            mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
            response = self.client.post(url, {})
        self.assertEqual(response.status_code, 200)
        # check response
        self.assertTrue(act.called)
        expected_tasks = [ftask.to_dict() for ftask in self.tasks]
        actual_tasks = json.loads(response.content)['tasks']
        for exp_task, act_task in zip(expected_tasks, actual_tasks):
            self.assertDictEqual(exp_task, act_task)
        self.assertEqual(actual_tasks, expected_tasks)
    @patch.object(lms.djangoapps.instructor_task.api, 'get_instructor_task_history')
    def test_list_background_email_tasks(self, act):
        """Test list of background email tasks."""
        act.return_value = self.tasks
        url = reverse('list_background_email_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
        mock_factory = MockCompletionInfo()
        with patch(
            'lms.djangoapps.instructor.views.instructor_task_helpers.get_task_completion_info'
        ) as mock_completion_info:
            mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
            response = self.client.post(url, {})
        self.assertEqual(response.status_code, 200)
        # check response
        self.assertTrue(act.called)
        expected_tasks = [ftask.to_dict() for ftask in self.tasks]
        actual_tasks = json.loads(response.content)['tasks']
        for exp_task, act_task in zip(expected_tasks, actual_tasks):
            self.assertDictEqual(exp_task, act_task)
        self.assertEqual(actual_tasks, expected_tasks)
    @patch.object(lms.djangoapps.instructor_task.api, 'get_instructor_task_history')
    def test_list_instructor_tasks_problem(self, act):
        """ Test list task history for problem. """
        act.return_value = self.tasks
        url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
        mock_factory = MockCompletionInfo()
        with patch(
            'lms.djangoapps.instructor.views.instructor_task_helpers.get_task_completion_info'
        ) as mock_completion_info:
            mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
            # Filter the history down to a single problem.
            response = self.client.post(url, {
                'problem_location_str': self.problem_urlname,
            })
        self.assertEqual(response.status_code, 200)
        # check response
        self.assertTrue(act.called)
        expected_tasks = [ftask.to_dict() for ftask in self.tasks]
        actual_tasks = json.loads(response.content)['tasks']
        for exp_task, act_task in zip(expected_tasks, actual_tasks):
            self.assertDictEqual(exp_task, act_task)
        self.assertEqual(actual_tasks, expected_tasks)
    @patch.object(lms.djangoapps.instructor_task.api, 'get_instructor_task_history')
    def test_list_instructor_tasks_problem_student(self, act):
        """ Test list task history for problem AND student. """
        act.return_value = self.tasks
        url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
        mock_factory = MockCompletionInfo()
        with patch(
            'lms.djangoapps.instructor.views.instructor_task_helpers.get_task_completion_info'
        ) as mock_completion_info:
            mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
            # Filter the history by both problem and student.
            response = self.client.post(url, {
                'problem_location_str': self.problem_urlname,
                'unique_student_identifier': self.student.email,
            })
        self.assertEqual(response.status_code, 200)
        # check response
        self.assertTrue(act.called)
        expected_tasks = [ftask.to_dict() for ftask in self.tasks]
        actual_tasks = json.loads(response.content)['tasks']
        for exp_task, act_task in zip(expected_tasks, actual_tasks):
            self.assertDictEqual(exp_task, act_task)
        self.assertEqual(actual_tasks, expected_tasks)
@attr(shard=1)
@patch.object(lms.djangoapps.instructor_task.api, 'get_instructor_task_history', autospec=True)
class TestInstructorEmailContentList(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test the instructor email content history endpoint.
    """
    @classmethod
    def setUpClass(cls):
        super(TestInstructorEmailContentList, cls).setUpClass()
        cls.course = CourseFactory.create()
    def setUp(self):
        super(TestInstructorEmailContentList, self).setUp()
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')
        # Parallel registries keyed by email id; populated by
        # setup_fake_email_info().
        self.tasks = {}
        self.emails = {}
        self.emails_info = {}
    def setup_fake_email_info(self, num_emails, with_failures=False):
        """ Initialize the specified number of fake emails """
        for email_id in range(num_emails):
            # Randomized counts make the serialization round-trip meaningful.
            num_sent = random.randint(1, 15401)
            if with_failures:
                failed = random.randint(1, 15401)
            else:
                failed = 0
            self.tasks[email_id] = FakeContentTask(email_id, num_sent, failed, 'expected')
            self.emails[email_id] = FakeEmail(email_id)
            self.emails_info[email_id] = FakeEmailInfo(self.emails[email_id], num_sent, failed)
    def get_matching_mock_email(self, **kwargs):
        """ Returns the matching mock emails for the given id """
        email_id = kwargs.get('id', 0)
        return self.emails[email_id]
    def get_email_content_response(self, num_emails, task_history_request, with_failures=False):
        """ Calls the list_email_content endpoint and returns the response """
        self.setup_fake_email_info(num_emails, with_failures)
        task_history_request.return_value = self.tasks.values()
        url = reverse('list_email_content', kwargs={'course_id': self.course.id.to_deprecated_string()})
        # Redirect the view's CourseEmail lookup to our fake registry.
        with patch('lms.djangoapps.instructor.views.api.CourseEmail.objects.get') as mock_email_info:
            mock_email_info.side_effect = self.get_matching_mock_email
            response = self.client.post(url, {})
        self.assertEqual(response.status_code, 200)
        return response
    def check_emails_sent(self, num_emails, task_history_request, with_failures=False):
        """ Tests sending emails with or without failures """
        response = self.get_email_content_response(num_emails, task_history_request, with_failures)
        self.assertTrue(task_history_request.called)
        expected_email_info = [email_info.to_dict() for email_info in self.emails_info.values()]
        actual_email_info = json.loads(response.content)['emails']
        self.assertEqual(len(actual_email_info), num_emails)
        for exp_email, act_email in zip(expected_email_info, actual_email_info):
            self.assertDictEqual(exp_email, act_email)
        self.assertEqual(expected_email_info, actual_email_info)
    def test_content_list_one_email(self, task_history_request):
        """ Test listing of bulk emails when email list has one email """
        response = self.get_email_content_response(1, task_history_request)
        self.assertTrue(task_history_request.called)
        email_info = json.loads(response.content)['emails']
        # Emails list should have one email
        self.assertEqual(len(email_info), 1)
        # Email content should be what's expected
        expected_message = self.emails[0].html_message
        returned_email_info = email_info[0]
        received_message = returned_email_info[u'email'][u'html_message']
        self.assertEqual(expected_message, received_message)
    def test_content_list_no_emails(self, task_history_request):
        """ Test listing of bulk emails when email list empty """
        response = self.get_email_content_response(0, task_history_request)
        self.assertTrue(task_history_request.called)
        email_info = json.loads(response.content)['emails']
        # Emails list should be empty
        self.assertEqual(len(email_info), 0)
    def test_content_list_email_content_many(self, task_history_request):
        """ Test listing of bulk emails sent large amount of emails """
        self.check_emails_sent(50, task_history_request)
    def test_list_email_content_error(self, task_history_request):
        """ Test handling of error retrieving email """
        invalid_task = FakeContentTask(0, 0, 0, 'test')
        invalid_task.make_invalid_input()
        task_history_request.return_value = [invalid_task]
        url = reverse('list_email_content', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {})
        self.assertEqual(response.status_code, 200)
        self.assertTrue(task_history_request.called)
        returned_email_info = json.loads(response.content)['emails']
        self.assertEqual(len(returned_email_info), 1)
        returned_info = returned_email_info[0]
        # All metadata fields degrade to None when the task record is invalid.
        for info in ['created', 'sent_to', 'email', 'number_sent', 'requester']:
            self.assertEqual(returned_info[info], None)
    def test_list_email_with_failure(self, task_history_request):
        """ Test the handling of email task that had failures """
        self.check_emails_sent(1, task_history_request, True)
    def test_list_many_emails_with_failures(self, task_history_request):
        """ Test the handling of many emails with failures """
        self.check_emails_sent(50, task_history_request, True)
    def test_list_email_with_no_successes(self, task_history_request):
        # A task with zero successful sends must still serialize correctly.
        task_info = FakeContentTask(0, 0, 10, 'expected')
        email = FakeEmail(0)
        email_info = FakeEmailInfo(email, 0, 10)
        task_history_request.return_value = [task_info]
        url = reverse('list_email_content', kwargs={'course_id': self.course.id.to_deprecated_string()})
        with patch('lms.djangoapps.instructor.views.api.CourseEmail.objects.get') as mock_email_info:
            mock_email_info.return_value = email
            response = self.client.post(url, {})
        self.assertEqual(response.status_code, 200)
        self.assertTrue(task_history_request.called)
        returned_info_list = json.loads(response.content)['emails']
        self.assertEqual(len(returned_info_list), 1)
        returned_info = returned_info_list[0]
        expected_info = email_info.to_dict()
        self.assertDictEqual(expected_info, returned_info)
@attr(shard=1)
class TestInstructorAPIHelpers(TestCase):
    """ Test helpers for instructor.api """
    def test_split_input_list(self):
        # Mixed separators (commas, newlines, carriage returns) must all act
        # as delimiters, with surrounding whitespace stripped.
        raw = (
            "Lorem@ipsum.dolor, sit@amet.consectetur\nadipiscing@elit.Aenean\r"
            " convallis@at.lacus\r, ut@lacinia.Sed"
        )
        expected = [
            'Lorem@ipsum.dolor',
            'sit@amet.consectetur',
            'adipiscing@elit.Aenean',
            'convallis@at.lacus',
            'ut@lacinia.Sed',
        ]
        self.assertEqual(_split_input_list(raw), expected)

    def test_split_input_list_unicode(self):
        # str and unicode inputs must produce equivalent results.
        expected = ['robot@robot.edu', 'robot2@robot.edu']
        self.assertEqual(_split_input_list('robot@robot.edu, robot2@robot.edu'), expected)
        self.assertEqual(_split_input_list(u'robot@robot.edu, robot2@robot.edu'), expected)
        self.assertEqual(
            _split_input_list(u'robot@robot.edu, robot2@robot.edu'),
            [u'robot@robot.edu', 'robot2@robot.edu']
        )
        # Exotic unicode characters must pass through untouched.
        scary_unistuff = unichr(40960) + u'abcd' + unichr(1972)
        self.assertEqual(_split_input_list(scary_unistuff), [scary_unistuff])

    def test_msk_from_problem_urlname(self):
        # A (course key, urlname) pair maps onto the legacy i4x location form.
        course_id = SlashSeparatedCourseKey('MITx', '6.002x', '2013_Spring')
        expected = 'i4x://MITx/6.002x/problem/L2Node1'
        self.assertEqual(
            msk_from_problem_urlname(course_id, 'L2Node1').to_deprecated_string(),
            expected
        )

    @raises(ValueError)
    def test_msk_from_problem_urlname_error(self):
        # A non-course-key first argument must raise ValueError.
        msk_from_problem_urlname('notagoodcourse', 'L2Node1')
def get_extended_due(course, unit, user):
    """
    Gets the overridden due date for the given user on the given unit. Returns
    `None` if there is no override set.
    """
    # Keep the try body limited to the single lookup that can raise.
    try:
        override = StudentFieldOverride.objects.get(
            course_id=course.id,
            student=user,
            location=unit.location,
            field='due',
        )
    except StudentFieldOverride.DoesNotExist:
        return None
    # Override values are stored as JSON-encoded serialized dates.
    return DATE_FIELD.from_json(json.loads(override.value))
@attr(shard=1)
class TestDueDateExtensions(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test the instructor due-date extension endpoints (change, reset, and the
    per-unit / per-student extension listings).
    """
    @classmethod
    def setUpClass(cls):
        super(TestDueDateExtensions, cls).setUpClass()
        cls.course = CourseFactory.create()
        cls.due = datetime.datetime(2010, 5, 12, 2, 42, tzinfo=utc)
        # Three top-level units: two with a due date, one without, plus a
        # homework item nested under week1.
        with cls.store.bulk_operations(cls.course.id, emit_signals=False):
            cls.week1 = ItemFactory.create(due=cls.due)
            cls.week2 = ItemFactory.create(due=cls.due)
            cls.week3 = ItemFactory.create()  # No due date
            cls.course.children = [
                cls.week1.location.to_deprecated_string(),
                cls.week2.location.to_deprecated_string(),
                cls.week3.location.to_deprecated_string()
            ]
            cls.homework = ItemFactory.create(
                parent_location=cls.week1.location,
                due=cls.due
            )
            cls.week1.children = [cls.homework.location.to_deprecated_string()]
    def setUp(self):
        """
        Fixtures.
        """
        super(TestDueDateExtensions, self).setUp()
        # user1 has state on every unit; user2 and user3 only on week1 and
        # the homework item.
        user1 = UserFactory.create()
        StudentModule(
            state='{}',
            student_id=user1.id,
            course_id=self.course.id,
            module_state_key=self.week1.location).save()
        StudentModule(
            state='{}',
            student_id=user1.id,
            course_id=self.course.id,
            module_state_key=self.week2.location).save()
        StudentModule(
            state='{}',
            student_id=user1.id,
            course_id=self.course.id,
            module_state_key=self.week3.location).save()
        StudentModule(
            state='{}',
            student_id=user1.id,
            course_id=self.course.id,
            module_state_key=self.homework.location).save()
        user2 = UserFactory.create()
        StudentModule(
            state='{}',
            student_id=user2.id,
            course_id=self.course.id,
            module_state_key=self.week1.location).save()
        StudentModule(
            state='{}',
            student_id=user2.id,
            course_id=self.course.id,
            module_state_key=self.homework.location).save()
        user3 = UserFactory.create()
        StudentModule(
            state='{}',
            student_id=user3.id,
            course_id=self.course.id,
            module_state_key=self.week1.location).save()
        StudentModule(
            state='{}',
            student_id=user3.id,
            course_id=self.course.id,
            module_state_key=self.homework.location).save()
        self.user1 = user1
        self.user2 = user2
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')
    def test_change_due_date(self):
        # Granting an extension stores an override with the new date.
        url = reverse('change_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'student': self.user1.username,
            'url': self.week1.location.to_deprecated_string(),
            'due_datetime': '12/30/2013 00:00'
        })
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(datetime.datetime(2013, 12, 30, 0, 0, tzinfo=utc),
                         get_extended_due(self.course, self.week1, self.user1))
    def test_change_to_invalid_due_date(self):
        # An extension earlier than the original due date is rejected and no
        # override is created.
        url = reverse('change_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'student': self.user1.username,
            'url': self.week1.location.to_deprecated_string(),
            'due_datetime': '01/01/2009 00:00'
        })
        self.assertEqual(response.status_code, 400, response.content)
        self.assertEqual(
            None,
            get_extended_due(self.course, self.week1, self.user1)
        )
    def test_change_nonexistent_due_date(self):
        # week3 has no due date, so there is nothing to extend.
        url = reverse('change_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'student': self.user1.username,
            'url': self.week3.location.to_deprecated_string(),
            'due_datetime': '12/30/2013 00:00'
        })
        self.assertEqual(response.status_code, 400, response.content)
        self.assertEqual(
            None,
            get_extended_due(self.course, self.week3, self.user1)
        )
    def test_reset_date(self):
        # Create an extension first, then verify resetting removes it.
        self.test_change_due_date()
        url = reverse('reset_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'student': self.user1.username,
            'url': self.week1.location.to_deprecated_string(),
        })
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(
            None,
            get_extended_due(self.course, self.week1, self.user1)
        )
    def test_reset_nonexistent_extension(self):
        # Resetting when no extension exists is a client error.
        url = reverse('reset_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'student': self.user1.username,
            'url': self.week1.location.to_deprecated_string(),
        })
        self.assertEqual(response.status_code, 400, response.content)
    def test_show_unit_extensions(self):
        # The per-unit listing reports the extension granted above.
        self.test_change_due_date()
        url = reverse('show_unit_extensions',
                      kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'url': self.week1.location.to_deprecated_string()})
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(json.loads(response.content), {
            u'data': [{u'Extended Due Date': u'2013-12-30 00:00',
                       u'Full Name': self.user1.profile.name,
                       u'Username': self.user1.username}],
            u'header': [u'Username', u'Full Name', u'Extended Due Date'],
            u'title': u'Users with due date extensions for %s' %
                      self.week1.display_name})
    def test_show_student_extensions(self):
        # The per-student listing reports the extension granted above.
        self.test_change_due_date()
        url = reverse('show_student_extensions',
                      kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {'student': self.user1.username})
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(json.loads(response.content), {
            u'data': [{u'Extended Due Date': u'2013-12-30 00:00',
                       u'Unit': self.week1.display_name}],
            u'header': [u'Unit', u'Extended Due Date'],
            u'title': u'Due date extensions for %s (%s)' % (
                self.user1.profile.name, self.user1.username)})
@attr(shard=1)
class TestDueDateExtensionsDeletedDate(ModuleStoreTestCase, LoginEnrollmentTestCase):
    """
    Test that due date extensions survive deletion of the unit's original
    due date (uses a per-test course, unlike TestDueDateExtensions, because
    the test mutates course content).
    """
    def setUp(self):
        """
        Fixtures.
        """
        super(TestDueDateExtensionsDeletedDate, self).setUp()
        self.course = CourseFactory.create()
        self.due = datetime.datetime(2010, 5, 12, 2, 42, tzinfo=utc)
        # Three top-level units: two with a due date, one without, plus a
        # homework item nested under week1.
        with self.store.bulk_operations(self.course.id, emit_signals=False):
            self.week1 = ItemFactory.create(due=self.due)
            self.week2 = ItemFactory.create(due=self.due)
            self.week3 = ItemFactory.create()  # No due date
            self.course.children = [
                self.week1.location.to_deprecated_string(),
                self.week2.location.to_deprecated_string(),
                self.week3.location.to_deprecated_string()
            ]
            self.homework = ItemFactory.create(
                parent_location=self.week1.location,
                due=self.due
            )
            self.week1.children = [self.homework.location.to_deprecated_string()]
        # user1 has state on every unit; user2 and user3 only on week1 and
        # the homework item.
        user1 = UserFactory.create()
        StudentModule(
            state='{}',
            student_id=user1.id,
            course_id=self.course.id,
            module_state_key=self.week1.location).save()
        StudentModule(
            state='{}',
            student_id=user1.id,
            course_id=self.course.id,
            module_state_key=self.week2.location).save()
        StudentModule(
            state='{}',
            student_id=user1.id,
            course_id=self.course.id,
            module_state_key=self.week3.location).save()
        StudentModule(
            state='{}',
            student_id=user1.id,
            course_id=self.course.id,
            module_state_key=self.homework.location).save()
        user2 = UserFactory.create()
        StudentModule(
            state='{}',
            student_id=user2.id,
            course_id=self.course.id,
            module_state_key=self.week1.location).save()
        StudentModule(
            state='{}',
            student_id=user2.id,
            course_id=self.course.id,
            module_state_key=self.homework.location).save()
        user3 = UserFactory.create()
        StudentModule(
            state='{}',
            student_id=user3.id,
            course_id=self.course.id,
            module_state_key=self.week1.location).save()
        StudentModule(
            state='{}',
            student_id=user3.id,
            course_id=self.course.id,
            module_state_key=self.homework.location).save()
        self.user1 = user1
        self.user2 = user2
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')
    def test_reset_extension_to_deleted_date(self):
        """
        Test that we can delete a due date extension after deleting the normal
        due date, without causing an error.
        """
        # Grant an extension while the unit still has a due date.
        url = reverse('change_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'student': self.user1.username,
            'url': self.week1.location.to_deprecated_string(),
            'due_datetime': '12/30/2013 00:00'
        })
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(datetime.datetime(2013, 12, 30, 0, 0, tzinfo=utc),
                         get_extended_due(self.course, self.week1, self.user1))
        self.week1.due = None
        self.week1 = self.store.update_item(self.week1, self.user1.id)
        # Now, week1's normal due date is deleted but the extension still exists.
        url = reverse('reset_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
        response = self.client.post(url, {
            'student': self.user1.username,
            'url': self.week1.location.to_deprecated_string(),
        })
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(
            None,
            get_extended_due(self.course, self.week1, self.user1)
        )
@attr(shard=1)
class TestCourseIssuedCertificatesData(SharedModuleStoreTestCase):
"""
Test data dumps for issued certificates.
"""
@classmethod
def setUpClass(cls):
super(TestCourseIssuedCertificatesData, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(TestCourseIssuedCertificatesData, self).setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
def generate_certificate(self, course_id, mode, status):
"""
Generate test certificate
"""
test_user = UserFactory()
GeneratedCertificateFactory.create(
user=test_user,
course_id=course_id,
mode=mode,
status=status
)
def test_certificates_features_against_status(self):
"""
Test certificates with status 'downloadable' should be in the response.
"""
url = reverse('get_issued_certificates', kwargs={'course_id': unicode(self.course.id)})
# firstly generating downloadable certificates with 'honor' mode
certificate_count = 3
for __ in xrange(certificate_count):
self.generate_certificate(course_id=self.course.id, mode='honor', status=CertificateStatuses.generating)
response = self.client.post(url)
res_json = json.loads(response.content)
self.assertIn('certificates', res_json)
self.assertEqual(len(res_json['certificates']), 0)
# Certificates with status 'downloadable' should be in response.
self.generate_certificate(course_id=self.course.id, mode='honor', status=CertificateStatuses.downloadable)
response = self.client.post(url)
res_json = json.loads(response.content)
self.assertIn('certificates', res_json)
self.assertEqual(len(res_json['certificates']), 1)
def test_certificates_features_group_by_mode(self):
"""
Test for certificate csv features against mode. Certificates should be group by 'mode' in reponse.
"""
url = reverse('get_issued_certificates', kwargs={'course_id': unicode(self.course.id)})
# firstly generating downloadable certificates with 'honor' mode
certificate_count = 3
for __ in xrange(certificate_count):
self.generate_certificate(course_id=self.course.id, mode='honor', status=CertificateStatuses.downloadable)
response = self.client.post(url)
res_json = json.loads(response.content)
self.assertIn('certificates', res_json)
self.assertEqual(len(res_json['certificates']), 1)
# retrieve the first certificate from the list, there should be 3 certificates for 'honor' mode.
certificate = res_json['certificates'][0]
self.assertEqual(certificate.get('total_issued_certificate'), 3)
self.assertEqual(certificate.get('mode'), 'honor')
self.assertEqual(certificate.get('course_id'), str(self.course.id))
# Now generating downloadable certificates with 'verified' mode
for __ in xrange(certificate_count):
self.generate_certificate(
course_id=self.course.id,
mode='verified',
status=CertificateStatuses.downloadable
)
response = self.client.post(url)
res_json = json.loads(response.content)
self.assertIn('certificates', res_json)
# total certificate count should be 2 for 'verified' mode.
self.assertEqual(len(res_json['certificates']), 2)
# retrieve the second certificate from the list
certificate = res_json['certificates'][1]
self.assertEqual(certificate.get('total_issued_certificate'), 3)
self.assertEqual(certificate.get('mode'), 'verified')
def test_certificates_features_csv(self):
    """
    Test for certificate csv features: the CSV download variant of the
    issued-certificates report (headers, content type and row content).
    """
    url = reverse('get_issued_certificates', kwargs={'course_id': unicode(self.course.id)})
    # firstly generating downloadable certificates with 'honor' mode
    certificate_count = 3
    for __ in xrange(certificate_count):
        self.generate_certificate(course_id=self.course.id, mode='honor', status=CertificateStatuses.downloadable)

    # the report stamps the day it was run; format must match the view's output
    current_date = datetime.date.today().strftime("%B %d, %Y")
    response = self.client.get(url, {'csv': 'true'})
    self.assertEqual(response['Content-Type'], 'text/csv')
    self.assertEqual(response['Content-Disposition'], 'attachment; filename={0}'.format('issued_certificates.csv'))
    self.assertEqual(
        response.content.strip(),
        '"CourseID","Certificate Type","Total Certificates Issued","Date Report Run"\r\n"'
        + str(self.course.id) + '","honor","3","' + current_date + '"'
    )
@attr(shard=1)
@override_settings(REGISTRATION_CODE_LENGTH=8)
class TestCourseRegistrationCodes(SharedModuleStoreTestCase):
    """
    Test data dumps for E-commerce Course Registration Codes.

    ``setUp`` pre-populates the course with 12 generated registration codes,
    5 purchased orders and 5 redeemed (spent) codes; the per-test assertions
    on CSV row counts below all build on that baseline state.
    """

    @classmethod
    def setUpClass(cls):
        super(TestCourseRegistrationCodes, cls).setUpClass()
        cls.course = CourseFactory.create()
        cls.url = reverse(
            'generate_registration_codes',
            kwargs={'course_id': cls.course.id.to_deprecated_string()}
        )

    def setUp(self):
        """
        Fixtures.
        """
        super(TestCourseRegistrationCodes, self).setUp()
        CourseModeFactory.create(course_id=self.course.id, min_price=50)
        self.instructor = InstructorFactory(course_key=self.course.id)
        self.client.login(username=self.instructor.username, password='test')
        CourseSalesAdminRole(self.course.id).add_users(self.instructor)

        data = {
            'total_registration_codes': 12, 'company_name': 'Test Group', 'company_contact_name': 'Test@company.com',
            'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
            'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street',
            'address_line_2': '', 'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
            'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
        }

        response = self.client.post(self.url, data, **{'HTTP_HOST': 'localhost'})
        self.assertEqual(response.status_code, 200, response.content)

        for i in range(5):
            order = Order(user=self.instructor, status='purchased')
            order.save()

        # Spent(used) Registration Codes
        # NOTE(review): assumes the 12 codes generated above received
        # sequential primary keys starting at 1 -- holds on a fresh
        # per-test database; verify if fixtures change.
        for i in range(5):
            i += 1
            registration_code_redemption = RegistrationCodeRedemption(
                registration_code_id=i,
                redeemed_by=self.instructor
            )
            registration_code_redemption.save()

    @override_settings(FINANCE_EMAIL='finance@example.com')
    def test_finance_email_in_recipient_list_when_generating_registration_codes(self):
        """
        Test to verify that the invoice will also be sent to the FINANCE_EMAIL when
        generating registration codes
        """
        url_reg_code = reverse('generate_registration_codes',
                               kwargs={'course_id': self.course.id.to_deprecated_string()})

        data = {
            'total_registration_codes': 5, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
            'company_contact_email': 'Test@company.com', 'unit_price': 121.45, 'recipient_name': 'Test123',
            'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
            'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
            'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': 'True'
        }

        response = self.client.post(url_reg_code, data, **{'HTTP_HOST': 'localhost'})
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        # check for the last mail.outbox, The FINANCE_EMAIL has been appended at the
        # very end, when generating registration codes
        self.assertEqual(mail.outbox[-1].to[0], 'finance@example.com')

    def test_user_invoice_copy_preference(self):
        """
        Test to remember user invoice copy preference
        """
        url_reg_code = reverse('generate_registration_codes',
                               kwargs={'course_id': self.course.id.to_deprecated_string()})

        data = {
            'total_registration_codes': 5, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
            'company_contact_email': 'Test@company.com', 'unit_price': 121.45, 'recipient_name': 'Test123',
            'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
            'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
            'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': 'True'
        }

        # user invoice copy preference will be saved in api user preference; model
        response = self.client.post(url_reg_code, data, **{'HTTP_HOST': 'localhost'})
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')

        # get user invoice copy preference.
        url_user_invoice_preference = reverse('get_user_invoice_preference',
                                              kwargs={'course_id': self.course.id.to_deprecated_string()})

        response = self.client.post(url_user_invoice_preference, data)
        result = json.loads(response.content)
        self.assertEqual(result['invoice_copy'], True)

        # updating the user invoice copy preference during code generation flow
        data['invoice'] = ''
        response = self.client.post(url_reg_code, data, **{'HTTP_HOST': 'localhost'})
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')

        # get user invoice copy preference.
        url_user_invoice_preference = reverse('get_user_invoice_preference',
                                              kwargs={'course_id': self.course.id.to_deprecated_string()})

        response = self.client.post(url_user_invoice_preference, data)
        result = json.loads(response.content)
        self.assertEqual(result['invoice_copy'], False)

    def test_generate_course_registration_codes_csv(self):
        """
        Test to generate a response of all the generated course registration codes
        """
        url = reverse('generate_registration_codes',
                      kwargs={'course_id': self.course.id.to_deprecated_string()})

        data = {
            'total_registration_codes': 15, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
            'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
            'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
            'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
            'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
        }

        response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
        # 15 data rows + 1 header + trailing newline
        self.assertEqual(len(body.split('\n')), 17)

    def test_generate_course_registration_with_redeem_url_codes_csv(self):
        """
        Test to generate a response of all the generated course registration codes
        """
        url = reverse('generate_registration_codes',
                      kwargs={'course_id': self.course.id.to_deprecated_string()})

        data = {
            'total_registration_codes': 15, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
            'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
            'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
            'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
            'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
        }

        response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
        self.assertEqual(len(body.split('\n')), 17)
        rows = body.split('\n')
        # skip the header (row 0) and check every data row pairs the code with
        # its redeem URL
        index = 1
        while index < len(rows):
            if rows[index]:
                row_data = rows[index].split(',')
                code = row_data[0].replace('"', '')
                self.assertTrue(row_data[1].startswith('"http')
                                and row_data[1].endswith('/shoppingcart/register/redeem/{0}/"'.format(code)))
            index += 1

    @patch.object(lms.djangoapps.instructor.views.api, 'random_code_generator',
                  Mock(side_effect=['first', 'second', 'third', 'fourth']))
    def test_generate_course_registration_codes_matching_existing_coupon_code(self):
        """
        Test the generated course registration code is already in the Coupon Table
        """
        url = reverse('generate_registration_codes',
                      kwargs={'course_id': self.course.id.to_deprecated_string()})

        # 'first' already exists as a coupon, so the generator must skip it and
        # use the next candidates instead
        coupon = Coupon(code='first', course_id=self.course.id.to_deprecated_string(), created_by=self.instructor)
        coupon.save()
        data = {
            'total_registration_codes': 3, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
            'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
            'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
            'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
            'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
        }

        response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
        self.assertEqual(len(body.split('\n')), 5)  # 1 for headers, 1 for new line at the end and 3 for the actual data

    @patch.object(lms.djangoapps.instructor.views.api, 'random_code_generator',
                  Mock(side_effect=['first', 'first', 'second', 'third']))
    def test_generate_course_registration_codes_integrity_error(self):
        """
        Test for the Integrity error against the generated code
        """
        url = reverse('generate_registration_codes',
                      kwargs={'course_id': self.course.id.to_deprecated_string()})

        data = {
            'total_registration_codes': 2, 'company_name': 'Test Group', 'company_contact_name': 'Test@company.com',
            'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
            'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
            'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
            'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
        }

        # the generator yields 'first' twice; the duplicate triggers an
        # IntegrityError internally and the view retries with the next value
        response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
        self.assertEqual(len(body.split('\n')), 4)

    def test_spent_course_registration_codes_csv(self):
        """
        Test to generate a response of all the spent course registration codes
        """
        url = reverse('spent_registration_codes',
                      kwargs={'course_id': self.course.id.to_deprecated_string()})

        data = {'spent_company_name': ''}
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
        # the 5 codes redeemed in setUp + header + trailing newline
        self.assertEqual(len(body.split('\n')), 7)

        generate_code_url = reverse(
            'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
        )

        data = {
            'total_registration_codes': 9, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
            'unit_price': 122.45, 'company_contact_email': 'Test@company.com', 'recipient_name': 'Test123',
            'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
            'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
            'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
        }

        response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
        self.assertEqual(response.status_code, 200, response.content)

        for i in range(9):
            order = Order(user=self.instructor, status='purchased')
            order.save()

        # Spent(used) Registration Codes
        # NOTE(review): offsets into the id sequence of the 9 codes generated
        # just above (after the 12 from setUp) -- verify if fixtures change.
        for i in range(9):
            i += 13
            registration_code_redemption = RegistrationCodeRedemption(
                registration_code_id=i,
                redeemed_by=self.instructor
            )
            registration_code_redemption.save()

        data = {'spent_company_name': 'Group Alpha'}
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
        self.assertEqual(len(body.split('\n')), 11)

    def test_active_course_registration_codes_csv(self):
        """
        Test to generate a response of all the active course registration codes
        """
        url = reverse('active_registration_codes',
                      kwargs={'course_id': self.course.id.to_deprecated_string()})

        data = {'active_company_name': ''}
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
        # 12 codes from setUp minus the 5 redeemed = 7 active, + header + newline
        self.assertEqual(len(body.split('\n')), 9)

        generate_code_url = reverse(
            'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
        )

        data = {
            'total_registration_codes': 9, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
            'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
            'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
            'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
            'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
        }

        response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
        self.assertEqual(response.status_code, 200, response.content)

        data = {'active_company_name': 'Group Alpha'}
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
        self.assertEqual(len(body.split('\n')), 11)

    def test_get_all_course_registration_codes_csv(self):
        """
        Test to generate a response of all the course registration codes
        """
        url = reverse(
            'get_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
        )

        data = {'download_company_name': ''}
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
        # all 12 codes from setUp + header + trailing newline
        self.assertEqual(len(body.split('\n')), 14)

        generate_code_url = reverse(
            'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
        )

        data = {
            'total_registration_codes': 9, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
            'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
            'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
            'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
            'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
        }

        response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
        self.assertEqual(response.status_code, 200, response.content)

        data = {'download_company_name': 'Group Alpha'}
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
        self.assertEqual(len(body.split('\n')), 11)

    def test_pdf_file_throws_exception(self):
        """
        test to mock the pdf file generation throws an exception
        when generating registration codes.
        """
        generate_code_url = reverse(
            'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
        )

        data = {
            'total_registration_codes': 9, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
            'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
            'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
            'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
            'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
        }

        # the view should swallow the PDF failure and still return 200
        with patch.object(PDFInvoice, 'generate_pdf', side_effect=Exception):
            response = self.client.post(generate_code_url, data)
            self.assertEqual(response.status_code, 200, response.content)

    def test_get_codes_with_sale_invoice(self):
        """
        Test to generate a response of all the course registration codes
        """
        generate_code_url = reverse(
            'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
        )

        data = {
            'total_registration_codes': 5.5, 'company_name': 'Group Invoice', 'company_contact_name': 'Test@company.com',
            'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
            'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
            'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
            'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': True
        }

        response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
        self.assertEqual(response.status_code, 200, response.content)

        url = reverse('get_registration_codes',
                      kwargs={'course_id': self.course.id.to_deprecated_string()})
        data = {'download_company_name': 'Group Invoice'}
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 200, response.content)
        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))

    def test_with_invalid_unit_price(self):
        """
        Test that a non-numeric unit price is rejected with a 400 response.
        """
        generate_code_url = reverse(
            'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
        )

        data = {
            'total_registration_codes': 10, 'company_name': 'Group Invoice', 'company_contact_name': 'Test@company.com',
            'company_contact_email': 'Test@company.com', 'unit_price': 'invalid', 'recipient_name': 'Test123',
            'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
            'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
            'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': True
        }

        response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
        self.assertEqual(response.status_code, 400, response.content)
        self.assertIn('Could not parse amount as', response.content)

    def test_get_historical_coupon_codes(self):
        """
        Test to download a response of all the active coupon codes
        """
        get_coupon_code_url = reverse(
            'get_coupon_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
        )
        for i in range(10):
            coupon = Coupon(
                code='test_code{0}'.format(i), description='test_description', course_id=self.course.id,
                percentage_discount='{0}'.format(i), created_by=self.instructor, is_active=True
            )
            coupon.save()

        # now create coupons with the expiration dates
        for i in range(5):
            coupon = Coupon(
                code='coupon{0}'.format(i), description='test_description', course_id=self.course.id,
                percentage_discount='{0}'.format(i), created_by=self.instructor, is_active=True,
                expiration_date=datetime.datetime.now(pytz.UTC) + datetime.timedelta(days=2)
            )
            coupon.save()

        response = self.client.post(get_coupon_code_url)
        self.assertEqual(response.status_code, 200, response.content)
        # every coupon (with or without expiry) must appear as a CSV row
        for coupon in Coupon.objects.all():
            self.assertIn(
                '"{coupon_code}","{course_id}","{discount}","{description}","{expiration_date}","{is_active}",'
                '"{code_redeemed_count}","{total_discounted_seats}","{total_discounted_amount}"'.format(
                    coupon_code=coupon.code,
                    course_id=coupon.course_id,
                    discount=coupon.percentage_discount,
                    description=coupon.description,
                    expiration_date=coupon.display_expiry_date,
                    is_active=coupon.is_active,
                    code_redeemed_count="0",
                    total_discounted_seats="0",
                    total_discounted_amount="0",
                ), response.content
            )

        self.assertEqual(response['Content-Type'], 'text/csv')
        body = response.content.replace('\r', '')
        self.assertTrue(body.startswith(EXPECTED_COUPON_CSV_HEADER))
@attr(shard=1)
class TestBulkCohorting(SharedModuleStoreTestCase):
    """
    Test adding users to cohorts in bulk via CSV upload.
    """

    @classmethod
    def setUpClass(cls):
        super(TestBulkCohorting, cls).setUpClass()
        cls.course = CourseFactory.create()

    def setUp(self):
        super(TestBulkCohorting, self).setUp()
        self.staff_user = StaffFactory(course_key=self.course.id)
        self.non_staff_user = UserFactory.create()
        # scratch directory for the uploaded CSV fixtures; removed on cleanup
        self.tempdir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self.tempdir)

    def call_add_users_to_cohorts(self, csv_data, suffix='.csv'):
        """
        Call `add_users_to_cohorts` with a file generated from `csv_data`.
        """
        # this temporary file will be removed in `self.tearDown()`
        __, file_name = tempfile.mkstemp(suffix=suffix, dir=self.tempdir)
        with open(file_name, 'w') as file_pointer:
            file_pointer.write(csv_data.encode('utf-8'))
        with open(file_name, 'r') as file_pointer:
            url = reverse('add_users_to_cohorts', kwargs={'course_id': unicode(self.course.id)})
            return self.client.post(url, {'uploaded-file': file_pointer})

    def expect_error_on_file_content(self, file_content, error, file_suffix='.csv'):
        """
        Verify that we get the error we expect for a given file input.
        """
        self.client.login(username=self.staff_user.username, password='test')
        response = self.call_add_users_to_cohorts(file_content, suffix=file_suffix)
        self.assertEqual(response.status_code, 400)
        result = json.loads(response.content)
        self.assertEqual(result['error'], error)

    def verify_success_on_file_content(self, file_content, mock_store_upload, mock_cohort_task):
        """
        Verify that `add_users_to_cohorts` successfully validates the
        file content, uploads the input file, and triggers the
        background task.
        """
        mock_store_upload.return_value = (None, 'fake_file_name.csv')
        self.client.login(username=self.staff_user.username, password='test')
        response = self.call_add_users_to_cohorts(file_content)
        self.assertEqual(response.status_code, 204)
        self.assertTrue(mock_store_upload.called)
        self.assertTrue(mock_cohort_task.called)

    def test_no_cohort_field(self):
        """
        Verify that we get a descriptive verification error when we haven't
        included a cohort field in the uploaded CSV.
        """
        self.expect_error_on_file_content(
            'username,email\n', "The file must contain a 'cohort' column containing cohort names."
        )

    def test_no_username_or_email_field(self):
        """
        Verify that we get a descriptive verification error when we haven't
        included a username or email field in the uploaded CSV.
        """
        self.expect_error_on_file_content(
            'cohort\n', "The file must contain a 'username' column, an 'email' column, or both."
        )

    def test_empty_csv(self):
        """
        Verify that we get a descriptive verification error when we haven't
        included any data in the uploaded CSV.
        """
        self.expect_error_on_file_content(
            '', "The file must contain a 'cohort' column containing cohort names."
        )

    def test_wrong_extension(self):
        """
        Verify that we get a descriptive verification error when we haven't
        uploaded a file with a '.csv' extension.
        """
        self.expect_error_on_file_content(
            '', "The file must end with the extension '.csv'.", file_suffix='.notcsv'
        )

    def test_non_staff_no_access(self):
        """
        Verify that we can't access the view when we aren't a staff user.
        """
        self.client.login(username=self.non_staff_user.username, password='test')
        response = self.call_add_users_to_cohorts('')
        self.assertEqual(response.status_code, 403)

    @patch('lms.djangoapps.instructor.views.api.lms.djangoapps.instructor_task.api.submit_cohort_students')
    @patch('lms.djangoapps.instructor.views.api.store_uploaded_file')
    def test_success_username(self, mock_store_upload, mock_cohort_task):
        """
        Verify that we store the input CSV and call a background task when
        the CSV has username and cohort columns.
        """
        self.verify_success_on_file_content(
            'username,cohort\nfoo_username,bar_cohort', mock_store_upload, mock_cohort_task
        )

    @patch('lms.djangoapps.instructor.views.api.lms.djangoapps.instructor_task.api.submit_cohort_students')
    @patch('lms.djangoapps.instructor.views.api.store_uploaded_file')
    def test_success_email(self, mock_store_upload, mock_cohort_task):
        """
        Verify that we store the input CSV and call the cohorting background
        task when the CSV has email and cohort columns.
        """
        self.verify_success_on_file_content(
            'email,cohort\nfoo_email,bar_cohort', mock_store_upload, mock_cohort_task
        )

    @patch('lms.djangoapps.instructor.views.api.lms.djangoapps.instructor_task.api.submit_cohort_students')
    @patch('lms.djangoapps.instructor.views.api.store_uploaded_file')
    def test_success_username_and_email(self, mock_store_upload, mock_cohort_task):
        """
        Verify that we store the input CSV and call the cohorting background
        task when the CSV has username, email and cohort columns.
        """
        self.verify_success_on_file_content(
            'username,email,cohort\nfoo_username,bar_email,baz_cohort', mock_store_upload, mock_cohort_task
        )

    @patch('lms.djangoapps.instructor.views.api.lms.djangoapps.instructor_task.api.submit_cohort_students')
    @patch('lms.djangoapps.instructor.views.api.store_uploaded_file')
    def test_success_carriage_return(self, mock_store_upload, mock_cohort_task):
        """
        Verify that we store the input CSV and call the cohorting background
        task when lines in the CSV are delimited by carriage returns.
        """
        self.verify_success_on_file_content(
            'username,email,cohort\rfoo_username,bar_email,baz_cohort', mock_store_upload, mock_cohort_task
        )

    @patch('lms.djangoapps.instructor.views.api.lms.djangoapps.instructor_task.api.submit_cohort_students')
    @patch('lms.djangoapps.instructor.views.api.store_uploaded_file')
    def test_success_carriage_return_line_feed(self, mock_store_upload, mock_cohort_task):
        """
        Verify that we store the input CSV and call the cohorting background
        task when lines in the CSV are delimited by carriage returns and line
        feeds.
        """
        self.verify_success_on_file_content(
            'username,email,cohort\r\nfoo_username,bar_email,baz_cohort', mock_store_upload, mock_cohort_task
        )
|
synergeticsedx/deployment-wipro
|
lms/djangoapps/instructor/tests/test_api.py
|
Python
|
agpl-3.0
| 224,197
|
[
"VisIt"
] |
3b2c8b49bba9f8326b586f9fda051f5d7291597c28c557e2c0f1b9157f3e0c35
|
''' A module used by maxent.py and phlearn.py to find the ideal weights for a tableau.
'''
import megatableau
import scipy, scipy.optimize
import math
import numpy as np
### HELPER FUNCTIONS FOR CALCULATING PROBABILITY ###
def maxent_value(weights, tableau, ur, sr):
    """Return the maxent value P* = exp(harmony) for one UR/SR pair.

    Harmony is the weighted sum of this candidate's constraint violations
    (tableau[ur][sr][1] maps constraint index -> violation count).  A tiny
    positive floor (~2.2e-308) is added so the result can never be exactly
    zero due to floating-point underflow.
    """
    floor = np.finfo(np.double).tiny
    violations = tableau[ur][sr][1]
    harmony = sum(weights[c] * count for c, count in violations.items())
    return math.exp(harmony) + floor
def z_score(tableau, ur):
    """Return the partition function Z for *ur*: the sum of the cached
    maxent values (candidate slot [2]) over all of its SRs."""
    return sum(candidate[2] for candidate in tableau[ur].values())
def update_maxent_values(weights, tableau):
    """Recompute P* = exp(harmony) for every UR/SR pair under *weights*
    and store the result in-place in each candidate's slot [2]."""
    for ur, candidates in tableau.items():
        for sr in candidates:
            candidates[sr][2] = maxent_value(weights, tableau, ur, sr)
### OBJECTIVE FUNCTION(S) ###
def neg_log_probability_with_gradient(weights, tableau, l1_mult=0.0, l2_mult=1.0, gaussian_priors=None):
    """ Returns the negative log probability of the data AND a gradient vector.
    This is the objective function used in learn_weights().

    Arguments:
        weights         -- numpy array of constraint weights (nonpositive in practice)
        tableau         -- nested dict: tableau[ur][sr] = [observed count,
                           {constraint index: violation count}, cached maxent value]
        l1_mult/l2_mult -- multipliers for the L1/L2 regularization terms
        gaussian_priors -- optional (mus, sigmas) arrays; when given, this
                           Gaussian prior overrides the L1/L2 priors

    Returns:
        (negative log probability, gradient as an ndarray) -- the pair shape
        expected by scipy.optimize.fmin_l_bfgs_b.
    """
    update_maxent_values(weights, tableau)
    logProbDat = 0
    observed = [0 for i in range(len(weights))]  # Vector of observed violations
    expected = [0 for i in range(len(weights))]  # Vector of expected violations

    # Gaussian priors override L1/L2 priors
    if gaussian_priors:
        mus, sigmas = gaussian_priors[0], gaussian_priors[1]
        normalized = (weights - mus) / sigmas
        prob_prior = -(0.5 * sum(normalized * normalized))
        grad_prior = -(normalized / sigmas)
    else:
        l1_prob_prior = -(l1_mult * sum(weights))
        l2_prob_prior = l2_mult * sum(weights * weights)
        # np.ones replaces the long-deprecated scipy.ones alias (removed from
        # modern SciPy); it is the exact same function.
        l1_grad_prior = -(l1_mult * np.ones(len(weights)))
        l2_grad_prior = 2 * l2_mult * weights
        prob_prior = -(l1_prob_prior + l2_prob_prior)
        grad_prior = -(l1_grad_prior + l2_grad_prior)

    for ur in tableau:
        ur_count = 0  # Total observed tokens for this UR
        z = z_score(tableau, ur)
        new_expected = [0 for i in range(len(weights))]
        for sr in tableau[ur]:
            ur_count += tableau[ur][sr][0]
            prob = tableau[ur][sr][2] / z
            logProbDat += math.log(prob) * tableau[ur][sr][0]
            for c in tableau[ur][sr][1]:
                observed[c] += tableau[ur][sr][1][c] * tableau[ur][sr][0]
                new_expected[c] += tableau[ur][sr][1][c] * prob
        # Expected violations are per-token; scale by the UR's token count.
        for i in range(0, len(expected)):
            expected[i] += new_expected[i] * ur_count

    logProbDat += prob_prior
    gradient = [e - o - p for e, o, p in zip(expected, observed, grad_prior)]  # i.e. -(observed minus expected)
    return (-logProbDat, np.array(gradient))

nlpwg = neg_log_probability_with_gradient  # So you don't get carpal tunnel syndrome.
def neg_log_probability(weights, tableau, l1_mult=0.0, l2_mult=1.0):
    """ Returns just the negative log probability of the data
    (the gradient computed alongside it is discarded).
    """
    neg_log_prob, _gradient = nlpwg(weights, tableau, l1_mult, l2_mult)
    return neg_log_prob
def probability(weights, tableau, l1_mult=0.0, l2_mult=1.0):
    """ Returns just the probability of the data, i.e.
    exp(-negative log probability).
    """
    neg_log_prob = nlpwg(weights, tableau, l1_mult, l2_mult)[0]
    return math.exp(-neg_log_prob)
### OPTIMIZATION FUNCTION
def learn_weights(mt, l1_mult=0.0, l2_mult=1.0, precision=10000000):
    """ Given a filled-in megatableau, return the optimal weight vector.

    Arguments:
        mt        -- a megatableau with .weights, .tableau, .gaussian_priors
                     and .constraints_abbrev attributes; its .weights are
                     updated in place with the learned values
        l1_mult   -- L1 regularization multiplier (passed to nlpwg)
        l2_mult   -- L2 regularization multiplier (passed to nlpwg)
        precision -- 'factr' tolerance for L-BFGS-B (larger = looser/faster)

    Returns the learned weight vector (also stored on mt.weights).
    """
    # Set up the initial weights and weight bounds (nonpositive reals).
    # np.random.rand replaces the scipy.rand alias removed from modern SciPy;
    # both name the same NumPy function.
    w_0 = -np.random.rand(len(mt.weights))  # Random initial weights
    #w_0 = [0 for w in mt.weights] # 0 initial weights
    nonpos_reals = [(-50, 0) for wt in mt.weights]

    # Find the best weights
    learned_weights, fneval, rc = scipy.optimize.fmin_l_bfgs_b(
        nlpwg, w_0,
        args=(mt.tableau, l1_mult, l2_mult, mt.gaussian_priors),
        bounds=nonpos_reals, factr=precision)

    # Update the mt in place with the new weights
    mt.weights = learned_weights

    # Be sociable
    print("\nBoom! Weights have been updated:")
    for i in range(0, len(learned_weights)):
        print("{}\t{}".format(mt.constraints_abbrev[i], str(learned_weights[i])))
    print("\nLog probability of data: {}".format(str(-(nlpwg(learned_weights, mt.tableau))[0])))
    print("")

    # Return
    return learned_weights
|
rdaland/PhoMEnt
|
optimizer.py
|
Python
|
bsd-3-clause
| 4,662
|
[
"Gaussian"
] |
f15d37a6dda554a5e224cc1f6a89e7ae51332377434642e2f4e9765b1c3c31c7
|
#===============================================================================
# LICENSE XOT-Framework - CC BY-NC-ND
#===============================================================================
# This work is licenced under the Creative Commons
# Attribution-Non-Commercial-No Derivative Works 3.0 Unported License. To view a
# copy of this licence, visit http://creativecommons.org/licenses/by-nc-nd/3.0/
# or send a letter to Creative Commons, 171 Second Street, Suite 300,
# San Francisco, California 94105, USA.
#===============================================================================
from regexer import Regexer
from logger import Logger
from urihandler import UriHandler
class MmsHelper:
"""Class that could help with parsing of simple MMS Stream files"""
def __init__(self):
"""Creates a class object. Should not be used. There are only static
methods available.
"""
raise NotImplementedError
@staticmethod
def GetMmsFromHtml(url, proxy=None, index=0):
    """Resolve a URL pointing at an MMS playlist to a single mms:// stream.

    Arguments:
    url : string - the URL of an MMS playlist.

    Keyword Arguments:
    proxy : Proxy - proxy information used when fetching the playlist
    index : int   - which Ref entry of the playlist to return

    Returns:
    A single mms:// stream URL.  A <url> that already contains ".mms" is
    assumed to be a stream itself and is returned unchanged.  If the
    playlist has fewer than <index> + 1 entries the first entry is used;
    if no entries are found at all the original <url> is returned.

    Example:
    Ref1=http://url.here/stream1
    Ref2=http://url.here/stream2

    Will return: mms://url.here/stream1
    """
    if url.find(".mms") > 0:
        Logger.Info("MMS found in url: %s", url)
        return url

    Logger.Debug("Parsing %s to find MMS", url)
    data = UriHandler.Open(url, pb=True, proxy=proxy)
    urls = Regexer.DoRegex("[Rr]ef\d=http://([^\r\n]+)", data)
    if not urls:
        return url
    chosen = urls[index] if len(urls) > index else urls[0]
    return "mms://%s" % (chosen,)
@staticmethod
def GetMmsFromAsx(url, proxy):
"""Opens a URL with an ASX playlist and returns the first found stream
in the ASX file. Only searches for mms://url.
Arguments:
url : string - the URL to an ASX playlist.
Returns:
The first found stream in an ASX playlist. If the <url> ends with .mms
it is assumed to already be a single stream. In that case the URL
is returned.
Example:
<asx version="3.0">
<title>Example.com Live Stream</title>
<entry>
<title>Short Announcement to Play Before Main Stream</title>
<ref href="http://example.com/announcement.wma" />
<param name="aParameterName" value="aParameterValue" />
</entry>
<entry>
<title>Example radio</title>
<ref href="mms://example.com:8080" />
<author>Example.com</author>
<copyright>2005 Example.com</copyright>
</entry>
</asx>
Will return: mms://example.com:8080 because it is the first MMS stream
"""
if url.find(".mms") > 0:
Logger.Info("MMS found in url: %s", url)
return url
Logger.Debug("Parsing %s to find MMS", url)
data = UriHandler.Open(url, pb=True, proxy=proxy)
urls = Regexer.DoRegex('[Rr]ef href="mms://([^"]+)"', data)
if len(urls) > 0:
return "mms://%s" % (urls[0],)
else:
return url
|
SMALLplayer/smallplayer-image-creator
|
storage/.xbmc/addons/net.rieter.xot.smallplayer/resources/libs/helpers/mmshelper.py
|
Python
|
gpl-2.0
| 3,807
|
[
"VisIt"
] |
2c950d7583ae41d0362d82447114e0e0e6274061553f2d536989613cd5a7868e
|
#!/usr/bin/python
# python parser module for pipeline for miRNA full profiling with bowtie 20/4/2011
# version 1
# Usage class_mirparser.py <bowtie_out> <bowtie miRNA index> <LABEL> <output1 file> <output2 file>
# still to work. Split count still bugged
import sys, subprocess
from collections import defaultdict
def get_fasta(index="/home/galaxy/galaxy-dist/bowtie/5.37_Dmel/5.37_Dmel"):
    """Return a {miRNA name: sequence} dict extracted from a bowtie index.

    Runs ``bowtie-inspect -a 0`` on *index* (the exact base path of the
    bowtie library in the galaxy environment) and parses its FASTA
    output.  Only the first whitespace-separated token of each ">"
    header line is kept as the name.
    """
    proc = subprocess.Popen(args=["bowtie-inspect", "-a", "0", index],
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    fasta_lines = proc.stdout.readlines()
    proc.wait()
    sequences = {}
    for line in fasta_lines:
        if line[0] == ">":
            # Header ">name description\n" -> keep just "name".
            current_name = line[1:-1].split()[0]
        else:
            # Sequence line: drop the trailing newline.
            sequences[current_name] = line[:-1]
    return sequences
class Mirna:
    """Collects bowtie read alignments for one pre-miRNA and derives
    per-position coverage, hit maps and 5p/3p read counts from them.

    Offsets are 0-based internally; output methods add 1 for biologists.
    """
    def __init__(self, name, sequence):
        self.name = name            # reference name from the bowtie index
        self.sequence = sequence    # reference sequence from the bowtie index
        self.matched_reads = []     # one (offset, size) tuple per aligned read
        self.dicmap = {}            # (offset, size) -> count of identical reads
    def addread (self, offset, size):
        #method to add reads to the object
        self.matched_reads.append( (offset, size) )
        self.dicmap[(offset, size)] = self.dicmap.get((offset, size), 0) + 1
        return
    def mircount (self):
        #method to return the raw counts (total number of aligned reads)
        return len(self.matched_reads)
    def density (self):
        '''method to output the read coverage by position in the mir'''
        # NOTE(review): assumes offset+size never exceeds len(sequence)
        # (otherwise map[i] raises IndexError) -- TODO confirm upstream.
        map = [0 for i in range (len(self.sequence))]
        for offset, size in self.dicmap:
            for i in range (offset, offset+size):
                map[i] += self.dicmap[(offset,size)]
        return map
    def normalized_density (self):
        # Coverage rescaled to [0, 1] on both axes; "or 1" guards the
        # all-zero-coverage and empty-sequence cases against division by zero.
        map = self.density ()
        maximum = float (max (map) ) or 1
        length = float (len (map) ) or 1
        Total_NoR = self.mircount()
        output = ["mir\tcoordinate\tdensity\tNoR"]
        for i, D in enumerate (map):
            # length/maximum are floats, so the divisions are true divisions
            # even under Python 2.
            output.append("%s\t%s\t%s\t%s" % (self.name, (i+1)/length, D/maximum, Total_NoR))
        return "\n".join(output)
    def hitmap (self):
        #method to output the reads above the premir: one dotted line per
        #distinct (offset, size), padded with "." outside the read span
        output = []
        output.append (self.name)
        output.append ( "%s\t%s\t%s\t%s" % (self.sequence, "offset", "size", "num reads") )
        for pos_size in sorted(self.dicmap):
            seq = self.sequence[ pos_size[0] : pos_size[0]+pos_size[1] ]
            output.append ("%s%s%s\t%s\t%s\t%s" % ("."*len(self.sequence[:pos_size[0]]), seq, "."*len(self.sequence[pos_size[0]+pos_size[1]:]), pos_size[0]+1, pos_size[1], self.dicmap[pos_size] ) ) #attention pos_size[0]+1 because 1-based offset for biologists
        return "\n".join(output)
    def splitcount (self, shift):
        #method to assign counts to 5p and 3p parts
        # NOTE(review): the file header flags split count as "still bugged";
        # the main script below actually uses splitcount_2 instead.
        median = len(self.sequence)/2
        splitsite = 0
        scores = []
        # For each candidate split position within +/-shift of the middle,
        # count the reads that do NOT span that position.
        for i in range(median-shift, median+shift+1):
            countsum = 0
            for pos_size in self.dicmap:
                if pos_size[0] <= i <= pos_size[0]+pos_size[1]-1: continue
                else: countsum = countsum + self.dicmap[pos_size]
            scores.append(countsum)
        # Pick the middle of the plateau of maximal scores.
        firstmax = scores.index(max(scores))
        scores.reverse()
        lastmax = scores.index(max(scores))
        scores.reverse()
        split_selected = firstmax + len(scores[firstmax:-lastmax])/2 + median - shift
        mir5p = 0
        mir3p = 0
        # Reads spanning the split are discarded; others go to 5p or 3p.
        for pos_size in self.dicmap:
            if pos_size[0] <= split_selected <= pos_size[0]+pos_size[1]-1: continue
            elif split_selected <= pos_size[0]: mir3p = mir3p + self.dicmap[pos_size]
            else : mir5p = mir5p + self.dicmap[pos_size]
        return "%s_5p\t%s\n%s_3p\t%s" % (self.name, mir5p, self.name, mir3p)
    def splitcount_2 (self, shift):
        #new method to assign counts to 5p and 3p parts, base on density map:
        #split at the coverage minimum within +/-shift of the middle.
        density_map = self.density()
        median = len(self.sequence)/2
        minimum = 0
        densitydic = dict ([(i, density) for i, density in enumerate (density_map) if median-shift<= i <= median+shift ])
        # Invert {offset: density} to {density: offset}; on ties the LAST
        # offset with the minimal density wins (dict overwrite order).
        revdic = dict(map(lambda item: (item[1],item[0]),densitydic.items()))
        mindensity_offset = revdic[min(revdic.keys())]
        mir5p = 0
        mir3p = 0
        for pos_size in self.dicmap:
            # Reads spanning the split point are discarded.
            if mindensity_offset in range (pos_size[0], pos_size[0]+pos_size[1]): continue
#            if pos_size[0] <= mindensity_offset <= pos_size[0]+pos_size[1]-1: continue
            if mindensity_offset <= pos_size[0]: mir3p = mir3p + self.dicmap[pos_size]
            else : mir5p = mir5p + self.dicmap[pos_size]
        return "%s_5p\t%s\n%s_3p\t%s" % (self.name, mir5p, self.name, mir3p)
# --------------------------------------------------------------------------
# Main script (Python 2).
# argv: 1 = bowtie output file, 2 = bowtie miRNA index base path,
#       3 = sample LABEL, 4 = detailed output file, 5 = count-table file
# --------------------------------------------------------------------------
# Build one Mirna object per reference sequence found in the bowtie index.
mirdict = get_fasta (sys.argv[2])
dicobject = {}
for mir in mirdict:
    dicobject[mir] = Mirna(mir, mirdict[mir])
# Feed every bowtie alignment line (fields: read, ref name, offset,
# read sequence, ...) to its Mirna object.
F = open (sys.argv[1], "r")
for line in F:
    fields = line.split()
    name = fields[1]
    offset= int(fields[2])
    sequence= fields[3]
    dicobject[name].addread(offset, len(sequence))
F.close()
# First report: per-miR hit map, raw per-position coverage, then the
# normalized density table.
F = open (sys.argv[4], "w")
for mir in sorted(dicobject):
    print >> F, dicobject[mir].hitmap()
    for i, counts in enumerate (dicobject[mir].density()):
        print >> F, "%s\t%s" % (i+1, counts) # attention 1-based offset for biologists
    print >> F
    print >> F, dicobject[mir].normalized_density()
F.close()
# Second report: 5p/3p count table, header labelled with the sample name.
F = open (sys.argv[5], "w")
print >> F, "gene\t%s" % sys.argv[3]
for mir in sorted(dicobject):
    print >> F, dicobject[mir].splitcount_2(15)
|
JuPeg/tools-artbio
|
unstable/local_tools/class_mirparser.py
|
Python
|
mit
| 5,141
|
[
"Bowtie",
"Galaxy"
] |
3453a33a38b3e5720b0e099f64ea05e287d2c38011f81f003128389503cf2f45
|
########################################################################
# $HeadURL $
# File: ReTransfer.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2013/04/02 14:24:21
########################################################################
""" :mod: ReTransfer
================
.. module: ReTransfer
:synopsis: ReTransfer Operation handler
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
ReTransfer Operation handler
"""
# #
# @file ReTransfer.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2013/04/02 14:24:31
# @brief Definition of ReTransfer class.
# # imports
from DIRAC import S_OK, S_ERROR
from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor
from DIRAC.DataManagementSystem.Agent.RequestOperations.DMSRequestOperationsBase import DMSRequestOperationsBase
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.MonitoringSystem.Client.MonitoringReporter import MonitoringReporter
########################################################################
class ReTransfer(DMSRequestOperationsBase):
    """
    .. class:: ReTransfer

    online ReTransfer operation handler: re-stages ("retransfers") the
    waiting files of one Operation at a single target StorageElement.

    :param self: self reference
    :param ~DIRAC.RequestManagementSystem.Client.Operation.Operation operation: Operation instance
    :param str csPath: CS path for this handler
    """

    def __init__(self, operation=None, csPath=None):
        """c'tor: delegates straight to the base operation handler."""
        # # base class ctor
        DMSRequestOperationsBase.__init__(self, operation, csPath)

    def __call__(self):
        """reTransfer operation execution.

        Returns the RSS error result when the target-SE status check itself
        fails, S_OK("... banned ...") when the target is banned for writing,
        S_ERROR when more than one target SE was given, and S_OK() otherwise
        (per-file failures are recorded on the files, not returned).
        """
        # The flag 'rmsMonitoring' is set by the RequestTask and is False by default.
        # Here we use 'createRMSRecord' to create the ES record which is defined inside OperationHandlerBase.
        if self.rmsMonitoring:
            self.rmsMonitoringReporter = MonitoringReporter(monitoringType="RMSMonitoring")
        else:
            # # gMonitor stuff: legacy activity counters, one per outcome
            gMonitor.registerActivity(
                "FileReTransferAtt", "File retransfers attempted", "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM
            )
            gMonitor.registerActivity(
                "FileReTransferOK", "File retransfers successful", "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM
            )
            gMonitor.registerActivity(
                "FileReTransferFail", "File retransfers failed", "RequestExecutingAgent", "Files/min", gMonitor.OP_SUM
            )

        # # list of targetSEs
        targetSEs = self.operation.targetSEList
        # # check targetSEs for removal (only the first SE is RSS-checked;
        # # a multi-SE operation is rejected further below)
        targetSE = targetSEs[0]
        bannedTargets = self.checkSEsRSS(targetSE)
        if not bannedTargets["OK"]:
            # RSS check itself failed: count all files as attempted+failed
            # and propagate the error result.
            if self.rmsMonitoring:
                for status in ["Attempted", "Failed"]:
                    self.rmsMonitoringReporter.addRecord(self.createRMSRecord(status, len(self.operation)))
                self.rmsMonitoringReporter.commit()
            else:
                gMonitor.addMark("FileReTransferAtt")
                gMonitor.addMark("FileReTransferFail")
            return bannedTargets

        if bannedTargets["Value"]:
            return S_OK("%s targets are banned for writing" % ",".join(bannedTargets["Value"]))

        # # get waiting files
        waitingFiles = self.getWaitingFilesList()
        # # prepare waiting files, keyed by PFN
        toRetransfer = dict([(opFile.PFN, opFile) for opFile in waitingFiles])

        if self.rmsMonitoring:
            self.rmsMonitoringReporter.addRecord(self.createRMSRecord("Attempted", len(toRetransfer)))
        else:
            gMonitor.addMark("FileReTransferAtt", len(toRetransfer))

        # Exactly one target SE is supported: fail the whole operation
        # (and every file in it) otherwise.
        if len(targetSEs) != 1:
            error = "only one TargetSE allowed, got %d" % len(targetSEs)
            for opFile in toRetransfer.values():
                opFile.Error = error
                opFile.Status = "Failed"
            self.operation.Error = error
            if self.rmsMonitoring:
                self.rmsMonitoringReporter.addRecord(self.createRMSRecord("Failed", len(toRetransfer)))
                self.rmsMonitoringReporter.commit()
            else:
                gMonitor.addMark("FileReTransferFail", len(toRetransfer))
            return S_ERROR(error)

        se = StorageElement(targetSE)
        for opFile in toRetransfer.values():
            reTransfer = se.retransferOnlineFile(opFile.LFN)
            # First failure mode: the call itself returned an error.
            if not reTransfer["OK"]:
                opFile.Error = reTransfer["Message"]
                self.log.error("Retransfer failed", opFile.Error)
                if self.rmsMonitoring:
                    self.rmsMonitoringReporter.addRecord(self.createRMSRecord("Failed", 1))
                else:
                    gMonitor.addMark("FileReTransferFail", 1)
                continue
            reTransfer = reTransfer["Value"]
            # Second failure mode: the call succeeded but listed this LFN
            # in its per-file "Failed" dict.
            if opFile.LFN in reTransfer["Failed"]:
                opFile.Error = reTransfer["Failed"][opFile.LFN]
                self.log.error("Retransfer failed", opFile.Error)
                if self.rmsMonitoring:
                    self.rmsMonitoringReporter.addRecord(self.createRMSRecord("Failed", 1))
                else:
                    gMonitor.addMark("FileReTransferFail", 1)
                continue
            opFile.Status = "Done"
            self.log.info("%s retransfer done" % opFile.LFN)
            if self.rmsMonitoring:
                self.rmsMonitoringReporter.addRecord(self.createRMSRecord("Successful", 1))
            else:
                gMonitor.addMark("FileReTransferOK", 1)

        if self.rmsMonitoring:
            self.rmsMonitoringReporter.commit()

        return S_OK()
|
DIRACGrid/DIRAC
|
src/DIRAC/DataManagementSystem/Agent/RequestOperations/ReTransfer.py
|
Python
|
gpl-3.0
| 5,674
|
[
"DIRAC"
] |
69e666ac671012e1bd1246aadb04f2ffcdfcd6d07b92d0e85302c4872986d02d
|
# Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2004-2006 The Regents of The University of Michigan
# Copyright (c) 2010-2013 Advanced Micro Devices, Inc.
# Copyright (c) 2013 Mark D. Hill and David A. Wood
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
# Nathan Binkert
# Andreas Hansson
import sys
from types import FunctionType, MethodType, ModuleType
import m5
from m5.util import *
# Have to import params up top since Param is referenced on initial
# load (when SimObject class references Param to create a class
# variable, the 'name' param)...
from m5.params import *
# There are a few things we need that aren't in params.__all__ since
# normal users don't need them
from m5.params import ParamDesc, VectorParamDesc, \
isNullPointer, SimObjectVector, Port
from m5.proxy import *
from m5.proxy import isproxy
#####################################################################
#
# M5 Python Configuration Utility
#
# The basic idea is to write simple Python programs that build Python
# objects corresponding to M5 SimObjects for the desired simulation
# configuration. For now, the Python emits a .ini file that can be
# parsed by M5. In the future, some tighter integration between M5
# and the Python interpreter may allow bypassing the .ini file.
#
# Each SimObject class in M5 is represented by a Python class with the
# same name. The Python inheritance tree mirrors the M5 C++ tree
# (e.g., SimpleCPU derives from BaseCPU in both cases, and all
# SimObjects inherit from a single SimObject base class). To specify
# an instance of an M5 SimObject in a configuration, the user simply
# instantiates the corresponding Python object. The parameters for
# that SimObject are given by assigning to attributes of the Python
# object, either using keyword assignment in the constructor or in
# separate assignment statements. For example:
#
# cache = BaseCache(size='64KB')
# cache.hit_latency = 3
# cache.assoc = 8
#
# The magic lies in the mapping of the Python attributes for SimObject
# classes to the actual SimObject parameter specifications. This
# allows parameter validity checking in the Python code. Continuing
# the example above, the statements "cache.blurfl=3" or
# "cache.assoc='hello'" would both result in runtime errors in Python,
# since the BaseCache object has no 'blurfl' parameter and the 'assoc'
# parameter requires an integer, respectively. This magic is done
# primarily by overriding the special __setattr__ method that controls
# assignment to object attributes.
#
# Once a set of Python objects have been instantiated in a hierarchy,
# calling 'instantiate(obj)' (where obj is the root of the hierarchy)
# will generate a .ini file.
#
#####################################################################
# list of all SimObject classes, keyed by class name; populated by
# MetaSimObject.__new__ for every class that declares a 'type'
allClasses = {}

# dict to look up SimObjects based on path (presumably instance paths
# like "system.cpu"; not populated in this part of the file -- verify)
instanceDict = {}

# Did any of the SimObjects lack a header file?  Set to True by
# MetaSimObject.__init__ when a 'type' is declared without 'cxx_header'.
noCxxHeader = False
def public_value(key, value):
    """Return true for class-dict entries that bypass SimObject param
    filtering: "private" names (leading underscore) and plain functions,
    methods, modules, classmethods and types, which get copied straight
    into the class dict instead of being treated as param/port settings.
    """
    if key.startswith('_'):
        return True
    return isinstance(value, (FunctionType, MethodType, ModuleType,
                              classmethod, type))
# The metaclass for SimObject. This class controls how new classes
# that derive from SimObject are instantiated, and provides inherited
# class behavior (just like a class controls how instances of that
# class are instantiated, and provides inherited instance behavior).
class MetaSimObject(type):
    """Metaclass for SimObject (Python 2).

    Separates real class attributes from parameter/port declarations at
    class-creation time, wires up multidict-based inheritance of params,
    ports, values and children, and provides the SWIG and C++ param-struct
    code-generation entry points used by the build system.
    """
    # Attributes that can be set only at initialization time
    init_keywords = { 'abstract' : bool,
                      'cxx_class' : str,
                      'cxx_type' : str,
                      'cxx_header' : str,
                      'type' : str,
                      'cxx_bases' : list }
    # Attributes that can be set any time
    keywords = { 'check' : FunctionType }

    # __new__ is called before __init__, and is where the statements
    # in the body of the class definition get loaded into the class's
    # __dict__.  We intercept this to filter out parameter & port assignments
    # and only allow "private" attributes to be passed to the base
    # __new__ (starting with underscore).
    def __new__(mcls, name, bases, dict):
        assert name not in allClasses, "SimObject %s already present" % name

        # Copy "private" attributes, functions, and classes to the
        # official dict.  Everything else goes in _init_dict to be
        # filtered in __init__.
        cls_dict = {}
        value_dict = {}
        for key,val in dict.items():
            if public_value(key, val):
                cls_dict[key] = val
            else:
                # must be a param/port setting
                value_dict[key] = val
        if 'abstract' not in value_dict:
            value_dict['abstract'] = False
        if 'cxx_bases' not in value_dict:
            value_dict['cxx_bases'] = []
        cls_dict['_value_dict'] = value_dict
        cls = super(MetaSimObject, mcls).__new__(mcls, name, bases, cls_dict)
        if 'type' in value_dict:
            allClasses[name] = cls
        return cls

    # subclass initialization
    def __init__(cls, name, bases, dict):
        # calls type.__init__()... I think that's a no-op, but leave
        # it here just in case it's not.
        super(MetaSimObject, cls).__init__(name, bases, dict)

        # initialize required attributes

        # class-only attributes
        cls._params = multidict() # param descriptions
        cls._ports = multidict()  # port descriptions

        # class or instance attributes
        cls._values = multidict()   # param values
        cls._children = multidict() # SimObject children
        cls._port_refs = multidict() # port ref objects
        cls._instantiated = False # really instantiated, cloned, or subclassed

        # We don't support multiple inheritance of sim objects.  If you want
        # to, you must fix multidict to deal with it properly. Non sim-objects
        # are ok, though
        bTotal = 0
        for c in bases:
            if isinstance(c, MetaSimObject):
                bTotal += 1
        if bTotal > 1:
            raise TypeError, "SimObjects do not support multiple inheritance"

        base = bases[0]

        # Set up general inheritance via multidicts.  A subclass will
        # inherit all its settings from the base class.  The only time
        # the following is not true is when we define the SimObject
        # class itself (in which case the multidicts have no parent).
        if isinstance(base, MetaSimObject):
            cls._base = base
            cls._params.parent = base._params
            cls._ports.parent = base._ports
            cls._values.parent = base._values
            cls._children.parent = base._children
            cls._port_refs.parent = base._port_refs
            # mark base as having been subclassed
            base._instantiated = True
        else:
            cls._base = None

        # default keyword values
        if 'type' in cls._value_dict:
            if 'cxx_class' not in cls._value_dict:
                cls._value_dict['cxx_class'] = cls._value_dict['type']

            cls._value_dict['cxx_type'] = '%s *' % cls._value_dict['cxx_class']

            if 'cxx_header' not in cls._value_dict:
                global noCxxHeader
                noCxxHeader = True
                warn("No header file specified for SimObject: %s", name)

        # Export methods are automatically inherited via C++, so we
        # don't want the method declarations to get inherited on the
        # python side (and thus end up getting repeated in the wrapped
        # versions of derived classes).  The code below basically
        # suppresses inheritance by substituting in the base (null)
        # versions of these methods unless a different version is
        # explicitly supplied.
        for method_name in ('export_methods', 'export_method_cxx_predecls',
                            'export_method_swig_predecls'):
            if method_name not in cls.__dict__:
                base_method = getattr(MetaSimObject, method_name)
                m = MethodType(base_method, cls, MetaSimObject)
                setattr(cls, method_name, m)

        # Now process the _value_dict items.  They could be defining
        # new (or overriding existing) parameters or ports, setting
        # class keywords (e.g., 'abstract'), or setting parameter
        # values or port bindings.  The first 3 can only be set when
        # the class is defined, so we handle them here.  The others
        # can be set later too, so just emulate that by calling
        # setattr().
        for key,val in cls._value_dict.items():
            # param descriptions
            if isinstance(val, ParamDesc):
                cls._new_param(key, val)

            # port objects
            elif isinstance(val, Port):
                cls._new_port(key, val)

            # init-time-only keywords
            elif cls.init_keywords.has_key(key):
                cls._set_keyword(key, val, cls.init_keywords[key])

            # default: use normal path (ends up in __setattr__)
            else:
                setattr(cls, key, val)

    # Validate and install an init-time/any-time keyword on the class;
    # plain functions become classmethods.
    def _set_keyword(cls, keyword, val, kwtype):
        if not isinstance(val, kwtype):
            raise TypeError, 'keyword %s has bad type %s (expecting %s)' % \
                  (keyword, type(val), kwtype)
        if isinstance(val, FunctionType):
            val = classmethod(val)
        type.__setattr__(cls, keyword, val)

    # Register a new parameter description and apply its default, if any.
    def _new_param(cls, name, pdesc):
        # each param desc should be uniquely assigned to one variable
        assert(not hasattr(pdesc, 'name'))
        pdesc.name = name
        cls._params[name] = pdesc
        if hasattr(pdesc, 'default'):
            cls._set_param(name, pdesc.default, pdesc)

    # Convert and store a parameter value, wrapping conversion errors
    # with the class/param context for a better message.
    def _set_param(cls, name, value, param):
        assert(param.name == name)
        try:
            value = param.convert(value)
        except Exception, e:
            msg = "%s\nError setting param %s.%s to %s\n" % \
                  (e, cls.__name__, name, value)
            e.args = (msg, )
            raise
        cls._values[name] = value
        # if param value is a SimObject, make it a child too, so that
        # it gets cloned properly when the class is instantiated
        if isSimObjectOrVector(value) and not value.has_parent():
            cls._add_cls_child(name, value)

    def _add_cls_child(cls, name, child):
        # It's a little funky to have a class as a parent, but these
        # objects should never be instantiated (only cloned, which
        # clears the parent pointer), and this makes it clear that the
        # object is not an orphan and can provide better error
        # messages.
        child.set_parent(cls, name)
        cls._children[name] = child

    # Register a new port description on the class.
    def _new_port(cls, name, port):
        # each port should be uniquely assigned to one variable
        assert(not hasattr(port, 'name'))
        port.name = name
        cls._ports[name] = port

    # same as _get_port_ref, effectively, but for classes
    def _cls_get_port_ref(cls, attr):
        # Return reference that can be assigned to another port
        # via __setattr__.  There is only ever one reference
        # object per port, but we create them lazily here.
        ref = cls._port_refs.get(attr)
        if not ref:
            ref = cls._ports[attr].makeRef(cls)
            cls._port_refs[attr] = ref
        return ref

    # Set attribute (called on foo.attr = value when foo is an
    # instance of class cls).
    def __setattr__(cls, attr, value):
        # normal processing for private attributes
        if public_value(attr, value):
            type.__setattr__(cls, attr, value)
            return

        if cls.keywords.has_key(attr):
            cls._set_keyword(attr, value, cls.keywords[attr])
            return

        if cls._ports.has_key(attr):
            cls._cls_get_port_ref(attr).connect(value)
            return

        if isSimObjectOrSequence(value) and cls._instantiated:
            raise RuntimeError, \
                  "cannot set SimObject parameter '%s' after\n" \
                  "    class %s has been instantiated or subclassed" \
                  % (attr, cls.__name__)

        # check for param
        param = cls._params.get(attr)
        if param:
            cls._set_param(attr, value, param)
            return

        if isSimObjectOrSequence(value):
            # If RHS is a SimObject, it's an implicit child assignment.
            cls._add_cls_child(attr, coerceSimObjectOrVector(value))
            return

        # no valid assignment... raise exception
        raise AttributeError, \
              "Class %s has no parameter \'%s\'" % (cls.__name__, attr)

    # Attribute lookup fallback: synthesized cxx_class_* helpers first,
    # then param values, then children.
    def __getattr__(cls, attr):
        if attr == 'cxx_class_path':
            return cls.cxx_class.split('::')

        if attr == 'cxx_class_name':
            return cls.cxx_class_path[-1]

        if attr == 'cxx_namespaces':
            return cls.cxx_class_path[:-1]

        if cls._values.has_key(attr):
            return cls._values[attr]

        if cls._children.has_key(attr):
            return cls._children[attr]

        raise AttributeError, \
              "object '%s' has no attribute '%s'" % (cls.__name__, attr)

    def __str__(cls):
        return cls.__name__

    # See ParamValue.cxx_predecls for description.
    def cxx_predecls(cls, code):
        code('#include "params/$cls.hh"')

    # See ParamValue.swig_predecls for description.
    def swig_predecls(cls, code):
        code('%import "python/m5/internal/param_$cls.i"')

    # Hook for exporting additional C++ methods to Python via SWIG.
    # Default is none, override using @classmethod in class definition.
    def export_methods(cls, code):
        pass

    # Generate the code needed as a prerequisite for the C++ methods
    # exported via export_methods() to be compiled in the _wrap.cc
    # file.  Typically generates one or more #include statements.  If
    # any methods are exported, typically at least the C++ header
    # declaring the relevant SimObject class must be included.
    def export_method_cxx_predecls(cls, code):
        pass

    # Generate the code needed as a prerequisite for the C++ methods
    # exported via export_methods() to be processed by SWIG.
    # Typically generates one or more %include or %import statements.
    # If any methods are exported, typically at least the C++ header
    # declaring the relevant SimObject class must be included.
    def export_method_swig_predecls(cls, code):
        pass

    # Generate the declaration for this object for wrapping with SWIG.
    # Generates code that goes into a SWIG .i file.  Called from
    # src/SConscript.
    def swig_decl(cls, code):
        class_path = cls.cxx_class.split('::')
        classname = class_path[-1]
        namespaces = class_path[:-1]

        # The 'local' attribute restricts us to the params declared in
        # the object itself, not including inherited params (which
        # will also be inherited from the base class's param struct
        # here).
        params = cls._params.local.values()
        ports = cls._ports.local

        code('%module(package="m5.internal") param_$cls')
        code()
        code('%{')
        code('#include "sim/sim_object.hh"')
        code('#include "params/$cls.hh"')
        for param in params:
            param.cxx_predecls(code)
        code('#include "${{cls.cxx_header}}"')
        cls.export_method_cxx_predecls(code)
        code('''\
/**
 * This is a workaround for bug in swig. Prior to gcc 4.6.1 the STL
 * headers like vector, string, etc. used to automatically pull in
 * the cstddef header but starting with gcc 4.6.1 they no longer do.
 * This leads to swig generated a file that does not compile so we
 * explicitly include cstddef. Additionally, including version 2.0.4,
 * swig uses ptrdiff_t without the std:: namespace prefix which is
 * required with gcc 4.6.1. We explicitly provide access to it.
 */
#include <cstddef>
using std::ptrdiff_t;
''')
        code('%}')
        code()

        for param in params:
            param.swig_predecls(code)
        cls.export_method_swig_predecls(code)

        code()
        if cls._base:
            code('%import "python/m5/internal/param_${{cls._base}}.i"')
        code()

        for ns in namespaces:
            code('namespace $ns {')

        if namespaces:
            code('// avoid name conflicts')
            sep_string = '_COLONS_'
            flat_name = sep_string.join(class_path)
            code('%rename($flat_name) $classname;')

        code()
        code('// stop swig from creating/wrapping default ctor/dtor')
        code('%nodefault $classname;')
        code('class $classname')
        if cls._base:
            bases = [ cls._base.cxx_class ] + cls.cxx_bases
        else:
            bases = cls.cxx_bases
        base_first = True
        for base in bases:
            if base_first:
                code('    : public ${{base}}')
                base_first = False
            else:
                code('    , public ${{base}}')
        code('{')
        code('  public:')
        cls.export_methods(code)
        code('};')

        for ns in reversed(namespaces):
            code('} // namespace $ns')

        code()
        code('%include "params/$cls.hh"')

    # Generate the C++ declaration (.hh file) for this SimObject's
    # param struct.  Called from src/SConscript.
    def cxx_param_decl(cls, code):
        # The 'local' attribute restricts us to the params declared in
        # the object itself, not including inherited params (which
        # will also be inherited from the base class's param struct
        # here).
        params = cls._params.local.values()
        ports = cls._ports.local
        try:
            ptypes = [p.ptype for p in params]
        except:
            print cls, p, p.ptype_str
            print params
            raise

        class_path = cls._value_dict['cxx_class'].split('::')

        code('''\
#ifndef __PARAMS__${cls}__
#define __PARAMS__${cls}__

''')

        # A forward class declaration is sufficient since we are just
        # declaring a pointer.
        for ns in class_path[:-1]:
            code('namespace $ns {')
        code('class $0;', class_path[-1])
        for ns in reversed(class_path[:-1]):
            code('} // namespace $ns')
        code()

        # The base SimObject has a couple of params that get
        # automatically set from Python without being declared through
        # the normal Param mechanism; we slip them in here (needed
        # predecls now, actual declarations below)
        if cls == SimObject:
            code('''
#ifndef PY_VERSION
struct PyObject;
#endif

#include <string>
''')
        for param in params:
            param.cxx_predecls(code)
        for port in ports.itervalues():
            port.cxx_predecls(code)
        code()

        if cls._base:
            code('#include "params/${{cls._base.type}}.hh"')
            code()

        for ptype in ptypes:
            if issubclass(ptype, Enum):
                code('#include "enums/${{ptype.__name__}}.hh"')
                code()

        # now generate the actual param struct
        code("struct ${cls}Params")
        if cls._base:
            code("    : public ${{cls._base.type}}Params")
        code("{")
        if not hasattr(cls, 'abstract') or not cls.abstract:
            if 'type' in cls.__dict__:
                code("    ${{cls.cxx_type}} create();")

        code.indent()
        if cls == SimObject:
            code('''
    SimObjectParams() {}
    virtual ~SimObjectParams() {}

    std::string name;
    PyObject *pyobj;
''')
        for param in params:
            param.cxx_decl(code)
        for port in ports.itervalues():
            port.cxx_decl(code)

        code.dedent()
        code('};')
        code()
        code('#endif // __PARAMS__${cls}__')

        return code
# This *temporary* definition is required to support calls from the
# SimObject class definition to the MetaSimObject methods (in
# particular _set_param, which gets called for parameters with default
# values defined on the SimObject class itself). It will get
# overridden by the permanent definition (which requires that
# SimObject be defined) lower in this file.
def isSimObjectOrVector(value):
    """Temporary stub used while SimObject itself is being defined;
    always False.  Replaced by the real implementation later in the file.
    """
    return False
# The SimObject class is the root of the special hierarchy. Most of
# the code in this class deals with the configuration hierarchy itself
# (parent/child node relationships).
class SimObject(object):
    """Root of the configuration-hierarchy class tree.

    Most of the code in this class deals with the configuration
    hierarchy itself (parent/child node relationships), cloning of
    instances, and conversion to the wrapped C++ param/object pair.
    """
    # Specify metaclass.  Any class inheriting from SimObject will
    # get this metaclass.
    __metaclass__ = MetaSimObject
    type = 'SimObject'
    abstract = True

    cxx_header = "sim/sim_object.hh"
    cxx_bases = [ "Drainable", "Serializable" ]
    eventq_index = Param.UInt32(Parent.eventq_index, "Event Queue Index")

    @classmethod
    def export_method_swig_predecls(cls, code):
        # SWIG declarations needed before the exported methods below.
        code('''
%include <std_string.i>
%import "python/swig/drain.i"
%import "python/swig/serialize.i"
''')

    @classmethod
    def export_methods(cls, code):
        # C++ methods made callable from Python via the SWIG wrapper.
        code('''
    void init();
    void loadState(Checkpoint *cp);
    void initState();
    void regStats();
    void resetStats();
    void regProbePoints();
    void regProbeListeners();
    void startup();
''')

    # Initialize new instance.  For objects with SimObject-valued
    # children, we need to recursively clone the classes represented
    # by those param values as well in a consistent "deep copy"-style
    # fashion.  That is, we want to make sure that each instance is
    # cloned only once, and that if there are multiple references to
    # the same original object, we end up with the corresponding
    # cloned references all pointing to the same cloned instance.
    def __init__(self, **kwargs):
        ancestor = kwargs.get('_ancestor')
        memo_dict = kwargs.get('_memo')
        if memo_dict is None:
            # prepare to memoize any recursively instantiated objects
            memo_dict = {}
        elif ancestor:
            # memoize me now to avoid problems with recursive calls
            memo_dict[ancestor] = self

        if not ancestor:
            ancestor = self.__class__
        ancestor._instantiated = True

        # initialize required attributes
        self._parent = None
        self._name = None
        self._ccObject = None  # pointer to C++ object
        self._ccParams = None
        self._instantiated = False # really "cloned"

        # Clone children specified at class level. No need for a
        # multidict here since we will be cloning everything.
        # Do children before parameter values so that children that
        # are also param values get cloned properly.
        self._children = {}
        for key,val in ancestor._children.iteritems():
            self.add_child(key, val(_memo=memo_dict))

        # Inherit parameter values from class using multidict so
        # individual value settings can be overridden but we still
        # inherit late changes to non-overridden class values.
        self._values = multidict(ancestor._values)
        # clone SimObject-valued parameters
        for key,val in ancestor._values.iteritems():
            val = tryAsSimObjectOrVector(val)
            if val is not None:
                self._values[key] = val(_memo=memo_dict)

        # clone port references.  no need to use a multidict here
        # since we will be creating new references for all ports.
        self._port_refs = {}
        for key,val in ancestor._port_refs.iteritems():
            self._port_refs[key] = val.clone(self, memo_dict)
        # apply attribute assignments from keyword args, if any
        for key,val in kwargs.iteritems():
            setattr(self, key, val)

    # "Clone" the current instance by creating another instance of
    # this instance's class, but that inherits its parameter values
    # and port mappings from the current instance.  If we're in a
    # "deep copy" recursive clone, check the _memo dict to see if
    # we've already cloned this instance.
    def __call__(self, **kwargs):
        memo_dict = kwargs.get('_memo')
        if memo_dict is None:
            # no memo_dict: must be top-level clone operation.
            # this is only allowed at the root of a hierarchy
            if self._parent:
                raise RuntimeError, "attempt to clone object %s " \
                      "not at the root of a tree (parent = %s)" \
                      % (self, self._parent)
            # create a new dict and use that.
            memo_dict = {}
            kwargs['_memo'] = memo_dict
        elif memo_dict.has_key(self):
            # clone already done & memoized
            return memo_dict[self]
        return self.__class__(_ancestor = self, **kwargs)

    def _get_port_ref(self, attr):
        # Return reference that can be assigned to another port
        # via __setattr__.  There is only ever one reference
        # object per port, but we create them lazily here.
        ref = self._port_refs.get(attr)
        if ref == None:
            ref = self._ports[attr].makeRef(self)
            self._port_refs[attr] = ref
        return ref

    def __getattr__(self, attr):
        # Attribute lookup order: ports, param values, children, then
        # (transparently) the wrapped C++ object.
        if self._ports.has_key(attr):
            return self._get_port_ref(attr)

        if self._values.has_key(attr):
            return self._values[attr]

        if self._children.has_key(attr):
            return self._children[attr]

        # If the attribute exists on the C++ object, transparently
        # forward the reference there.  This is typically used for
        # SWIG-wrapped methods such as init(), regStats(),
        # resetStats(), startup(), drain(), and
        # resume().
        if self._ccObject and hasattr(self._ccObject, attr):
            return getattr(self._ccObject, attr)

        err_string = "object '%s' has no attribute '%s'" \
              % (self.__class__.__name__, attr)

        if not self._ccObject:
            err_string += "\n (C++ object is not yet constructed," \
                          " so wrapped C++ methods are unavailable.)"

        raise AttributeError, err_string

    # Set attribute (called on foo.attr = value when foo is an
    # instance of class cls).
    def __setattr__(self, attr, value):
        # normal processing for private attributes
        if attr.startswith('_'):
            object.__setattr__(self, attr, value)
            return

        if self._ports.has_key(attr):
            # set up port connection
            self._get_port_ref(attr).connect(value)
            return

        param = self._params.get(attr)
        if param:
            try:
                value = param.convert(value)
            except Exception, e:
                msg = "%s\nError setting param %s.%s to %s\n" % \
                      (e, self.__class__.__name__, attr, value)
                e.args = (msg, )
                raise
            self._values[attr] = value
            # implicitly parent unparented objects assigned as params
            if isSimObjectOrVector(value) and not value.has_parent():
                self.add_child(attr, value)
            return

        # if RHS is a SimObject, it's an implicit child assignment
        if isSimObjectOrSequence(value):
            self.add_child(attr, value)
            return

        # no valid assignment... raise exception
        raise AttributeError, "Class %s has no parameter %s" \
              % (self.__class__.__name__, attr)

    # this hack allows tacking a '[0]' onto parameters that may or may
    # not be vectors, and always getting the first element (e.g. cpus)
    def __getitem__(self, key):
        if key == 0:
            return self
        raise TypeError, "Non-zero index '%s' to SimObject" % key

    # Also implemented by SimObjectVector
    def clear_parent(self, old_parent):
        assert self._parent is old_parent
        self._parent = None

    # Also implemented by SimObjectVector
    def set_parent(self, parent, name):
        self._parent = parent
        self._name = name

    # Return parent object of this SimObject, not implemented by
    # SimObjectVector because the elements in a SimObjectVector may not share
    # the same parent
    def get_parent(self):
        return self._parent

    # Also implemented by SimObjectVector
    def get_name(self):
        return self._name

    # Also implemented by SimObjectVector
    def has_parent(self):
        return self._parent is not None

    # clear out child with given name. This code is not likely to be exercised.
    # See comment in add_child.
    def clear_child(self, name):
        child = self._children[name]
        child.clear_parent(self)
        del self._children[name]

    # Add a new child to this object.
    def add_child(self, name, child):
        child = coerceSimObjectOrVector(child)
        if child.has_parent():
            warn("add_child('%s'): child '%s' already has parent", name,
                child.get_name())
        if self._children.has_key(name):
            # This code path had an undiscovered bug that would make it fail
            # at runtime. It had been here for a long time and was only
            # exposed by a buggy script. Changes here will probably not be
            # exercised without specialized testing.
            self.clear_child(name)
        child.set_parent(self, name)
        self._children[name] = child

    # Take SimObject-valued parameters that haven't been explicitly
    # assigned as children and make them children of the object that
    # they were assigned to as a parameter value.  This guarantees
    # that when we instantiate all the parameter objects we're still
    # inside the configuration hierarchy.
    def adoptOrphanParams(self):
        for key,val in self._values.iteritems():
            if not isSimObjectVector(val) and isSimObjectSequence(val):
                # need to convert raw SimObject sequences to
                # SimObjectVector class so we can call has_parent()
                val = SimObjectVector(val)
                self._values[key] = val
            if isSimObjectOrVector(val) and not val.has_parent():
                warn("%s adopting orphan SimObject param '%s'", self, key)
                self.add_child(key, val)

    def path(self):
        """Return the dotted configuration path from the root to this node."""
        if not self._parent:
            return '<orphan %s>' % self.__class__
        ppath = self._parent.path()
        if ppath == 'root':
            return self._name
        return ppath + "." + self._name

    def __str__(self):
        return self.path()

    def ini_str(self):
        # In .ini output a SimObject is referenced by its path.
        return self.path()

    def find_any(self, ptype):
        """Find the unique descendant/param of type ptype; returns (obj, found)."""
        if isinstance(self, ptype):
            return self, True

        found_obj = None
        for child in self._children.itervalues():
            visited = False
            if hasattr(child, '_visited'):
                visited = getattr(child, '_visited')

            if isinstance(child, ptype) and not visited:
                if found_obj != None and child != found_obj:
                    raise AttributeError, \
                          'parent.any matched more than one: %s %s' % \
                          (found_obj.path, child.path)
                found_obj = child
        # search param space
        for pname,pdesc in self._params.iteritems():
            if issubclass(pdesc.ptype, ptype):
                match_obj = self._values[pname]
                if found_obj != None and found_obj != match_obj:
                    raise AttributeError, \
                          'parent.any matched more than one: %s and %s' % (found_obj.path, match_obj.path)
                found_obj = match_obj
        return found_obj, found_obj != None

    def find_all(self, ptype):
        """Collect all descendants and param values of type ptype."""
        all = {}
        # search children
        for child in self._children.itervalues():
            # a child could be a list, so ensure we visit each item
            if isinstance(child, list):
                children = child
            else:
                children = [child]
            for child in children:
                if isinstance(child, ptype) and not isproxy(child) and \
                   not isNullPointer(child):
                    all[child] = True
                if isSimObject(child):
                    # also add results from the child itself
                    child_all, done = child.find_all(ptype)
                    all.update(dict(zip(child_all, [done] * len(child_all))))
        # search param space
        for pname,pdesc in self._params.iteritems():
            if issubclass(pdesc.ptype, ptype):
                match_obj = self._values[pname]
                if not isproxy(match_obj) and not isNullPointer(match_obj):
                    all[match_obj] = True
        return all.keys(), True

    def unproxy(self, base):
        # A concrete SimObject is already resolved.
        return self

    def unproxyParams(self):
        """Resolve all proxy parameter values and port references in place."""
        for param in self._params.iterkeys():
            value = self._values.get(param)
            if value != None and isproxy(value):
                try:
                    value = value.unproxy(self)
                except:
                    print "Error in unproxying param '%s' of %s" % \
                          (param, self.path())
                    raise
                setattr(self, param, value)

        # Unproxy ports in sorted order so that 'append' operations on
        # vector ports are done in a deterministic fashion.
        port_names = self._ports.keys()
        port_names.sort()
        for port_name in port_names:
            port = self._port_refs.get(port_name)
            if port != None:
                port.unproxy(self)

    def print_ini(self, ini_file):
        """Emit this object's section of the config.ini file."""
        print >>ini_file, '[' + self.path() + ']'       # .ini section header

        instanceDict[self.path()] = self

        if hasattr(self, 'type'):
            print >>ini_file, 'type=%s' % self.type

        if len(self._children.keys()):
            print >>ini_file, 'children=%s' % \
                  ' '.join(self._children[n].get_name() \
                  for n in sorted(self._children.keys()))

        for param in sorted(self._params.keys()):
            value = self._values.get(param)
            if value != None:
                print >>ini_file, '%s=%s' % (param,
                                             self._values[param].ini_str())

        for port_name in sorted(self._ports.keys()):
            port = self._port_refs.get(port_name, None)
            if port != None:
                print >>ini_file, '%s=%s' % (port_name, port.ini_str())

        print >>ini_file        # blank line between objects

    # generate a tree of dictionaries expressing all the parameters in the
    # instantiated system for use by scripts that want to do power, thermal
    # visualization, and other similar tasks
    def get_config_as_dict(self):
        d = attrdict()
        if hasattr(self, 'type'):
            d.type = self.type
        if hasattr(self, 'cxx_class'):
            d.cxx_class = self.cxx_class
        # Add the name and path of this object to be able to link to
        # the stats
        d.name = self.get_name()
        d.path = self.path()

        for param in sorted(self._params.keys()):
            value = self._values.get(param)
            if value != None:
                try:
                    # Use native type for those supported by JSON and
                    # strings for everything else. skipkeys=True seems
                    # to not work as well as one would hope
                    if type(self._values[param].value) in \
                       [str, unicode, int, long, float, bool, None]:
                        d[param] = self._values[param].value
                    else:
                        d[param] = str(self._values[param])
                except AttributeError:
                    pass

        for n in sorted(self._children.keys()):
            child = self._children[n]
            # Use the name of the attribute (and not get_name()) as
            # the key in the JSON dictionary to capture the hierarchy
            # in the Python code that assembled this system
            d[n] = child.get_config_as_dict()

        for port_name in sorted(self._ports.keys()):
            port = self._port_refs.get(port_name, None)
            if port != None:
                # Represent each port with a dictionary containing the
                # prominent attributes
                d[port_name] = port.get_config_as_dict()

        return d

    def getCCParams(self):
        """Build (once) and return the C++ params struct for this object."""
        if self._ccParams:
            return self._ccParams

        cc_params_struct = getattr(m5.internal.params, '%sParams' % self.type)
        cc_params = cc_params_struct()
        cc_params.pyobj = self
        cc_params.name = str(self)

        param_names = self._params.keys()
        param_names.sort()
        for param in param_names:
            value = self._values.get(param)
            if value is None:
                fatal("%s.%s without default or user set value",
                      self.path(), param)

            value = value.getValue()
            if isinstance(self._params[param], VectorParamDesc):
                assert isinstance(value, list)
                vec = getattr(cc_params, param)
                assert not len(vec)
                for v in value:
                    vec.append(v)
            else:
                setattr(cc_params, param, value)

        port_names = self._ports.keys()
        port_names.sort()
        for port_name in port_names:
            port = self._port_refs.get(port_name, None)
            if port != None:
                port_count = len(port)
            else:
                port_count = 0
            setattr(cc_params, 'port_' + port_name + '_connection_count',
                    port_count)
        self._ccParams = cc_params
        return self._ccParams

    # Get C++ object corresponding to this object, calling C++ if
    # necessary to construct it.  Does *not* recursively create
    # children.
    def getCCObject(self):
        if not self._ccObject:
            # Make sure this object is in the configuration hierarchy
            if not self._parent and not isRoot(self):
                raise RuntimeError, "Attempt to instantiate orphan node"
            # Cycles in the configuration hierarchy are not supported. This
            # will catch the resulting recursion and stop.
            self._ccObject = -1
            params = self.getCCParams()
            self._ccObject = params.create()
        elif self._ccObject == -1:
            raise RuntimeError, "%s: Cycle found in configuration hierarchy." \
                  % self.path()
        return self._ccObject

    def descendants(self):
        """Yield this object and, recursively, every child beneath it."""
        yield self
        for child in self._children.itervalues():
            for obj in child.descendants():
                yield obj

    # Call C++ to create C++ object corresponding to this object
    def createCCObject(self):
        self.getCCParams()
        self.getCCObject() # force creation

    def getValue(self):
        # When used as a param value, a SimObject yields its C++ object.
        return self.getCCObject()

    # Create C++ port connections corresponding to the connections in
    # _port_refs
    def connectPorts(self):
        for portRef in self._port_refs.itervalues():
            portRef.ccConnect()
# Function to provide to C++ so it can look up instances based on paths
def resolveSimObject(name):
    """Map a configuration path back to its C++ SimObject (called from C++)."""
    return instanceDict[name].getCCObject()
def isSimObject(value):
    # True iff value is a SimObject instance.
    return isinstance(value, SimObject)
def isSimObjectClass(value):
    # True iff value is a class derived from SimObject (not an instance).
    return issubclass(value, SimObject)
def isSimObjectVector(value):
    # True iff value is a SimObjectVector instance.
    return isinstance(value, SimObjectVector)
def isSimObjectSequence(value):
    """Return True for a non-empty list/tuple whose elements are all
    SimObjects or null pointers."""
    if not isinstance(value, (list, tuple)) or len(value) == 0:
        return False
    return all(isNullPointer(item) or isSimObject(item) for item in value)
def isSimObjectOrSequence(value):
    # True iff value is a SimObject or a sequence of SimObjects.
    return isSimObject(value) or isSimObjectSequence(value)
def isRoot(obj):
    # True iff obj is the singleton Root instance.  Imported locally to
    # avoid a circular import at module load time.
    from m5.objects import Root
    return obj and obj is Root.getInstance()
def isSimObjectOrVector(value):
    # Real definition; overrides the temporary stub defined earlier in
    # this file (before SimObject/SimObjectVector existed).
    return isSimObject(value) or isSimObjectVector(value)
def tryAsSimObjectOrVector(value):
    """Coerce value to a SimObject/SimObjectVector if possible.

    Returns the value unchanged if it already is one, wraps a raw
    sequence of SimObjects in a SimObjectVector, and returns None for
    anything else.
    """
    if isSimObjectOrVector(value):
        return value
    if isSimObjectSequence(value):
        return SimObjectVector(value)
def coerceSimObjectOrVector(value):
value = tryAsSimObjectOrVector(value)
if value is None:
raise TypeError, "SimObject or SimObjectVector expected"
return value
# Snapshot the class/instance registries as they stand after this
# module's own definitions so clear() can restore that baseline.
baseClasses = allClasses.copy()
baseInstances = instanceDict.copy()

def clear():
    """Reset the global SimObject registries to their post-import baseline."""
    global allClasses, instanceDict, noCxxHeader
    allClasses = baseClasses.copy()
    instanceDict = baseInstances.copy()
    noCxxHeader = False

# __all__ defines the list of symbols that get exported when
# 'from config import *' is invoked.  Try to keep this reasonably
# short to avoid polluting other namespaces.
__all__ = [ 'SimObject' ]
|
Menooker/gem5_pcm
|
src/python/m5/SimObject.py
|
Python
|
bsd-3-clause
| 43,257
|
[
"VisIt"
] |
9a14781e8b1a59e39c78654cca8197d9a01eefdf858d817f97ad508b5e6423c9
|
import unittest
import client
import utilities
import datetime
class TestSeriesFunctions(unittest.TestCase):
    """Exercise RSSeriesClient lookups against the live series 'A1'."""

    def setUp(self):
        self.rs = client.RSSeriesClient()

    def test_get_identifier(self):
        self.assertEqual(self.rs.get_identifier('A1'), 'A1')

    def test_get_title(self):
        expected = (
            'Correspondence files, annual single number series '
            '[Main correspondence files series of the agency]'
        )
        self.assertEqual(self.rs.get_title('A1'), expected)

    def test_get_accumulation_dates(self):
        expected = {
            'date_str': '01 Jan 1903 - 31 Dec 1938',
            'start_date': {
                'date': datetime.datetime(1903, 1, 1, 0, 0),
                'day': True,
                'month': True
            },
            'end_date': {
                'date': datetime.datetime(1938, 12, 31, 0, 0),
                'day': True,
                'month': True
            }
        }
        self.assertEqual(self.rs.get_accumulation_dates('A1'), expected)

    def test_get_contents_dates(self):
        expected = {
            'date_str': '01 Jan 1890 - 31 Dec 1969',
            'start_date': {
                'date': datetime.datetime(1890, 1, 1, 0, 0),
                'day': True,
                'month': True
            },
            'end_date': {
                'date': datetime.datetime(1969, 12, 31, 0, 0),
                'day': True,
                'month': True
            }
        }
        self.assertEqual(self.rs.get_contents_dates('A1'), expected)

    def test_get_number_described(self):
        expected = {
            'described_note': 'All items from this series are entered on RecordSearch.',
            'described_number': 64455
        }
        self.assertEqual(self.rs.get_number_described('A1'), expected)
class TestItemFunctions(unittest.TestCase):
    """Exercise RSItemClient lookups against a known digitised item."""

    def setUp(self):
        self.rs = client.RSItemClient()

    def test_get_title(self):
        expected = (
            'WRAGGE Clement Lionel Egerton : SERN 647 : '
            'POB Cheadle England : POE Enoggera QLD : '
            'NOK (Father) WRAGGE Clement Lindley'
        )
        self.assertEqual(self.rs.get_title('3445411'), expected)

    def test_get_digitised_pages(self):
        self.assertEqual(self.rs.get_digitised_pages('3445411'), 47)
class TestClosedItemDetails(unittest.TestCase):
    # Checks the full summary returned for a known access-status
    # 'Closed' item (barcode 55545).
    def setUp(self):
        self.rs = client.RSItemClient()
    def test_details(self):
        # Expected fixture captured from RecordSearch; any change in the
        # scraper's output shape will show up as a dict mismatch here.
        test_details = {
            'access_decision': {
                'date_str': u'16 Jul 2012',
                'end_date': None,
                'start_date': {
                    'date': datetime.datetime(2012, 7, 16, 0, 0),
                    'day': True,
                    'month': True
                }
            },
            'access_reason': [{'note': '', 'reason': u'Withheld pending adv'}],
            'access_status': u'Closed',
            'contents_dates': {
                'date_str': u'1918 - 1925',
                'end_date': {
                    'date': datetime.datetime(1925, 1, 1, 0, 0),
                    'day': False,
                    'month': False
                },
                'start_date': {
                    'date': datetime.datetime(1918, 1, 1, 0, 0),
                    'day': False,
                    'month': False
                }
            },
            'control_symbol': u'G1924/3039',
            'digitised_pages': 0,
            'digitised_status': False,
            'identifier': u'55545',
            'location': u'Canberra',
            'series': u'A106',
            'title': u'Increments to Permanent Professional Officers.'
        }
        details = self.rs.get_summary('55545')
        self.assertEqual(details, test_details)
class TestAgencyFunctions(unittest.TestCase):
    """Exercise RSAgencyClient lookups against agency 'CA 12'."""

    def setUp(self):
        self.rs = client.RSAgencyClient()

    def test_get_identifier(self):
        self.assertEqual(self.rs.get_identifier('CA 12'), 'CA 12')

    def test_get_title(self):
        self.assertEqual(self.rs.get_title('CA 12'),
                         'Prime Minister\'s Department')

    def test_get_dates(self):
        expected = {
            'date_str': '01 Jul 1911 - 12 Mar 1971',
            'start_date': {
                'date': datetime.datetime(1911, 7, 1, 0, 0),
                'day': True,
                'month': True
            },
            'end_date': {
                'date': datetime.datetime(1971, 3, 12, 0, 0),
                'day': True,
                'month': True
            }
        }
        self.assertEqual(self.rs.get_dates('CA 12'), expected)
class TestAgencyDetails(unittest.TestCase):
    # Checks the full agency summary for 'CA 100', including linked
    # functions and the chain of superior agencies.
    def setUp(self):
        self.rs = client.RSAgencyClient()
    def test_summary(self):
        # Expected fixture captured from RecordSearch.
        test_details = {
            'agency_id': 'CA 100',
            'agency_status': u'Regional or State Office',
            'associated_people': None,
            'controlled_agencies': None,
            'dates': {'date_str': u'01 Oct 1926 - 31 Dec 1936',
                      'end_date': {'date': datetime.datetime(1936, 12, 31, 0, 0),
                                   'day': True,
                                   'month': True},
                      'start_date': {'date': datetime.datetime(1926, 10, 1, 0, 0),
                                     'day': True,
                                     'month': True}},
            'functions': [{'date_str': u'01 Oct 1926 - 31 Dec 1936',
                           'end_date': {'date': datetime.datetime(1936, 12, 31, 0, 0),
                                        'day': True,
                                        'month': True},
                           'identifier': u'HORTICULTURE',
                           'start_date': {'date': datetime.datetime(1926, 10, 1, 0, 0),
                                          'day': True,
                                          'month': True},
                           'title': u'HORTICULTURE'}],
            'location': u'Victoria',
            'previous_agencies': None,
            'subsequent_agencies': None,
            'superior_agencies': [{'date_str': u'01 Oct 1926 - 31 Jan 1928',
                                   'end_date': {'date': datetime.datetime(1928, 1, 31, 0, 0),
                                                'day': True,
                                                'month': True},
                                   'identifier': u'CA 20',
                                   'start_date': {'date': datetime.datetime(1926, 10, 1, 0, 0),
                                                  'day': True,
                                                  'month': True},
                                   'title': u'Department of Markets and Migration, Central Administration'},
                                  {'date_str': u'01 Jan 1928 - 31 Dec 1928',
                                   'end_date': {'date': datetime.datetime(1928, 12, 31, 0, 0),
                                                'day': True,
                                                'month': True},
                                   'identifier': u'CA 21',
                                   'start_date': {'date': datetime.datetime(1928, 1, 1, 0, 0),
                                                  'day': True,
                                                  'month': True},
                                   'title': u'Department of Markets [I], Central Office'},
                                  {'date_str': u'01 Dec 1928 - 30 Apr 1930',
                                   'end_date': {'date': datetime.datetime(1930, 4, 30, 0, 0),
                                                'day': True,
                                                'month': True},
                                   'identifier': u'CA 23',
                                   'start_date': {'date': datetime.datetime(1928, 12, 1, 0, 0),
                                                  'day': True,
                                                  'month': True},
                                   'title': u'Department of Markets and Transport, Central Office'},
                                  {'date_str': u'01 Apr 1930 - 30 Apr 1932',
                                   'end_date': {'date': datetime.datetime(1932, 4, 30, 0, 0),
                                                'day': True,
                                                'month': True},
                                   'identifier': u'CA 25',
                                   'start_date': {'date': datetime.datetime(1930, 4, 1, 0, 0),
                                                  'day': True,
                                                  'month': True},
                                   'title': u'Department of Markets [II], Central Office'},
                                  {'date_str': u'01 Apr 1932 - 31 Dec 1936',
                                   'end_date': {'date': datetime.datetime(1936, 12, 31, 0, 0),
                                                'day': True,
                                                'month': True},
                                   'identifier': u'CA 28',
                                   'start_date': {'date': datetime.datetime(1932, 4, 1, 0, 0),
                                                  'day': True,
                                                  'month': True},
                                   'title': u'Department of Commerce, Central Office'}],
            'title': u'State Advisory Fruit Board, Victoria'}
        details = self.rs.get_summary('CA 100')
        self.assertEqual(details, test_details)
class TestAgencySearch(unittest.TestCase):
    """Check that an agency search by function reports the expected total."""

    def setUp(self):
        self.rs = client.RSAgencySearchClient()

    def test_totals(self):
        self.rs.search_agencies(function="MIGRATION")
        self.assertEqual(self.rs.total_results, '198')
class TestSeriesDetails(unittest.TestCase):
    # Checks the full series summary for 'CP359/2', including agency
    # links, locations, and item counts.
    def setUp(self):
        self.rs = client.RSSeriesClient()
    def test_details(self):
        # Expected fixture captured from RecordSearch.
        test_details = {
            'access_status': {'CLOSED': 0, 'NYE': 0, 'OPEN': 27, 'OWE': 0},
            'accumulation_dates': {
                'date_str': u'20 Jan 1916 - 31 Jul 1916',
                'end_date': {'date': datetime.datetime(1916, 7, 31, 0, 0),
                             'day': True,
                             'month': True},
                'start_date': {'date': datetime.datetime(1916, 1, 20, 0, 0),
                               'day': True,
                               'month': True}},
            'arrangement': u'Single number system imposed by National Archives of Australia',
            'contents_dates': {
                'date_str': u'27 Aug 1914 - 22 Apr 1918',
                'end_date': {'date': datetime.datetime(1918, 4, 22, 0, 0),
                             'day': True,
                             'month': True},
                'start_date': {'date': datetime.datetime(1914, 8, 27, 0, 0),
                               'day': True,
                               'month': True}},
            'control_symbols': u'[1] - [27]',
            'controlling_agencies': [{
                'date_str': u'12 Mar 1971 -',
                'end_date': {'date': None,
                             'day': False,
                             'month': False},
                'identifier': u'CA 1401',
                'start_date': {'date': datetime.datetime(1971, 3, 12, 0, 0),
                               'day': True,
                               'month': True},
                'title': u'Department of the Prime Minister and Cabinet'}],
            'controlling_series': None,
            'identifier': 'CP359/2',
            'items_described': {'described_note': u'All items from this series are entered on RecordSearch.', 'described_number': 27},
            'items_digitised': 21,
            'locations': [{'location': u'ACT', 'quantity': 0.36}],
            'physical_format': u'PAPER FILES AND DOCUMENTS',
            'previous_series': None,
            'recording_agencies': [{'date_str': u'20 Jan 1916 - 31 Jul 1916',
                                    'end_date': {'date': datetime.datetime(1916, 7, 31, 0, 0),
                                                 'day': True,
                                                 'month': True},
                                    'identifier': u'CA 12',
                                    'start_date': {'date': datetime.datetime(1916, 1, 20, 0, 0),
                                                   'day': True,
                                                   'month': True},
                                    'title': u"Prime Minister's Department - Prime Minister's Office"},
                                   {'date_str': u'20 Jan 1916 - 31 Jul 1916',
                                    'end_date': {'date': datetime.datetime(1916, 7, 31, 0, 0),
                                                 'day': True,
                                                 'month': True},
                                    'identifier': u'CP 290',
                                    'start_date': {'date': datetime.datetime(1916, 1, 20, 0, 0),
                                                   'day': True,
                                                   'month': True},
                                    'title': u'The Rt Hon William Morris HUGHES PC, CH, KC'}],
            'related_series': None,
            'subsequent_series': None,
            'title': u'Subject files maintained by the Prime Minister (William Morris Hughes) during his visit to London, 1916'
        }
        details = self.rs.get_summary('CP359/2')
        self.assertEqual(details, test_details)
class TestSeriesSearch(unittest.TestCase):
    """Check that a series search by recording agency reports the expected total."""

    def setUp(self):
        self.rs = client.RSSeriesSearchClient()

    def test_totals(self):
        self.rs.search_series(agency_recording="CA 12", page=1)
        self.assertEqual(self.rs.total_results, '429')
class TestUtilityFunctions(unittest.TestCase):
    """Unit tests for the date-handling helpers in the utilities module."""

    def test_parse_date(self):
        cases = [
            ('2 June 1884', {'date': datetime.datetime(1884, 6, 2), 'day': True, 'month': True}),
            ('03 Jul 1921', {'date': datetime.datetime(1921, 7, 3), 'day': True, 'month': True}),
            ('13 Jul. 1921', {'date': datetime.datetime(1921, 7, 13), 'day': True, 'month': True}),
            ('Dec 1778', {'date': datetime.datetime(1778, 12, 1), 'day': False, 'month': True}),
            ('1962', {'date': datetime.datetime(1962, 1, 1), 'day': False, 'month': False}),
        ]
        for raw, expected in cases:
            self.assertEqual(utilities.parse_date(raw), expected)

    def test_process_date_string(self):
        cases = [
            ('2 June 1884 - Sep 1884',
             {
                 'date_str': '2 June 1884 - Sep 1884',
                 'start_date': {'date': datetime.datetime(1884, 6, 2), 'day': True, 'month': True},
                 'end_date': {'date': datetime.datetime(1884, 9, 1), 'day': False, 'month': True},
             }),
        ]
        for raw, expected in cases:
            self.assertEqual(utilities.process_date_string(raw), expected)

    def test_convert_date_to_iso(self):
        cases = [
            ({'date': datetime.datetime(1884, 6, 2), 'day': True, 'month': True}, '1884-06-02'),
            ({'date': datetime.datetime(1778, 12, 1), 'day': False, 'month': True}, '1778-12'),
            ({'date': datetime.datetime(1962, 1, 1), 'day': False, 'month': False}, '1962'),
        ]
        for parsed, expected in cases:
            self.assertEqual(utilities.convert_date_to_iso(parsed), expected)
# Run the whole suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
wragge/recordsearch_tools
|
tests.py
|
Python
|
cc0-1.0
| 16,330
|
[
"VisIt"
] |
f27598ef7b2337f1af463743db065a4c25756b1db267cb32f6c12f0701677ae9
|
import MDAnalysis
try:
from MDAnalysisTests.datafiles import PSF, DCD
except:
pass
try:
from MDAnalysis.analysis import rms
except:
pass
class SimpleRmsBench(object):
    """Benchmarks for MDAnalysis.analysis.rms.rmsd
    """

    params = ([100, 500, 2000],
              [None, [1.0, 0.5]],
              [False, True],
              [False, True])
    param_names = ['num_atoms',
                   'weights',
                   'center',
                   'superposition']

    def setup(self, num_atoms, weights, center, superposition):
        """Load the PSF/DCD test system and slice out num_atoms-atom
        coordinate arrays from the first and last frames for rmsd().
        """
        # mimic rmsd docstring example code
        self.u = MDAnalysis.Universe(PSF, DCD)
        # ag.positions is the new syntax, but older commit hashes will
        # need to use ag.coordinates().  Catch only AttributeError so the
        # fallback fires for the missing attribute and real failures
        # (bad files, I/O errors) are not silently swallowed.
        try:
            self.A = self.u.atoms.positions.copy()[:num_atoms]
            self.u.trajectory[-1]
            self.B = self.u.atoms.positions.copy()[:num_atoms]
        except AttributeError:
            self.A = self.u.atoms.coordinates().copy()[:num_atoms]
            self.u.trajectory[-1]
            self.B = self.u.atoms.coordinates().copy()[:num_atoms]

    def time_rmsd(self, num_atoms, weights, center, superposition):
        """Benchmark rmsd function using a setup similar to
        its docstring example code along with several possible
        permutations of parameters.
        """
        rms.rmsd(a=self.A,
                 b=self.B,
                 weights=weights,
                 center=center,
                 superposition=superposition)
class RmsdTrajBench(object):
    """Benchmarks for MDAnalysis.analysis.rms.RMSD
    """

    # TODO: RMSD has many parameters / options, some of which are
    # apparently still considered experimental -- we'll eventually
    # want to benchmark more of these
    params = (['all', 'backbone'],
              [None, 'mass'])
    param_names = ['select',
                   'weights']

    def setup(self, select, weights):
        """Build the RMSD analysis object on the PSF/DCD test system."""
        universe = MDAnalysis.Universe(PSF, DCD)
        self.u = universe
        self._rmsd_analysis = rms.RMSD(atomgroup=universe,
                                       reference=None,
                                       select=select,
                                       weights=weights)

    def time_RMSD(self, select, weights):
        """Benchmark RMSD.run() method, which parses
        over the entire trajectory.
        """
        self._rmsd_analysis.run()
class RmsfTrajBench(object):
    """Benchmarks for MDAnalysis.analysis.rms.RMSF
    """

    params = ([100, 500, 2000],
              [None, 3],
              [None, 'mass'])
    param_names = ['n_atoms',
                   'step',
                   'weights']

    def setup(self, n_atoms, step, weights):
        """Build the RMSF analysis over the first n_atoms atoms."""
        self.u = MDAnalysis.Universe(PSF, DCD)
        self.ag = self.u.atoms[:n_atoms]
        self._rmsf_analysis = rms.RMSF(atomgroup=self.ag,
                                       weights=weights)

    def time_RMSF(self, n_atoms, step, weights):
        """Benchmark RMSF.run() method, which parses
        over the entire trajectory.
        """
        self._rmsf_analysis.run(step=step)
|
MDAnalysis/mdanalysis
|
benchmarks/benchmarks/analysis/rms.py
|
Python
|
gpl-2.0
| 3,079
|
[
"MDAnalysis"
] |
dcf3c48d7a41ad15aa8e21c7c1692972cfcdb420d25746637c10ebd470ec512f
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkExtractPolyDataGeometry(SimpleVTKClassModuleBase):
    # Thin auto-generated DeVIDE wrapper: exposes the
    # vtk.vtkExtractPolyDataGeometry filter as a module with one
    # vtkPolyData input and one vtkPolyData output.
    def __init__(self, module_manager):
        SimpleVTKClassModuleBase.__init__(
            self, module_manager,
            vtk.vtkExtractPolyDataGeometry(), 'Processing.',
            ('vtkPolyData',), ('vtkPolyData',),
            replaceDoc=True,
            inputFunctions=None, outputFunctions=None)
|
nagyistoce/devide
|
modules/vtk_basic/vtkExtractPolyDataGeometry.py
|
Python
|
bsd-3-clause
| 507
|
[
"VTK"
] |
7ad12ce414b4485f2331f019fea414b67a265883fa05bf918f6a092909bd08c3
|
"""
MUSE -- A Multi-algorithm-collaborative Universal Structure-prediction Environment
Copyright (C) 2010-2017 by Zhong-Li Liu
This program is free software; you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software Foundation
version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
E-mail: zl.liu@163.com
"""
import os,subprocess,time,datetime
from muse.Readwrite import Read_Write
from muse.Readwrite.ReadInput import indict
def Run_lammps(nu,ng,nn,ParSubCom):
    # Run one LAMMPS structure relaxation in the current working directory
    # and return (enthalpy, final_pressure) parsed from its OUT file.
    # nu/ng/nn look like structure/generation/number indices used for
    # labelling -- TODO confirm against callers; ParSubCom is the shell
    # command line used to launch LAMMPS.
    os.system("cp ../../lammps.in .")
    os.system("cp ../../*.pot . 2>>/dev/null")
    # Convert the VASP-format input structure to a LAMMPS data file.
    New_cry = Read_Write.read_vasp('POSCAR')
    Read_Write.write_lammps("data",New_cry)
    # Patch the target pressure (GPa -> bar, hence the 10000 factor --
    # NOTE(review): presumably; verify against lammps.in template) into
    # the input script.
    os.system("sed -i 's/From-muse.in/%.2f/g' lammps.in"%(10000*float(indict['Pressure'][0])))
    start = datetime.datetime.now()
    go = subprocess.Popen(ParSubCom,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
    # Poll the job; kill the heaviest process if it exceeds MaxHour hours.
    # NOTE(review): .seconds ignores whole days of elapsed time, and the
    # killall target is "whatever tops the CPU list" -- both fragile.
    while go.poll() is None:
        time.sleep(2)
        now = datetime.datetime.now()
        if (now - start).seconds > float(indict['MaxHour'][0])*3600:
            os.system("killall -9 $(ps H -e -o cmd --sort=pcpu | tail -1)")
            break
    with open('../log.muse','a') as logfile: print >>logfile, "Opt. %d ... done."%int(1)
    #os.system("cp OUT OUT%s-%s"%(str(ng),str(nn)))
    try:
        os.system("cp POSCAR CONTCAR")
        # Convert the relaxed trajectory back to VASP format and scrape
        # enthalpy/pressure from the LAMMPS OUT file.
        New_cry = Read_Write.read_lammps("relax.lammpstrj",nu)
        Read_Write.write_vasp("CONTCAR",New_cry,label=indict['NameSys'][0]+": "+str(ng)+'-'+str(nn),\
            direct=True,sort=True,vasp5=True)
        enth = float(os.popen("grep Enthalpy= OUT | awk '{print $2}'").readline())
        enth = float("%.8f"%enth)
        FinalPressure = float(os.popen("grep Pressure= OUT | awk '{print $2/10000}'").readline())
        #os.system("rm OUT")
    except:
        # Deliberate best-effort: any failure in parsing the run output
        # yields sentinel values so the caller can discard the structure.
        enth = 111111.11111111
        FinalPressure = 111111.11111111
        pass
    return enth,FinalPressure
|
zhongliliu/muse
|
muse/Calculators/Run_lammps.py
|
Python
|
gpl-2.0
| 2,241
|
[
"LAMMPS"
] |
9841f08a4ea126e7078ba1ef0fd58ec19d36f0465a480c833d9dda536a67e3c9
|
import numpy as np
def compute_gat(arr, sigma_sq, alpha=1):
    """
    Generalized Anscombe variance-stabilizing transformation (GAT).

    Computes f(x) = 2 * sqrt(max(x / alpha + 3/8 + sigma_sq / alpha**2, 0)),
    which approximately stabilizes the variance of Poisson-Gaussian data.

    References:
        [1] http://www.cs.tut.fi/~foi/invansc/
        [2] M. Makitalo and A. Foi, "Optimal inversion of the generalized
        Anscombe transformation for Poisson-Gaussian noise", IEEE Trans.
        Image Process, 2012
        [3] J.L. Starck, F. Murtagh, and A. Bijaoui, Image Processing
        and Data Analysis, Cambridge University Press, Cambridge, 1998)

    :param arr: noisy signal (scalar or ndarray)
    :param sigma_sq: variance of the Gaussian noise component
    :param alpha: scaling factor of the Poisson noise component
    :return: variance-stabilized array
    """
    shifted = arr / alpha + 3. / 8. + sigma_sq / alpha**2
    # Clip at zero before the square root so the transform stays real.
    return 2. * np.sqrt(np.maximum(shifted, 0))
def compute_inverse_gat(arr, sigma_sq, m=0, alpha=1, method='asym'):
    """
    Inverse of the Generalized Anscombe variance-stabilizing
    transformation
    References:
    [1] http://www.cs.tut.fi/~foi/invansc/
    [2] M. Makitalo and A. Foi, "Optimal inversion of the generalized
    Anscombe transformation for Poisson-Gaussian noise", IEEE Trans.
    Image Process, 2012
    [3] J.L. Starck, F. Murtagh, and A. Bijaoui, Image Processing
    and Data Analysis, Cambridge University Press, Cambridge, 1998)
    :param arr: variance-stabilized signal
    :param sigma_sq: variance of the Gaussian noise component
    :param m: mean of the Gaussian noise component
    :param alpha: scaling factor of the Poisson noise component
    :param method: 'closed-form' applies the closed-form approximation
        of the exact unbiased inverse. 'asym' applies the asymptotic
        approximation of the exact unbiased inverse.
    :return: inverse variance-stabilized array
    :raises NotImplementedError: if *method* is neither 'closed-form'
        nor 'asym'.
    """
    # Rebind instead of using `/=`: an in-place division would mutate a
    # caller-supplied numpy array for sigma_sq.
    sigma_sq = sigma_sq / alpha ** 2
    if method == 'closed-form':
        # closed-form approximation of the exact unbiased inverse:
        # truncate at 0.8 to keep the negative powers of arr well-behaved.
        arr_trunc = np.maximum(arr, 0.8)
        inverse = ((arr_trunc / 2.)**2 + 0.25 * np.sqrt(1.5) * arr_trunc**-1 - (11. / 8.) * arr_trunc**-2 +
                   (5. / 8.) * np.sqrt(1.5) * arr_trunc**-3 - (1. / 8.) - sigma_sq)
    elif method == 'asym':
        # asymptotic approximation of the exact unbiased inverse:
        inverse = (arr / 2.)**2 - 1. / 8 - sigma_sq
        # inverse = np.maximum(0, inverse)
    else:
        # Original message claimed only 'closed-form' is supported, which
        # was misleading: both branches above are implemented.
        raise NotImplementedError(
            "method must be 'closed-form' or 'asym', got %r" % (method,))
    # Undo the alpha scaling and re-add the Gaussian mean; `inverse` is a
    # fresh local array here, so in-place ops are safe.
    if alpha != 1:
        inverse *= alpha
    if m != 0:
        inverse += m
    return inverse
|
simonsfoundation/CaImAn
|
caiman/external/houghvst/gat.py
|
Python
|
gpl-2.0
| 2,529
|
[
"Gaussian"
] |
8e0696831569cd69645486503faf11477f046bd901c445544e0de4c0b2f0e126
|
"""Set up and relax the initial (adsorbed N2) and final (dissociated 2N)
states of N2 on a Cu(111) slab with ASE, writing each relaxed structure
to a trajectory file ('N2.traj' and '2N.traj')."""
from ase import Atoms
from ase.lattice.surface import fcc111, add_adsorbate
from ase.calculators.emt import EMT
from ase.constraints import FixAtoms
from ase.optimize import QuasiNewton
from ase.io import write
# Find the initial and final states for the reaction.
# Set up a (4 x 4) two layer slab of Cu:
slab = fcc111('Cu',size=(4,4,2))
# Periodic in the surface plane only (no periodicity normal to the slab).
slab.set_pbc((1,1,0))
# Initial state.
# Add the N2 molecule oriented at 60 degrees:
d = 1.10 # N2 bond length
# Second N atom placed at (sqrt(3)/2*d, d/2, 0) relative to the first,
# i.e. the bond axis makes a 30-degree angle with x (60 with y).
N2mol = Atoms('N2',positions=[[0.0,0.0,0.0],[0.5*3**0.5*d,0.5*d,0.0]])
# Place the molecule 1.0 Angstrom above an fcc hollow site.
add_adsorbate(slab,N2mol,height=1.0,position='fcc')
# Use the EMT calculator for the forces and energies:
slab.set_calculator(EMT())
# We don't want to worry about the Cu degrees of freedom,
# so fix these atoms:
mask = [atom.symbol == 'Cu' for atom in slab]
slab.set_constraint(FixAtoms(mask=mask))
# Relax the structure until the max force drops below 0.05 (eV/Angstrom).
relax = QuasiNewton(slab)
relax.run(fmax=0.05)
print('initial state:', slab.get_potential_energy())
write('N2.traj', slab)
# Now the final state.
# Move the second N atom to a neighboring hollow site:
# slab[-1] is the last-added N; shift it along x by a quarter of the
# first cell vector, keeping the same y as its partner slab[-2].
slab[-1].position[0] = slab[-2].position[0] + 0.25 * slab.cell[0,0]
slab[-1].position[1] = slab[-2].position[1]
# and relax (reusing the same optimizer and fmax criterion).
relax.run()
print('final state: ', slab.get_potential_energy())
write('2N.traj', slab)
|
misdoro/python-ase
|
doc/tutorials/N2Cu-Dissociation1.py
|
Python
|
gpl-2.0
| 1,268
|
[
"ASE"
] |
b2c8e037d5152b24f67f35aa26f91bc7fd06ff4013ddc259bb0a4c040279c64b
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.