text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with functions that encode the sequence of PSI module
calls for each of the *name* values of the energy(), optimize(),
response(), and frequency() function. *name* can be assumed lowercase by here.
"""
import os
import sys
import shutil
import subprocess
import warnings
import numpy as np
from qcelemental import constants
from psi4 import extras
from psi4 import core
from psi4.driver import p4util
from psi4.driver import qcdb
from psi4.driver import psifiles as psif
from psi4.driver.p4util.exceptions import ManagedMethodError, PastureRequiredError, ValidationError
#from psi4.driver.molutil import *
from psi4.driver.qcdb.basislist import corresponding_basis
# never import driver, wrappers, or aliases into this file
from .roa import run_roa
from . import proc_util
from . import empirical_dispersion
from . import dft
from . import mcscf
from . import response
from . import solvent
# ATTN NEW ADDITIONS!
# consult http://psicode.org/psi4manual/master/proc_py.html
def select_mp2(name, **kwargs):
    """Dispatch a MP2 energy computation to the user-requested or
    best-performing module, keyed on REFERENCE, MP2_TYPE, and QC_MODULE.
    """
    ref = core.get_option('SCF', 'REFERENCE')
    algorithm = core.get_global_option('MP2_TYPE')
    module = core.get_global_option('QC_MODULE')
    # Candidate modules: [df]occ / dfmp2 / detci / fnocc.
    # MP2_TYPE exists largely for py-side reasoning, so must be managed
    # here rather than passed to the c-side unprepared for validation.
    routes = {}
    if ref == 'RHF':
        routes = {
            'CONV': {'DETCI': run_detci, 'FNOCC': run_fnocc, '': run_occ, 'OCC': run_occ},
            'DF': {'OCC': run_dfocc, '': run_dfmp2, 'DFMP2': run_dfmp2},
            'CD': {'': run_dfocc, 'OCC': run_dfocc},
        }
    elif ref == 'UHF':
        routes = {
            'CONV': {'': run_occ, 'OCC': run_occ},
            'DF': {'OCC': run_dfocc, '': run_dfmp2, 'DFMP2': run_dfmp2},
            'CD': {'': run_dfocc, 'OCC': run_dfocc},
        }
    elif ref == 'ROHF':
        routes = {
            'CONV': {'DETCI': run_detci, '': run_occ, 'OCC': run_occ},
            'DF': {'OCC': run_dfocc, '': run_dfmp2, 'DFMP2': run_dfmp2},
            'CD': {'': run_dfocc, 'OCC': run_dfocc},
        }
    elif ref in ('RKS', 'UKS'):
        routes = {'DF': {'': run_dfmp2, 'DFMP2': run_dfmp2}}
    runner = routes.get(algorithm, {}).get(module)
    # Warn whenever DETCI was requested, even for combinations routed above.
    if module == 'DETCI':
        core.print_out("""\nDETCI is ill-advised for method MP2 as it is available inefficiently as a """
                       """byproduct of a CISD computation.\n DETCI ROHF MP2 will produce non-standard results.\n""")
    if runner is None:
        raise ManagedMethodError(['select_mp2', name, 'MP2_TYPE', algorithm, ref, module])
    if kwargs.pop('probe', False):
        return
    return runner(name, **kwargs)
def select_mp2_gradient(name, **kwargs):
    """Dispatch a MP2 gradient computation to the user-requested or
    best-performing module.
    """
    ref = core.get_option('SCF', 'REFERENCE')
    algorithm = core.get_global_option('MP2_TYPE')
    module = core.get_global_option('QC_MODULE')
    # Conventional gradients are only available all-electron.
    all_electron = core.get_global_option('FREEZE_CORE') == "FALSE"
    # Candidate modules: [df]occ / dfmp2.
    runner = None
    if ref == 'RHF':
        if algorithm == 'CONV' and all_electron and module in ('', 'OCC'):
            runner = run_occ_gradient
        elif algorithm == 'DF':
            if module == 'OCC':
                runner = run_dfocc_gradient
            elif module in ('', 'DFMP2'):
                runner = run_dfmp2_gradient
    elif ref == 'UHF':
        if algorithm == 'CONV' and all_electron and module in ('', 'OCC'):
            runner = run_occ_gradient
        elif algorithm == 'DF' and module in ('', 'OCC'):
            runner = run_dfocc_gradient
    if runner is None:
        raise ManagedMethodError(['select_mp2_gradient', name, 'MP2_TYPE', algorithm, ref, module, all_electron])
    if kwargs.pop('probe', False):
        return
    return runner(name, **kwargs)
def select_mp2_property(name, **kwargs):
    """Dispatch a MP2 property computation to the user-requested or
    best-performance default module.
    """
    ref = core.get_option('SCF', 'REFERENCE')
    algorithm = core.get_global_option('MP2_TYPE')
    module = core.get_global_option('QC_MODULE')
    # Only dfmp2 is wired up for now; dfocc property routes (RHF 'OCC'
    # and UHF) exist in principle but remain disabled.
    runner = None
    if ref == 'RHF' and algorithm == 'DF' and module in ('', 'DFMP2'):
        runner = run_dfmp2_property
    if runner is None:
        raise ManagedMethodError(['select_mp2_property', name, 'MP2_TYPE', algorithm, ref, module])
    if kwargs.pop('probe', False):
        return
    return runner(name, **kwargs)
def select_omp2(name, **kwargs):
    """Dispatch an OMP2 energy computation to the user-requested or
    best-performing module.
    """
    ref = core.get_option('SCF', 'REFERENCE')
    algorithm = core.get_global_option('MP2_TYPE')
    module = core.get_global_option('QC_MODULE')
    # OMP2 is implemented only in the (DF-)OCC module.
    runner = None
    if ref in ('RHF', 'UHF', 'ROHF', 'RKS', 'UKS') and module in ('', 'OCC'):
        if algorithm == 'CONV':
            runner = run_occ
        elif algorithm in ('DF', 'CD'):
            runner = run_dfocc
    if runner is None:
        raise ManagedMethodError(['select_omp2', name, 'MP2_TYPE', algorithm, ref, module])
    if kwargs.pop('probe', False):
        return
    return runner(name, **kwargs)
def select_omp2_gradient(name, **kwargs):
    """Dispatch an OMP2 gradient computation to the user-requested or
    best-performing module.
    """
    ref = core.get_option('SCF', 'REFERENCE')
    algorithm = core.get_global_option('MP2_TYPE')
    module = core.get_global_option('QC_MODULE')
    # OMP2 gradients are implemented only in the (DF-)OCC module.
    runner = None
    if ref in ('RHF', 'UHF', 'ROHF', 'RKS', 'UKS') and module in ('', 'OCC'):
        if algorithm == 'CONV':
            runner = run_occ_gradient
        elif algorithm == 'DF':
            runner = run_dfocc_gradient
    if runner is None:
        raise ManagedMethodError(['select_omp2_gradient', name, 'MP2_TYPE', algorithm, ref, module])
    if kwargs.pop('probe', False):
        return
    return runner(name, **kwargs)
def select_omp2_property(name, **kwargs):
    """Dispatch an OMP2 property computation to the user-requested or
    best-performing module.
    """
    ref = core.get_option('SCF', 'REFERENCE')
    algorithm = core.get_global_option('MP2_TYPE')
    module = core.get_global_option('QC_MODULE')
    # Only the DF-OCC route supports OMP2 properties.
    runner = None
    if ref in ('RHF', 'UHF', 'ROHF', 'RKS', 'UKS') and algorithm == 'DF' and module in ('', 'OCC'):
        runner = run_dfocc_property
    if runner is None:
        raise ManagedMethodError(['select_omp2_property', name, 'MP2_TYPE', algorithm, ref, module])
    if kwargs.pop('probe', False):
        return
    return runner(name, **kwargs)
def select_omp2p5_property(name, **kwargs):
    """Dispatch an OMP2.5 property computation to the user-requested or
    best-performing module.
    """
    ref = core.get_option('SCF', 'REFERENCE')
    algorithm = core.get_global_option('MP_TYPE')
    module = core.get_global_option('QC_MODULE')
    # Only the DF-OCC route supports OMP2.5 properties.
    runner = None
    if ref in ('RHF', 'UHF', 'ROHF', 'RKS', 'UKS') and algorithm == 'DF' and module in ('', 'OCC'):
        runner = run_dfocc_property
    if runner is None:
        raise ManagedMethodError(['select_omp2p5_property', name, 'MP_TYPE', algorithm, ref, module])
    if kwargs.pop('probe', False):
        return
    return runner(name, **kwargs)
def select_omp3_property(name, **kwargs):
    """Dispatch an OMP3 property computation to the user-requested or
    best-performing module.
    """
    ref = core.get_option('SCF', 'REFERENCE')
    algorithm = core.get_global_option('MP_TYPE')
    module = core.get_global_option('QC_MODULE')
    # Only the DF-OCC route supports OMP3 properties.
    runner = None
    if ref in ('RHF', 'UHF', 'ROHF', 'RKS', 'UKS') and algorithm == 'DF' and module in ('', 'OCC'):
        runner = run_dfocc_property
    if runner is None:
        raise ManagedMethodError(['select_omp3_property', name, 'MP_TYPE', algorithm, ref, module])
    if kwargs.pop('probe', False):
        return
    return runner(name, **kwargs)
def select_olccd_property(name, **kwargs):
    """Dispatch an OLCCD property computation to the user-requested or
    best-performing module.
    """
    ref = core.get_option('SCF', 'REFERENCE')
    algorithm = core.get_global_option('CC_TYPE')
    module = core.get_global_option('QC_MODULE')
    # Only the DF-OCC route supports OLCCD properties.
    runner = None
    if ref in ('RHF', 'UHF', 'ROHF', 'RKS', 'UKS') and algorithm == 'DF' and module in ('', 'OCC'):
        runner = run_dfocc_property
    if runner is None:
        raise ManagedMethodError(['select_olccd_property', name, 'CC_TYPE', algorithm, ref, module])
    if kwargs.pop('probe', False):
        return
    return runner(name, **kwargs)
def select_mp3(name, **kwargs):
    """Dispatch a MP3 energy computation to the user-requested or
    best-performing module.
    """
    ref = core.get_option('SCF', 'REFERENCE')
    # Default to DF unless the user explicitly set MP_TYPE.
    algorithm = core.get_global_option('MP_TYPE') if core.has_global_option_changed("MP_TYPE") else "DF"
    module = core.get_global_option('QC_MODULE')
    # Candidate modules: [df]occ / fnocc / detci.
    runner = None
    if ref == 'RHF':
        if algorithm == 'CONV':
            runner = {'DETCI': run_detci, 'FNOCC': run_fnocc, '': run_occ, 'OCC': run_occ}.get(module)
        elif algorithm in ('DF', 'CD') and module in ('', 'OCC'):
            runner = run_dfocc
    elif ref == 'UHF':
        if algorithm == 'CONV' and module in ('', 'OCC'):
            runner = run_occ
        elif algorithm in ('DF', 'CD') and module in ('', 'OCC'):
            runner = run_dfocc
    elif ref == 'ROHF' and algorithm == 'CONV':
        if module == 'DETCI':  # no default for this case
            runner = run_detci
        elif module == '':
            core.print_out("""\nThis method is available inefficiently as a """
                           """byproduct of a CISD computation.\n Add "set """
                           """qc_module detci" to input to access this route.\n""")
    if runner is None:
        raise ManagedMethodError(['select_mp3', name, 'MP_TYPE', algorithm, ref, module])
    if kwargs.pop('probe', False):
        return
    return runner(name, **kwargs)
def select_mp3_gradient(name, **kwargs):
    """Dispatch a MP3 gradient computation to the user-requested or
    best-performing module.
    """
    ref = core.get_option('SCF', 'REFERENCE')
    # Default to DF unless the user explicitly set MP_TYPE.
    algorithm = core.get_global_option('MP_TYPE') if core.has_global_option_changed("MP_TYPE") else "DF"
    module = core.get_global_option('QC_MODULE')
    # Conventional gradients are only available all-electron.
    all_electron = core.get_global_option('FREEZE_CORE') == "FALSE"
    # Candidate modules: [df]occ only.
    runner = None
    if ref in ('RHF', 'UHF') and module in ('', 'OCC'):
        if algorithm == 'CONV' and all_electron:
            runner = run_occ_gradient
        elif algorithm == 'DF':
            runner = run_dfocc_gradient
    if runner is None:
        raise ManagedMethodError(['select_mp3_gradient', name, 'MP_TYPE', algorithm, ref, module, all_electron])
    if kwargs.pop('probe', False):
        return
    return runner(name, **kwargs)
def select_omp3(name, **kwargs):
    """Dispatch an OMP3 energy computation to the user-requested or
    best-performing module.
    """
    ref = core.get_option('SCF', 'REFERENCE')
    algorithm = core.get_global_option('MP_TYPE')
    module = core.get_global_option('QC_MODULE')
    # OMP3 is implemented only in the (DF-)OCC module.
    runner = None
    if ref in ('RHF', 'UHF', 'ROHF', 'RKS', 'UKS') and module in ('', 'OCC'):
        if algorithm == 'CONV':
            runner = run_occ
        elif algorithm in ('DF', 'CD'):
            runner = run_dfocc
    if runner is None:
        raise ManagedMethodError(['select_omp3', name, 'MP_TYPE', algorithm, ref, module])
    if kwargs.pop('probe', False):
        return
    return runner(name, **kwargs)
def select_omp3_gradient(name, **kwargs):
    """Dispatch an OMP3 gradient computation to the user-requested or
    best-performing module.
    """
    ref = core.get_option('SCF', 'REFERENCE')
    algorithm = core.get_global_option('MP_TYPE')
    module = core.get_global_option('QC_MODULE')
    # OMP3 gradients are implemented only in the (DF-)OCC module.
    runner = None
    if ref in ('RHF', 'UHF', 'ROHF', 'RKS', 'UKS') and module in ('', 'OCC'):
        if algorithm == 'CONV':
            runner = run_occ_gradient
        elif algorithm == 'DF':
            runner = run_dfocc_gradient
    if runner is None:
        raise ManagedMethodError(['select_omp3_gradient', name, 'MP_TYPE', algorithm, ref, module])
    if kwargs.pop('probe', False):
        return
    return runner(name, **kwargs)
def select_mp2p5(name, **kwargs):
    """Dispatch a MP2.5 energy computation to the user-requested or
    best-performing module.
    """
    ref = core.get_option('SCF', 'REFERENCE')
    # Default to DF unless the user explicitly set MP_TYPE.
    algorithm = core.get_global_option('MP_TYPE') if core.has_global_option_changed("MP_TYPE") else "DF"
    module = core.get_global_option('QC_MODULE')
    # MP2.5 is implemented only in the (DF-)OCC module.
    runner = None
    if ref in ('RHF', 'UHF') and module in ('', 'OCC'):
        if algorithm == 'CONV':
            runner = run_occ
        elif algorithm in ('DF', 'CD'):
            runner = run_dfocc
    if runner is None:
        raise ManagedMethodError(['select_mp2p5', name, 'MP_TYPE', algorithm, ref, module])
    if kwargs.pop('probe', False):
        return
    return runner(name, **kwargs)
def select_mp2p5_gradient(name, **kwargs):
    """Dispatch a MP2.5 gradient computation to the user-requested or
    best-performing module.
    """
    ref = core.get_option('SCF', 'REFERENCE')
    # Default to DF unless the user explicitly set MP_TYPE.
    algorithm = core.get_global_option('MP_TYPE') if core.has_global_option_changed("MP_TYPE") else "DF"
    module = core.get_global_option('QC_MODULE')
    # Conventional gradients are only available all-electron.
    all_electron = core.get_global_option('FREEZE_CORE') == "FALSE"
    # MP2.5 gradients are implemented only in the (DF-)OCC module.
    runner = None
    if ref in ('RHF', 'UHF') and module in ('', 'OCC'):
        if algorithm == 'CONV' and all_electron:
            runner = run_occ_gradient
        elif algorithm == 'DF':
            runner = run_dfocc_gradient
    if runner is None:
        raise ManagedMethodError(['select_mp2p5_gradient', name, 'MP_TYPE', algorithm, ref, module, all_electron])
    if kwargs.pop('probe', False):
        return
    return runner(name, **kwargs)
def select_omp2p5(name, **kwargs):
    """Dispatch an OMP2.5 energy computation to the user-requested or
    best-performing module.
    """
    ref = core.get_option('SCF', 'REFERENCE')
    algorithm = core.get_global_option('MP_TYPE')
    module = core.get_global_option('QC_MODULE')
    # OMP2.5 is implemented only in the (DF-)OCC module.
    runner = None
    if ref in ('RHF', 'UHF', 'ROHF', 'RKS', 'UKS') and module in ('', 'OCC'):
        if algorithm == 'CONV':
            runner = run_occ
        elif algorithm in ('DF', 'CD'):
            runner = run_dfocc
    if runner is None:
        raise ManagedMethodError(['select_omp2p5', name, 'MP_TYPE', algorithm, ref, module])
    if kwargs.pop('probe', False):
        return
    return runner(name, **kwargs)
def select_omp2p5_gradient(name, **kwargs):
    """Dispatch an OMP2.5 gradient computation to the user-requested or
    best-performing module.
    """
    ref = core.get_option('SCF', 'REFERENCE')
    algorithm = core.get_global_option('MP_TYPE')
    module = core.get_global_option('QC_MODULE')
    # OMP2.5 gradients are implemented only in the (DF-)OCC module.
    runner = None
    if ref in ('RHF', 'UHF', 'ROHF', 'RKS', 'UKS') and module in ('', 'OCC'):
        if algorithm == 'CONV':
            runner = run_occ_gradient
        elif algorithm == 'DF':
            runner = run_dfocc_gradient
    if runner is None:
        raise ManagedMethodError(['select_omp2p5_gradient', name, 'MP_TYPE', algorithm, ref, module])
    if kwargs.pop('probe', False):
        return
    return runner(name, **kwargs)
def select_lccd(name, **kwargs):
    """Dispatch a LCCD energy computation to the user-requested or
    best-performing module.
    """
    ref = core.get_option('SCF', 'REFERENCE')
    algorithm = core.get_global_option('CC_TYPE')
    module = core.get_global_option('QC_MODULE')
    # Candidate modules: [df]occ / fnocc.
    runner = None
    if ref == 'RHF':
        if algorithm == 'CONV':
            runner = {'OCC': run_occ, '': run_cepa, 'FNOCC': run_cepa}.get(module)
        elif algorithm in ('DF', 'CD') and module in ('', 'OCC'):
            runner = run_dfocc
    elif ref == 'UHF':
        if algorithm == 'CONV' and module in ('', 'OCC'):
            runner = run_occ
        elif algorithm in ('DF', 'CD') and module in ('', 'OCC'):
            runner = run_dfocc
    if runner is None:
        raise ManagedMethodError(['select_lccd', name, 'CC_TYPE', algorithm, ref, module])
    if kwargs.pop('probe', False):
        return
    return runner(name, **kwargs)
def select_lccd_gradient(name, **kwargs):
    """Dispatch a LCCD gradient computation to the user-requested or
    best-performing module.
    """
    ref = core.get_option('SCF', 'REFERENCE')
    algorithm = core.get_global_option('CC_TYPE')
    module = core.get_global_option('QC_MODULE')
    # Conventional gradients are only available all-electron.
    all_electron = core.get_global_option('FREEZE_CORE') == "FALSE"
    # LCCD gradients are implemented only in the (DF-)OCC module.
    runner = None
    if ref in ('RHF', 'UHF') and module in ('', 'OCC'):
        if algorithm == 'CONV' and all_electron:
            runner = run_occ_gradient
        elif algorithm == 'DF':
            runner = run_dfocc_gradient
    if runner is None:
        raise ManagedMethodError(['select_lccd_gradient', name, 'CC_TYPE', algorithm, ref, module, all_electron])
    if kwargs.pop('probe', False):
        return
    return runner(name, **kwargs)
def select_olccd(name, **kwargs):
    """Dispatch an OLCCD energy computation to the user-requested or
    best-performing module.
    """
    ref = core.get_option('SCF', 'REFERENCE')
    algorithm = core.get_global_option('CC_TYPE')
    module = core.get_global_option('QC_MODULE')
    # OLCCD is implemented only in the (DF-)OCC module.
    runner = None
    if ref in ('RHF', 'UHF', 'ROHF', 'RKS', 'UKS') and module in ('', 'OCC'):
        if algorithm == 'CONV':
            runner = run_occ
        elif algorithm in ('DF', 'CD'):
            runner = run_dfocc
    if runner is None:
        raise ManagedMethodError(['select_olccd', name, 'CC_TYPE', algorithm, ref, module])
    if kwargs.pop('probe', False):
        return
    return runner(name, **kwargs)
def select_olccd_gradient(name, **kwargs):
    """Dispatch an OLCCD gradient computation to the user-requested or
    best-performing module.
    """
    ref = core.get_option('SCF', 'REFERENCE')
    algorithm = core.get_global_option('CC_TYPE')
    module = core.get_global_option('QC_MODULE')
    # OLCCD gradients are implemented only in the (DF-)OCC module.
    runner = None
    if ref in ('RHF', 'UHF', 'ROHF', 'RKS', 'UKS') and module in ('', 'OCC'):
        if algorithm == 'CONV':
            runner = run_occ_gradient
        elif algorithm == 'DF':
            runner = run_dfocc_gradient
    if runner is None:
        raise ManagedMethodError(['select_olccd_gradient', name, 'CC_TYPE', algorithm, ref, module])
    if kwargs.pop('probe', False):
        return
    return runner(name, **kwargs)
def select_fnoccsd(name, **kwargs):
    """Dispatch a FNO-CCSD energy computation to the user-requested or
    best-performing module.
    """
    ref = core.get_option('SCF', 'REFERENCE')
    algorithm = core.get_global_option('CC_TYPE')
    module = core.get_global_option('QC_MODULE')
    # FNO-CCSD is implemented only in fnocc, RHF references only.
    runner = None
    if ref == 'RHF' and module in ('', 'FNOCC'):
        if algorithm == 'CONV':
            runner = run_fnocc
        elif algorithm in ('DF', 'CD'):
            runner = run_fnodfcc
    if runner is None:
        raise ManagedMethodError(['select_fnoccsd', name, 'CC_TYPE', algorithm, ref, module])
    if kwargs.pop('probe', False):
        return
    return runner(name, **kwargs)
def select_ccsd(name, **kwargs):
    """Dispatch a CCSD energy computation to the user-requested or
    best-performing module.
    """
    ref = core.get_option('SCF', 'REFERENCE')
    algorithm = core.get_global_option('CC_TYPE')
    module = core.get_global_option('QC_MODULE')
    # Candidate modules: [df]occ / ccenergy / detci / fnocc, plus the
    # optional cct3 plugin when installed.
    runner = None
    if ref == 'RHF':
        if algorithm == 'CONV':
            if module == 'FNOCC':
                runner = run_fnocc
            elif module == 'CCT3' and extras.addons("cct3"):
                import cct3  # plugin import deferred until actually requested
                runner = cct3.run_cct3
            elif module in ('', 'CCENERGY'):
                runner = run_ccenergy
        elif algorithm in ('DF', 'CD'):
            if module == 'OCC':
                runner = run_dfocc
            elif module in ('', 'FNOCC'):
                runner = run_fnodfcc
    elif ref == 'UHF':
        if algorithm == 'CONV' and module in ('', 'CCENERGY'):
            runner = run_ccenergy
    elif ref == 'ROHF':
        if algorithm == 'CONV':
            if module == 'CCT3' and extras.addons("cct3"):
                import cct3  # plugin import deferred until actually requested
                runner = cct3.run_cct3
            elif module in ('', 'CCENERGY'):
                runner = run_ccenergy
    if runner is None:
        raise ManagedMethodError(['select_ccsd', name, 'CC_TYPE', algorithm, ref, module])
    if kwargs.pop('probe', False):
        return
    return runner(name, **kwargs)
def select_ccsd_gradient(name, **kwargs):
    """Dispatch a CCSD gradient computation to the user-requested or
    best-performing module.
    """
    ref = core.get_option('SCF', 'REFERENCE')
    algorithm = core.get_global_option('CC_TYPE')
    module = core.get_global_option('QC_MODULE')
    # Candidate modules: [df]occ / ccenergy.
    runner = None
    if algorithm == 'CONV' and ref in ('RHF', 'UHF', 'ROHF') and module in ('', 'CCENERGY'):
        runner = run_ccenergy_gradient
    elif algorithm == 'DF' and ref == 'RHF' and module in ('', 'OCC'):
        runner = run_dfocc_gradient
    if runner is None:
        raise ManagedMethodError(['select_ccsd_gradient', name, 'CC_TYPE', algorithm, ref, module])
    if kwargs.pop('probe', False):
        return
    return runner(name, **kwargs)
def select_fnoccsd_t_(name, **kwargs):
    """Dispatch a FNO-CCSD(T) energy computation to the user-requested or
    best-performing module.
    """
    ref = core.get_option('SCF', 'REFERENCE')
    algorithm = core.get_global_option('CC_TYPE')
    module = core.get_global_option('QC_MODULE')
    # FNO-CCSD(T) is implemented only in fnocc, RHF references only.
    runner = None
    if ref == 'RHF' and module in ('', 'FNOCC'):
        if algorithm == 'CONV':
            runner = run_fnocc
        elif algorithm in ('DF', 'CD'):
            runner = run_fnodfcc
    if runner is None:
        raise ManagedMethodError(['select_fnoccsd_t_', name, 'CC_TYPE', algorithm, ref, module])
    if kwargs.pop('probe', False):
        return
    return runner(name, **kwargs)
def select_ccsd_t_(name, **kwargs):
    """Dispatch a CCSD(T) energy computation to the user-requested or
    best-performing module.
    """
    ref = core.get_option('SCF', 'REFERENCE')
    algorithm = core.get_global_option('CC_TYPE')
    module = core.get_global_option('QC_MODULE')
    # Candidate modules: [df]occ / ccenergy / fnocc.
    runner = None
    if ref == 'RHF':
        if algorithm == 'CONV':
            runner = {'FNOCC': run_fnocc, '': run_ccenergy, 'CCENERGY': run_ccenergy}.get(module)
        elif algorithm in ('DF', 'CD'):
            runner = {'OCC': run_dfocc, '': run_fnodfcc, 'FNOCC': run_fnodfcc}.get(module)
    elif ref in ('UHF', 'ROHF'):
        if algorithm == 'CONV' and module in ('', 'CCENERGY'):
            runner = run_ccenergy
    if runner is None:
        raise ManagedMethodError(['select_ccsd_t_', name, 'CC_TYPE', algorithm, ref, module])
    if kwargs.pop('probe', False):
        return
    return runner(name, **kwargs)
def select_ccsd_t__gradient(name, **kwargs):
    """Dispatch a CCSD(T) gradient computation to the user-requested or
    best-performing module.
    """
    ref = core.get_option('SCF', 'REFERENCE')
    algorithm = core.get_global_option('CC_TYPE')
    module = core.get_global_option('QC_MODULE')
    # Candidate modules: ccenergy (CONV) and dfocc (RHF DF only).
    runner = None
    if algorithm == 'CONV' and ref in ('RHF', 'UHF') and module in ('', 'CCENERGY'):
        runner = run_ccenergy_gradient
    elif algorithm == 'DF' and ref == 'RHF' and module in ('', 'OCC'):
        runner = run_dfocc_gradient
    if runner is None:
        raise ManagedMethodError(['select_ccsd_t__gradient', name, 'CC_TYPE', algorithm, ref, module])
    if kwargs.pop('probe', False):
        return
    return runner(name, **kwargs)
def select_ccsd_at_(name, **kwargs):
    """Dispatch a CCSD(AT) energy computation to the user-requested or
    best-performing module, canonicalizing aliases to "a-ccsd(t)".
    """
    ref = core.get_option('SCF', 'REFERENCE')
    algorithm = core.get_global_option('CC_TYPE')
    module = core.get_global_option('QC_MODULE')
    # Candidate modules: [df]occ / ccenergy, RHF references only.
    runner = None
    if ref == 'RHF':
        if algorithm == 'CONV' and module in ('', 'CCENERGY'):
            runner = run_ccenergy
        elif algorithm in ('DF', 'CD') and module in ('', 'OCC'):
            runner = run_dfocc
    if runner is None:
        raise ManagedMethodError(['select_ccsd_at_', name, 'CC_TYPE', algorithm, ref, module])
    # Regularize legacy method aliases so QCVariables get a uniform label.
    if name.lower() in ["ccsd(at)", "lambda-ccsd(t)"]:
        core.print_out(f"""\nMethod "{name.lower()}" has been regularized to "a-ccsd(t)" for QCVariables.""")
        name = "a-ccsd(t)"
    if kwargs.pop('probe', False):
        return
    return runner(name, **kwargs)
def select_cisd(name, **kwargs):
    """Dispatch a CISD energy computation to the user-requested or
    best-performing module.
    """
    ref = core.get_option('SCF', 'REFERENCE')
    algorithm = core.get_global_option('CI_TYPE')
    module = core.get_global_option('QC_MODULE')
    # Candidate modules: detci / fnocc.
    runner = None
    if algorithm == 'CONV':
        if ref == 'RHF':
            runner = {'DETCI': run_detci, '': run_cepa, 'FNOCC': run_cepa}.get(module)
        elif ref == 'ROHF' and module in ('', 'DETCI'):
            runner = run_detci
    if runner is None:
        raise ManagedMethodError(['select_cisd', name, 'CI_TYPE', algorithm, ref, module])
    if kwargs.pop('probe', False):
        return
    return runner(name, **kwargs)
def select_mp4(name, **kwargs):
    """Dispatch a MP4 energy computation to the user-requested or
    best-performing module.
    """
    ref = core.get_option('SCF', 'REFERENCE')
    algorithm = core.get_global_option('MP_TYPE')
    module = core.get_global_option('QC_MODULE')
    # Candidate modules: detci / fnocc.
    runner = None
    if algorithm == 'CONV':
        if ref == 'RHF':
            runner = {'DETCI': run_detci, '': run_fnocc, 'FNOCC': run_fnocc}.get(module)
        elif ref == 'ROHF':
            if module == 'DETCI':  # no default for this case
                runner = run_detci
            elif module == '':
                core.print_out("""\nThis method is available inefficiently as a """
                               """byproduct of a CISDT computation.\n Add "set """
                               """qc_module detci" to input to access this route.\n""")
    if runner is None:
        raise ManagedMethodError(['select_mp4', name, 'MP_TYPE', algorithm, ref, module])
    if kwargs.pop('probe', False):
        return
    return runner(name, **kwargs)
def select_adc2(name, **kwargs):
    """Dispatch an ADC(2) excited-state energy computation to the
    user-requested or best-performing module.
    """
    ref = core.get_option('SCF', 'REFERENCE')
    algorithm = core.get_global_option('MP_TYPE')
    module = core.get_global_option('QC_MODULE')
    # Candidate modules: adcc (plugin) / builtin adc.
    # TODO Selection should really weigh further options too: adcc
    # supports frozen-core and frozen-virtual, spin-specific states, and
    # spin-flip methods, while (as far as known) the BUILTIN ADC routine
    # handles only singlet states with no frozen core/virtual orbitals.
    runner = None
    if algorithm == 'CONV':
        if ref == 'RHF':
            if module in ('ADCC', '') and extras.addons("adcc"):
                runner = run_adcc
            elif module in ('BUILTIN', ''):
                runner = run_adc
        elif ref == 'UHF':
            if module in ('ADCC', '') and extras.addons("adcc"):
                runner = run_adcc
        # Note: ROHF is theoretically available in adcc but not fully
        # tested, so it will be added later.
    if runner is None:
        raise ManagedMethodError(['select_adc2', name, 'MP_TYPE', algorithm, ref, module])
    if kwargs.pop('probe', False):
        return
    return runner(name, **kwargs)
def build_disp_functor(name, restricted, save_pairwise_disp=False, **kwargs):
    """Resolve *name* into a superfunctional and, when the functional
    carries a dispersion correction, an EmpiricalDispersion functor.

    Returns a ``(superfunc, disp_functor_or_None)`` pair.
    """
    # Respect user-tweaked dispersion parameters, when set.
    if core.has_option_changed("SCF", "DFT_DISPERSION_PARAMETERS"):
        modified_disp_params = core.get_option("SCF", "DFT_DISPERSION_PARAMETERS")
    else:
        modified_disp_params = None
    # Figure out functional
    superfunc, disp_type = dft.build_superfunctional(name, restricted)
    if not disp_type:
        return superfunc, None
    if isinstance(name, dict):
        # user dft_functional={} spec - type for lookup, dict val for param defs,
        # name & citation discarded so only param matches to existing defs will print labels
        name_hint = ''
        param_tweaks = disp_type["params"]
    else:
        # dft/*functionals.py spec - name & type for lookup, option val for param tweaks
        name_hint = superfunc.name()
        param_tweaks = modified_disp_params
    _disp_functor = empirical_dispersion.EmpiricalDispersion(name_hint=name_hint,
                                                             level_hint=disp_type["type"],
                                                             param_tweaks=param_tweaks,
                                                             save_pairwise_disp=save_pairwise_disp,
                                                             engine=kwargs.get('engine', None))
    # [Aug 2018] there once was a breed of `disp_type` that quacked
    # like a list rather than the more common dict handled above. if
    # ever again sighted, make an issue so this code can accommodate.
    _disp_functor.print_out()
    return superfunc, _disp_functor
def scf_wavefunction_factory(name, ref_wfn, reference, **kwargs):
    """Builds the correct (R/U/RO/CU HF/KS) wavefunction from the
    provided information, sets relevant auxiliary basis sets on it,
    and prepares any empirical dispersion.

    Parameters
    ----------
    name
        Method/functional label (or user ``dft_functional={}`` dict)
        forwarded to the superfunctional builder.
    ref_wfn
        Base wavefunction supplying molecule and orbital basis.
    reference
        One of RHF/RKS, ROHF, UHF/UKS, CUHF; selects the C++ class.
    kwargs
        May carry 'external_potentials' (dict keyed by fragment label
        A/B/C) and dispersion-engine hints consumed downstream.

    Returns
    -------
    An (unconverged) SCF wavefunction with auxiliary bases attached.
    """
    # Figure out functional and dispersion
    superfunc, _disp_functor = build_disp_functor(name, restricted=(reference in ["RKS", "RHF"]), **kwargs)

    # Build the wavefunction
    core.prepare_options_for_module("SCF")
    if reference in ["RHF", "RKS"]:
        wfn = core.RHF(ref_wfn, superfunc)
    elif reference == "ROHF":
        wfn = core.ROHF(ref_wfn, superfunc)
    elif reference in ["UHF", "UKS"]:
        wfn = core.UHF(ref_wfn, superfunc)
    elif reference == "CUHF":
        wfn = core.CUHF(ref_wfn, superfunc)
    else:
        raise ValidationError("SCF: Unknown reference (%s) when building the Wavefunction." % reference)

    # NOTE(review): 'nl' dispersion engines are deliberately not attached
    # here — presumably evaluated elsewhere in the SCF; confirm upstream.
    if _disp_functor and _disp_functor.engine != 'nl':
        wfn._disp_functor = _disp_functor

    # Set the DF basis sets
    if (("DF" in core.get_global_option("SCF_TYPE")) or
            (core.get_option("SCF", "DF_SCF_GUESS") and (core.get_global_option("SCF_TYPE") == "DIRECT"))):
        aux_basis = core.BasisSet.build(wfn.molecule(), "DF_BASIS_SCF",
                                        core.get_option("SCF", "DF_BASIS_SCF"),
                                        "JKFIT", core.get_global_option('BASIS'),
                                        puream=wfn.basisset().has_puream())
        wfn.set_basisset("DF_BASIS_SCF", aux_basis)
    else:
        # Non-DF path still expects the slot to exist; fill it with a dummy.
        wfn.set_basisset("DF_BASIS_SCF", core.BasisSet.zero_ao_basis_set())

    # Set the relativistic basis sets (decontracted orbital basis)
    if core.get_global_option("RELATIVISTIC") in ["X2C", "DKH"]:
        decon_basis = core.BasisSet.build(wfn.molecule(), "BASIS_RELATIVISTIC",
                                          core.get_option("SCF", "BASIS_RELATIVISTIC"),
                                          "DECON", core.get_global_option('BASIS'),
                                          puream=wfn.basisset().has_puream())
        wfn.set_basisset("BASIS_RELATIVISTIC", decon_basis)

    # Set the multitude of SAD basis sets (per-atom lists)
    if (core.get_option("SCF", "GUESS") in ["SAD", "SADNO", "HUCKEL"]):
        sad_basis_list = core.BasisSet.build(wfn.molecule(), "ORBITAL",
                                             core.get_global_option("BASIS"),
                                             puream=wfn.basisset().has_puream(),
                                             return_atomlist=True)
        wfn.set_sad_basissets(sad_basis_list)

        if ("DF" in core.get_option("SCF", "SAD_SCF_TYPE")):
            # We need to force this to spherical regardless of any user or other demands.
            optstash = p4util.OptionsState(['PUREAM'])
            core.set_global_option('PUREAM', True)
            sad_fitting_list = core.BasisSet.build(wfn.molecule(), "DF_BASIS_SAD",
                                                   core.get_option("SCF", "DF_BASIS_SAD"),
                                                   puream=True,
                                                   return_atomlist=True)
            wfn.set_sad_fitting_basissets(sad_fitting_list)
            optstash.restore()

    # External potentials: the legacy EXTERN global wins over the kwarg.
    if hasattr(core, "EXTERN") and 'external_potentials' in kwargs:
        core.print_out("\n Warning! Both an external potential EXTERN object and the external_potential" +
                       " keyword argument are specified. The external_potentials keyword argument will be ignored.\n")

    # If EXTERN is set, then place that potential on the wfn
    if hasattr(core, "EXTERN"):
        wfn.set_potential_variable("C", core.EXTERN) # This is for the FSAPT procedure
        wfn.set_external_potential(core.EXTERN)
    elif 'external_potentials' in kwargs:
        # For FSAPT, we can take a dictionary of external potentials, e.g.,
        # external_potentials={'A': potA, 'B': potB, 'C': potC} (any optional)
        # For the dimer SAPT calculation, we need to account for the external potential
        # in all of the subsystems A, B, C. So we add them all in total_external_potential
        # and set the external potential to the dimer wave function
        total_external_potential = core.ExternalPotential()
        for frag in kwargs['external_potentials']:
            if frag.upper() in "ABC":
                wfn.set_potential_variable(frag.upper(), kwargs['external_potentials'][frag].extern)
                total_external_potential.appendCharges(kwargs['external_potentials'][frag].extern.getCharges())
            else:
                core.print_out("\n Warning! Unknown key for the external_potentials argument: %s" % frag)
        wfn.set_external_potential(total_external_potential)

    return wfn
def scf_helper(name, post_scf=True, **kwargs):
    """Function serving as helper to SCF, choosing whether to cast
    up or just run SCF with a standard guess. This preserves
    previous SCF options set by other procedures (e.g., SAPT
    output file types for SCF).

    kwargs read directly here: 'use_c1', 'molecule', 'ref_wfn'
    (rejected if present), 'write_orbitals', 'do_timer', 'banner',
    'dft_functional', 'brokensymmetry', 'scf_do_properties'.
    Returns the converged SCF wavefunction (deep-copied to C1 when
    'use_c1' is set and the molecule has higher symmetry).
    """
    # Correlated callers pass their method name; the reference step is plain SCF.
    if post_scf:
        name = "scf"

    # Options restored at function exit.
    optstash = p4util.OptionsState(
        ['PUREAM'],
        ['BASIS'],
        ['QMEFP'],
        ['INTS_TOLERANCE'],
        ['DF_BASIS_SCF'],
        ['SCF', 'GUESS'],
        ['SCF', 'DF_INTS_IO'],
        ['SCF', 'ORBITALS_WRITE'],
        ['SCF_TYPE'], # Hack: scope gets changed internally with the Andy trick
    )

    # Options restored early — right after the cast-up (guess-basis) SCF.
    optstash2 = p4util.OptionsState(
        ['BASIS'],
        ['DF_BASIS_SCF'],
        ['SCF_TYPE'],
        ['SCF', 'DF_INTS_IO'],
    )

    # Make sure we grab the correctly scoped integral threshold for SCF
    core.set_global_option('INTS_TOLERANCE', core.get_option('SCF', 'INTS_TOLERANCE'))

    # Grab a few kwargs
    use_c1 = kwargs.get('use_c1', False)
    scf_molecule = kwargs.get('molecule', core.get_active_molecule())
    read_orbitals = core.get_option('SCF', 'GUESS') == "READ"
    do_timer = kwargs.pop("do_timer", True)
    ref_wfn = kwargs.pop('ref_wfn', None)
    if ref_wfn is not None:
        raise ValidationError("Cannot seed an SCF calculation with a reference wavefunction ('ref_wfn' kwarg).")

    # decide if we keep the checkpoint file
    # write_orbitals may be bool (scratch-file checkpoint) or str (user-named file).
    _chkfile = kwargs.get('write_orbitals', True)
    write_checkpoint_file = False
    if isinstance(_chkfile, str):
        write_checkpoint_file = True
        filename = kwargs.get('write_orbitals')
        core.set_local_option("SCF", "ORBITALS_WRITE", filename)
    elif _chkfile is True:
        write_checkpoint_file = True

    # PCM needs to be run w/o symmetry
    if core.get_option("SCF", "PCM"):
        c1_molecule = scf_molecule.clone()
        c1_molecule.reset_point_group('c1')
        c1_molecule.update_geometry()
        scf_molecule = c1_molecule
        core.print_out(""" PCM does not make use of molecular symmetry: """
                       """further calculations in C1 point group.\n""")

    # PE needs to use exactly input orientation to correspond to potfile
    if core.get_option("SCF", "PE"):
        c1_molecule = scf_molecule.clone()
        if getattr(scf_molecule, "_initial_cartesian", None) is not None:
            c1_molecule._initial_cartesian = scf_molecule._initial_cartesian.clone()
            c1_molecule.set_geometry(c1_molecule._initial_cartesian)
            c1_molecule.reset_point_group("c1")
            c1_molecule.fix_orientation(True)
            c1_molecule.fix_com(True)
            c1_molecule.update_geometry()
        else:
            raise ValidationError("Set no_com/no_reorient/symmetry c1 by hand for PE on non-Cartesian molecules.")
        scf_molecule = c1_molecule
        core.print_out(""" PE does not make use of molecular symmetry: """
                       """further calculations in C1 point group.\n""")
        core.print_out(""" PE geometry must align with POTFILE keyword: """
                       """resetting coordinates with fixed origin and orientation.\n""")

    # SCF Banner data
    banner = kwargs.pop('banner', None)
    bannername = name

    # Did we pass in a DFT functional?
    dft_func = kwargs.pop('dft_functional', None)
    if dft_func is not None:
        if name.lower() != "scf":
            raise ValidationError("dft_functional was supplied to SCF, but method name was not SCF ('%s')" % name)
        name = dft_func
        bannername = name
        if isinstance(name, dict):
            bannername = name.get("name", "custom functional")

    # Setup the timer
    if do_timer:
        core.tstart()

    # Second-order SCF requires non-symmetric density matrix support
    if core.get_option('SCF', 'SOSCF'):
        proc_util.check_non_symmetric_jk_density("Second-order SCF")

    # sort out cast_up settings. no need to stash these since only read, never reset
    # cast ends up False, True (pick default guess basis), or a basis-name string.
    cast = False
    if core.has_option_changed('SCF', 'BASIS_GUESS'):
        cast = core.get_option('SCF', 'BASIS_GUESS')
        if p4util.yes.match(str(cast)):
            cast = True
        elif p4util.no.match(str(cast)):
            cast = False

    if cast:
        # A user can set "BASIS_GUESS" to True and we default to 3-21G
        if cast is True:
            guessbasis = corresponding_basis(core.get_global_option('BASIS'), 'GUESS')[0]
            if guessbasis is None:
                guessbasis = '3-21G' # guess of last resort
        else:
            guessbasis = cast
        core.set_global_option('BASIS', guessbasis)

        # castdf mirrors cast: False, True (default fitting basis), or a string.
        castdf = 'DF' in core.get_global_option('SCF_TYPE')
        if core.has_option_changed('SCF', 'DF_BASIS_GUESS'):
            castdf = core.get_option('SCF', 'DF_BASIS_GUESS')
            if p4util.yes.match(str(castdf)):
                castdf = True
            elif p4util.no.match(str(castdf)):
                castdf = False

        if castdf:
            core.set_global_option('SCF_TYPE', 'DF')
            core.set_local_option('SCF', 'DF_INTS_IO', 'none')
            # Figure out the fitting basis set
            if castdf is True:
                core.set_global_option('DF_BASIS_SCF', '')
            elif isinstance(castdf, str):
                core.set_global_option('DF_BASIS_SCF', castdf)
            else:
                raise ValidationError("Unexpected castdf option (%s)." % castdf)

        # Switch to the guess namespace so scratch files do not collide.
        namespace = core.IO.get_default_namespace()
        guesspace = namespace + '.guess'
        if namespace == '':
            guesspace = 'guess'
        core.IO.set_default_namespace(guesspace)

        # Print some info about the guess
        core.print_out('\n')
        p4util.banner('Guess SCF, %s Basis' % (guessbasis))
        core.print_out('\n')

    # sort out broken_symmetry settings.
    if 'brokensymmetry' in kwargs:
        multp = scf_molecule.multiplicity()
        if multp != 1:
            raise ValidationError('Broken symmetry is only for singlets.')
        if core.get_option('SCF', 'REFERENCE') not in ['UHF', 'UKS']:
            raise ValidationError("""You must specify 'set reference uhf' to use broken symmetry.""")
        do_broken = True
    else:
        do_broken = False

    # Mutually exclusive guess strategies.
    if cast and read_orbitals:
        raise ValidationError("""Detected options to both cast and read orbitals""")
    if cast and do_broken:
        raise ValidationError("""Detected options to both cast and perform a broken symmetry computation""")
    if (core.get_option('SCF', 'STABILITY_ANALYSIS') == 'FOLLOW') and (core.get_option('SCF', 'REFERENCE') != 'UHF'):
        raise ValidationError("""Stability analysis root following is only available for UHF""")

    # broken set-up
    if do_broken:
        # NOTE(review): broken-symmetry appears deliberately disabled in this
        # revision; the raise makes the rest of this branch unreachable.
        raise ValidationError("""Broken symmetry computations are not currently enabled.""")
        scf_molecule.set_multiplicity(3)
        core.print_out('\n')
        p4util.banner(' Computing high-spin triplet guess ')
        core.print_out('\n')

    # If GUESS is auto guess what it should be
    if core.get_option('SCF', 'GUESS') == "AUTO":
        if (scf_molecule.natom() > 1):
            core.set_local_option('SCF', 'GUESS', 'SAD')
        else:
            core.set_local_option('SCF', 'GUESS', 'CORE')

    # 3c methods bundle their own orbital basis when none was given.
    if core.get_global_option('BASIS') in ['', '(AUTO)']:
        if name in ['hf3c', 'hf-3c']:
            core.set_global_option('BASIS', 'minix')
        elif name in ['pbeh3c', 'pbeh-3c']:
            core.set_global_option('BASIS', 'def2-msvp')

    # the FIRST scf call
    if cast or do_broken:
        # Cast or broken are special cases
        base_wfn = core.Wavefunction.build(scf_molecule, core.get_global_option('BASIS'))
        core.print_out("\n ---------------------------------------------------------\n")
        if banner:
            core.print_out(" " + banner.center(58))
        if cast:
            core.print_out(" " + "SCF Castup computation".center(58))
        ref_wfn = scf_wavefunction_factory(name, base_wfn, core.get_option('SCF', 'REFERENCE'), **kwargs)
        core.set_legacy_wavefunction(ref_wfn)
        # Compute additive correction: dftd3, mp2d, dftd4, etc.
        if hasattr(ref_wfn, "_disp_functor"):
            disp_energy = ref_wfn._disp_functor.compute_energy(ref_wfn.molecule())
            ref_wfn.set_variable("-D Energy", disp_energy)
        ref_wfn.compute_energy()

    # broken clean-up
    if do_broken:
        # NOTE(review): unreachable in practice — do_broken already raised above.
        raise ValidationError("Broken Symmetry computations are temporarily disabled.")
        scf_molecule.set_multiplicity(1)
        core.set_local_option('SCF', 'GUESS', 'READ')
        core.print_out('\n')
        p4util.banner(' Computing broken symmetry solution from high-spin triplet guess ')
        core.print_out('\n')

    # cast clean-up
    if cast:
        # Move files to proper namespace
        core.IO.change_file_namespace(180, guesspace, namespace)
        core.IO.set_default_namespace(namespace)
        optstash2.restore()

    # Print the banner for the standard operation
    core.print_out('\n')
    p4util.banner(bannername.upper())
    core.print_out('\n')

    # the SECOND scf call
    base_wfn = core.Wavefunction.build(scf_molecule, core.get_global_option('BASIS'))
    if banner:
        core.print_out("\n ---------------------------------------------------------\n")
        core.print_out(" " + banner.center(58))
    scf_wfn = scf_wavefunction_factory(name, base_wfn, core.get_option('SCF', 'REFERENCE'), **kwargs)
    core.set_legacy_wavefunction(scf_wfn)

    # The wfn from_file routine adds the npy suffix if needed, but we add it here so that
    # we can use os.path.isfile to query whether the file exists before attempting to read
    read_filename = scf_wfn.get_scratch_filename(180) + '.npy'

    if ((core.get_option('SCF', 'GUESS') == 'READ') and os.path.isfile(read_filename)):
        old_wfn = core.Wavefunction.from_file(read_filename)
        Ca_occ = old_wfn.Ca_subset("SO", "OCC")
        Cb_occ = old_wfn.Cb_subset("SO", "OCC")
        if old_wfn.molecule().schoenflies_symbol() != scf_molecule.schoenflies_symbol():
            raise ValidationError("Cannot compute projection of different symmetries.")
        if old_wfn.basisset().name() == scf_wfn.basisset().name():
            core.print_out(f" Reading orbitals from file {read_filename}, no projection.\n\n")
            scf_wfn.guess_Ca(Ca_occ)
            scf_wfn.guess_Cb(Cb_occ)
        else:
            core.print_out(f" Reading orbitals from file {read_filename}, projecting to new basis.\n\n")
            core.print_out(" Computing basis projection from %s to %s\n\n" % (old_wfn.basisset().name(), scf_wfn.basisset().name()))
            pCa = scf_wfn.basis_projection(Ca_occ, old_wfn.nalphapi(), old_wfn.basisset(), scf_wfn.basisset())
            pCb = scf_wfn.basis_projection(Cb_occ, old_wfn.nbetapi(), old_wfn.basisset(), scf_wfn.basisset())
            scf_wfn.guess_Ca(pCa)
            scf_wfn.guess_Cb(pCb)
        # Strip off headers to only get R, RO, U, CU
        old_ref = old_wfn.name().replace("KS", "").replace("HF", "")
        new_ref = scf_wfn.name().replace("KS", "").replace("HF", "")
        if old_ref != new_ref:
            # Occupations from a different reference type cannot be trusted.
            scf_wfn.reset_occ_ = True

    elif (core.get_option('SCF', 'GUESS') == 'READ') and not os.path.isfile(read_filename):
        # READ requested but no file present: fall back to SAD and build its bases.
        core.print_out(f"\n !!! Unable to find file {read_filename}, defaulting to SAD guess. !!!\n\n")
        core.set_local_option('SCF', 'GUESS', 'SAD')
        sad_basis_list = core.BasisSet.build(scf_wfn.molecule(), "ORBITAL",
                                             core.get_global_option("BASIS"),
                                             puream=scf_wfn.basisset().has_puream(),
                                             return_atomlist=True)
        scf_wfn.set_sad_basissets(sad_basis_list)
        if ("DF" in core.get_option("SCF", "SAD_SCF_TYPE")):
            sad_fitting_list = core.BasisSet.build(scf_wfn.molecule(), "DF_BASIS_SAD",
                                                   core.get_option("SCF", "DF_BASIS_SAD"),
                                                   puream=scf_wfn.basisset().has_puream(),
                                                   return_atomlist=True)
            scf_wfn.set_sad_fitting_basissets(sad_fitting_list)

    if cast:
        # Project the converged cast-up orbitals into the target basis as the guess.
        core.print_out("\n Computing basis projection from %s to %s\n\n" % (ref_wfn.basisset().name(), base_wfn.basisset().name()))
        if ref_wfn.basisset().n_ecp_core() != base_wfn.basisset().n_ecp_core():
            raise ValidationError("Projecting from basis ({}) with ({}) ECP electrons to basis ({}) with ({}) ECP electrons will be a disaster. Select a compatible cast-up basis with `set guess_basis YOUR_BASIS_HERE`.".format(
                ref_wfn.basisset().name(), ref_wfn.basisset().n_ecp_core(), base_wfn.basisset().name(), base_wfn.basisset().n_ecp_core()))
        pCa = ref_wfn.basis_projection(ref_wfn.Ca(), ref_wfn.nalphapi(), ref_wfn.basisset(), scf_wfn.basisset())
        pCb = ref_wfn.basis_projection(ref_wfn.Cb(), ref_wfn.nbetapi(), ref_wfn.basisset(), scf_wfn.basisset())
        scf_wfn.guess_Ca(pCa)
        scf_wfn.guess_Cb(pCb)

    # Print basis set info
    if core.get_option("SCF", "PRINT_BASIS"):
        scf_wfn.basisset().print_detail_out()

    # Compute additive correction: dftd3, mp2d, dftd4, etc.
    if hasattr(scf_wfn, "_disp_functor"):
        disp_energy = scf_wfn._disp_functor.compute_energy(scf_wfn.molecule(), scf_wfn)
        scf_wfn.set_variable("-D Energy", disp_energy)

    # PCM preparation
    if core.get_option('SCF', 'PCM'):
        if core.get_option('SCF', 'PE'):
            raise ValidationError("""Error: 3-layer QM/MM/PCM not implemented.\n""")
        pcmsolver_parsed_fname = core.get_local_option('PCM', 'PCMSOLVER_PARSED_FNAME')
        pcm_print_level = core.get_option('SCF', "PRINT")
        scf_wfn.set_PCM(core.PCM(pcmsolver_parsed_fname, pcm_print_level, scf_wfn.basisset()))

    # PE preparation
    if core.get_option('SCF', 'PE'):
        if not solvent._have_pe:
            raise ModuleNotFoundError('Python module cppe not found. Solve by installing it: `conda install -c psi4 pycppe`')
        # PE needs information about molecule and basis set
        pol_embed_options = solvent.pol_embed.get_pe_options()
        core.print_out(f""" Using potential file
{pol_embed_options["potfile"]}
for Polarizable Embedding calculation.\n""")
        scf_wfn.pe_state = solvent.pol_embed.CppeInterface(
            molecule=scf_molecule, options=pol_embed_options,
            basisset=scf_wfn.basisset()
        )

    # Run the actual SCF and publish the energy on both core and the wfn.
    e_scf = scf_wfn.compute_energy()
    for obj in [core, scf_wfn]:
        # set_variable("SCF TOTAL ENERGY") # P::e SCF
        for pv in ["SCF TOTAL ENERGY", "CURRENT ENERGY", "CURRENT REFERENCE ENERGY"]:
            obj.set_variable(pv, e_scf)

    # We always would like to print a little property information
    if kwargs.get('scf_do_properties', True):
        oeprop = core.OEProp(scf_wfn)
        oeprop.set_title("SCF")
        # Figure our properties, if empty do dipole
        props = [x.upper() for x in core.get_option("SCF", "SCF_PROPERTIES")]
        if "DIPOLE" not in props:
            props.append("DIPOLE")
        proc_util.oeprop_validator(props)
        for x in props:
            oeprop.add(x)
        # Populate free-atom volumes
        # if we're doing MBIS
        if 'MBIS_VOLUME_RATIOS' in props:
            p4util.free_atom_volumes(scf_wfn)
        # Compute properties
        oeprop.compute()
        for obj in [core, scf_wfn]:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                # component qcvars can be retired at v1.5
                for xyz in 'XYZ':
                    obj.set_variable('CURRENT DIPOLE ' + xyz, obj.variable('SCF DIPOLE ' + xyz))
            obj.set_variable("CURRENT DIPOLE", obj.variable("SCF DIPOLE")) # P::e SCF

    # Write out MO's
    if core.get_option("SCF", "PRINT_MOS"):
        mowriter = core.MOWriter(scf_wfn)
        mowriter.write()

    # Write out a molden file
    if core.get_option("SCF", "MOLDEN_WRITE"):
        filename = core.get_writer_file_prefix(scf_molecule.name()) + ".molden"
        dovirt = bool(core.get_option("SCF", "MOLDEN_WITH_VIRTUAL"))
        # NOTE(review): occa/occb are unused (mw.write re-reads occupations
        # directly below); occb reads occupation_a() — looks like a copy-paste
        # slip, harmless only because the value is never consumed.
        occa = scf_wfn.occupation_a()
        occb = scf_wfn.occupation_a()
        mw = core.MoldenWriter(scf_wfn)
        mw.write(filename, scf_wfn.Ca(), scf_wfn.Cb(), scf_wfn.epsilon_a(),
                 scf_wfn.epsilon_b(), scf_wfn.occupation_a(),
                 scf_wfn.occupation_b(), dovirt)

    # Write checkpoint file (orbitals and basis); Can be disabled, e.g., for findif displacements
    if write_checkpoint_file and isinstance(_chkfile, str):
        filename = kwargs['write_orbitals']
        scf_wfn.to_file(filename)
        # core.set_local_option("SCF", "ORBITALS_WRITE", filename)
    elif write_checkpoint_file:
        filename = scf_wfn.get_scratch_filename(180)
        scf_wfn.to_file(filename)
        extras.register_numpy_file(filename) # retain with -m (messy) option

    if do_timer:
        core.tstop()

    optstash.restore()

    if (not use_c1) or (scf_molecule.schoenflies_symbol() == 'c1'):
        return scf_wfn
    else:
        # C1 copy quietly
        c1_optstash = p4util.OptionsState(['PRINT'])
        core.set_global_option("PRINT", 0)
        # If we force c1 copy the active molecule
        scf_molecule.update_geometry()
        core.print_out("""\n A requested method does not make use of molecular symmetry: """
                       """further calculations in C1 point group.\n\n""")
        c1_molecule = scf_molecule.clone()
        c1_molecule.reset_point_group('c1')
        c1_molecule.fix_orientation(True)
        c1_molecule.fix_com(True)
        c1_molecule.update_geometry()
        c1_basis = core.BasisSet.build(c1_molecule, "ORBITAL", core.get_global_option('BASIS'), quiet=True)
        tmp = scf_wfn.c1_deep_copy(c1_basis)
        c1_jkbasis = core.BasisSet.build(c1_molecule, "DF_BASIS_SCF",
                                         core.get_global_option("DF_BASIS_SCF"),
                                         "JKFIT", core.get_global_option('BASIS'), quiet=True)
        tmp.set_basisset("DF_BASIS_SCF", c1_jkbasis)
        c1_optstash.restore()
        return tmp
def run_dct(name, **kwargs):
    """Function encoding sequence of PSI module calls for
    a density cumulant theory calculation.
    """
    if core.get_global_option('FREEZE_CORE') == 'TRUE':
        raise ValidationError('Frozen core is not available for DCT.')

    # Bypass the scf call if a reference wavefunction is given
    ref_wfn = kwargs.get('ref_wfn')
    if ref_wfn is None:
        ref_wfn = scf_helper(name, **kwargs)

    if core.get_global_option("DCT_TYPE") == "DF":
        core.print_out(" Constructing Basis Sets for DCT...\n\n")
        mol = ref_wfn.molecule()
        ref_wfn.set_basisset("DF_BASIS_DCT",
                             core.BasisSet.build(mol, "DF_BASIS_DCT",
                                                 core.get_global_option("DF_BASIS_DCT"),
                                                 "RIFIT", core.get_global_option("BASIS")))
        ref_wfn.set_basisset("DF_BASIS_SCF",
                             core.BasisSet.build(mol, "DF_BASIS_SCF",
                                                 core.get_option("SCF", "DF_BASIS_SCF"),
                                                 "JKFIT", core.get_global_option('BASIS'),
                                                 puream=ref_wfn.basisset().has_puream()))
    else:
        # Ensure IWL files have been written for non DF-DCT
        proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), ref_wfn)

    dct_wfn = core.dct(ref_wfn)

    # Shove variables into global space
    for qcvar, value in dct_wfn.variables().items():
        core.set_variable(qcvar, value)
    return dct_wfn
def run_dct_gradient(name, **kwargs):
    """Function encoding sequence of PSI module calls for
    DCT gradient calculation.
    """
    optstash = p4util.OptionsState(['GLOBALS', 'DERTYPE'])
    core.set_global_option('DERTYPE', 'FIRST')

    # The property variant also runs the energy and prepares the OPDM.
    dct_wfn = run_dct_property(name, **kwargs)

    deriv = core.Deriv(dct_wfn)
    deriv.set_tpdm_presorted(True)
    if core.get_option('DCT', 'DCT_TYPE') == 'CONV':
        gradient = deriv.compute()
    else:
        gradient = deriv.compute_df('DF_BASIS_SCF', 'DF_BASIS_DCT')
    dct_wfn.set_gradient(gradient)

    optstash.restore()
    return dct_wfn
def run_dct_property(name, **kwargs):
    """Function encoding sequence of PSI module calls for
    DCT property calculation.
    """
    optstash = p4util.OptionsState(['DCT', 'OPDM'])
    # Properties require the one-particle density matrix.
    core.set_local_option('DCT', 'OPDM', 'true')
    dct_wfn = run_dct(name, **kwargs)

    # Run OEProp on the requested (validated) properties.
    oeprop = core.OEProp(dct_wfn)
    oeprop.set_title("DCT")
    for requested in kwargs.get("properties", []):
        requested = requested.upper()
        if requested in core.OEProp.valid_methods or "MULTIPOLE(" in requested:
            oeprop.add(requested)
    oeprop.compute()
    dct_wfn.oeprop = oeprop

    for qcvar, value in dct_wfn.variables().items():
        core.set_variable(qcvar, value)

    optstash.restore()
    return dct_wfn
def run_dfocc(name, **kwargs):
    """Function encoding sequence of PSI module calls for
    a density-fitted or Cholesky-decomposed
    (non-)orbital-optimized MPN or CC computation.

    Fix over previous revision: calling with ``name='dfocc'`` used to hit
    ``set_cholesky_from(corl_type)`` with ``corl_type`` never assigned,
    raising UnboundLocalError; the generic 'dfocc' entry now skips that
    dispatch and honors whatever DFOCC options the user set.
    """
    optstash = p4util.OptionsState(
        ['SCF', 'DF_INTS_IO'],
        ['DFOCC', 'WFN_TYPE'],
        ['DFOCC', 'ORB_OPT'],
        ['DFOCC', 'DO_SCS'],
        ['DFOCC', 'DO_SOS'],
        ['DFOCC', 'READ_SCF_3INDEX'],
        ['DFOCC', 'CHOLESKY'],
        ['DFOCC', 'CC_LAMBDA'])

    def set_cholesky_from(corl_type):
        # Translate the user's DF/CD algorithm choice into DFOCC options.
        if corl_type == 'DF':
            core.set_local_option('DFOCC', 'CHOLESKY', 'FALSE')
            proc_util.check_disk_df(name.upper(), optstash)
        elif corl_type == 'CD':
            core.set_local_option('DFOCC', 'CHOLESKY', 'TRUE')
            # Alter default algorithm
            if not core.has_global_option_changed('SCF_TYPE'):
                optstash.add_option(['SCF_TYPE'])
                core.set_global_option('SCF_TYPE', 'CD')
                core.print_out(""" SCF Algorithm Type (re)set to CD.\n""")
            # CD correlation atop a non-CD SCF cannot reuse the SCF 3-index tensors.
            if core.get_global_option('SCF_TYPE') != 'CD':
                core.set_local_option('DFOCC', 'READ_SCF_3INDEX', 'FALSE')
        else:
            raise ValidationError(f"""Invalid type '{corl_type}' for DFOCC""")

    # Map the method name to a DFOCC wavefunction type and find which
    # *_TYPE option governs its DF-vs-CD algorithm selection.
    if name in ['mp2', 'omp2']:
        core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP2')
        corl_type = core.get_global_option('MP2_TYPE')
    elif name in ['mp2.5']:
        # mp2.5/mp3 default to DF unless the user explicitly set MP_TYPE.
        core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP2.5')
        corl_type = core.get_global_option('MP_TYPE') if core.has_global_option_changed("MP_TYPE") else "DF"
    elif name in ['omp2.5']:
        core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP2.5')
        corl_type = core.get_global_option('MP_TYPE')
    elif name in ['mp3']:
        core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP3')
        corl_type = core.get_global_option('MP_TYPE') if core.has_global_option_changed("MP_TYPE") else "DF"
    elif name in ['omp3']:
        core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP3')
        corl_type = core.get_global_option('MP_TYPE')
    elif name in ['lccd', 'olccd']:
        core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OLCCD')
        corl_type = core.get_global_option('CC_TYPE')
    elif name == 'ccd':
        core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-CCD')
        corl_type = core.get_global_option('CC_TYPE')
    elif name == 'ccsd':
        core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-CCSD')
        corl_type = core.get_global_option('CC_TYPE')
    elif name == 'ccsd(t)':
        core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-CCSD(T)')
        corl_type = core.get_global_option('CC_TYPE')
    elif name == 'a-ccsd(t)':
        core.set_local_option('DFOCC', 'CC_LAMBDA', 'TRUE')
        core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-CCSD(AT)')
        corl_type = core.get_global_option('CC_TYPE')
    elif name == 'dfocc':
        # Generic entry point: leave WFN_TYPE/CHOLESKY exactly as the user
        # set them. (Previously fell through to set_cholesky_from() with
        # `corl_type` unbound, raising UnboundLocalError.)
        corl_type = None
    else:
        raise ValidationError('Unidentified method %s' % (name))

    if corl_type is not None:
        set_cholesky_from(corl_type)

    # conventional vs. optimized orbitals
    if name in ['mp2', 'mp2.5', 'mp3', 'lccd',
                'ccd', 'ccsd', 'ccsd(t)', 'a-ccsd(t)']:
        core.set_local_option('DFOCC', 'ORB_OPT', 'FALSE')
    elif name in ['omp2', 'omp2.5', 'omp3', 'olccd']:
        core.set_local_option('DFOCC', 'ORB_OPT', 'TRUE')

    core.set_local_option('DFOCC', 'DO_SCS', 'FALSE')
    core.set_local_option('DFOCC', 'DO_SOS', 'FALSE')
    core.set_local_option('SCF', 'DF_INTS_IO', 'SAVE')

    if name in ["mp2.5", "mp3"] and not core.has_global_option_changed("MP_TYPE"):
        core.print_out(f" Information: {name.upper()} default algorithm changed to DF in August 2020. Use `set mp_type conv` for previous behavior.\n")

    # Bypass the scf call if a reference wavefunction is given
    ref_wfn = kwargs.get('ref_wfn', None)
    if ref_wfn is None:
        ref_wfn = scf_helper(name, use_c1=True, **kwargs) # C1 certified
    else:
        if ref_wfn.molecule().schoenflies_symbol() != 'c1':
            raise ValidationError(""" DFOCC does not make use of molecular symmetry: """
                                  """reference wavefunction must be C1.\n""")

    # DF path needs both the SCF (JKFIT) and correlation (RIFIT) fitting bases.
    if not core.get_local_option("DFOCC", "CHOLESKY"):
        core.print_out(" Constructing Basis Sets for DFOCC...\n\n")
        scf_aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SCF",
                                            core.get_option("SCF", "DF_BASIS_SCF"),
                                            "JKFIT", core.get_global_option('BASIS'),
                                            puream=ref_wfn.basisset().has_puream())
        ref_wfn.set_basisset("DF_BASIS_SCF", scf_aux_basis)
        aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_CC",
                                        core.get_global_option("DF_BASIS_CC"),
                                        "RIFIT", core.get_global_option("BASIS"))
        ref_wfn.set_basisset("DF_BASIS_CC", aux_basis)

    # DFOCC expects semicanonical orbitals for ROHF references.
    if core.get_option('SCF', 'REFERENCE') == 'ROHF':
        ref_wfn.semicanonicalize()

    dfocc_wfn = core.dfocc(ref_wfn)

    # Shove variables into global space
    if name in ['mp2', 'omp2', 'mp2.5', 'mp3', 'lccd',]:
        for k, v in dfocc_wfn.variables().items():
            core.set_variable(k, v)

    if name == "a-ccsd(t)":
        # temporary until dfocc can be edited and qcvar name changed
        core.set_variable("A-CCSD(T) TOTAL ENERGY", core.variables()["CCSD(AT) TOTAL ENERGY"])
        core.set_variable("A-(T) CORRECTION ENERGY", core.variables()["(AT) CORRECTION ENERGY"])
        core.del_variable("CCSD(AT) TOTAL ENERGY")
        core.del_variable("(AT) CORRECTION ENERGY")

    optstash.restore()
    return dfocc_wfn
def run_dfocc_gradient(name, **kwargs):
    """Function encoding sequence of PSI module calls for
    a density-fitted (non-)orbital-optimized MPN or CC computation.
    """
    optstash = p4util.OptionsState(
        ['SCF', 'DF_INTS_IO'],
        ['REFERENCE'],
        ['DFOCC', 'WFN_TYPE'],
        ['DFOCC', 'ORB_OPT'],
        ['DFOCC', 'CC_LAMBDA'],
        ['GLOBALS', 'DERTYPE'])

    proc_util.check_disk_df(name.upper(), optstash)

    if core.get_global_option('SCF_TYPE') != 'DISK_DF':
        raise ValidationError('DFOCC gradients need DF-SCF reference.')

    # method -> (DFOCC wavefunction type, governing *_TYPE option, needs CC lambda equations)
    dispatch = {
        'mp2':     ('DF-OMP2',    'MP2_TYPE', False),
        'omp2':    ('DF-OMP2',    'MP2_TYPE', False),
        'mp2.5':   ('DF-OMP2.5',  'MP_TYPE',  False),
        'omp2.5':  ('DF-OMP2.5',  'MP_TYPE',  False),
        'mp3':     ('DF-OMP3',    'MP_TYPE',  False),
        'omp3':    ('DF-OMP3',    'MP_TYPE',  False),
        'lccd':    ('DF-OLCCD',   'CC_TYPE',  False),
        'olccd':   ('DF-OLCCD',   'CC_TYPE',  False),
        'ccd':     ('DF-CCD',     'CC_TYPE',  True),
        'ccsd':    ('DF-CCSD',    'CC_TYPE',  True),
        'ccsd(t)': ('DF-CCSD(T)', 'CC_TYPE',  True),
    }
    if name not in dispatch:
        raise ValidationError('Unidentified method %s' % (name))
    wfn_type, type_option, needs_lambda = dispatch[name]

    core.set_local_option('DFOCC', 'WFN_TYPE', wfn_type)
    if needs_lambda:
        core.set_local_option('DFOCC', 'CC_LAMBDA', 'TRUE')

    # mp2.5/mp3 default to DF unless the user explicitly chose an MP_TYPE.
    if name in ('mp2.5', 'mp3') and not core.has_global_option_changed("MP_TYPE"):
        corl_type = "DF"
    else:
        corl_type = core.get_global_option(type_option)

    # Orbital-optimized variants (omp2, omp2.5, omp3, olccd) all start with 'o'.
    core.set_local_option('DFOCC', 'ORB_OPT', 'TRUE' if name.startswith('o') else 'FALSE')

    if corl_type not in ["DF", "CD"]:
        raise ValidationError(f"""Invalid type '{corl_type}' for DFOCC""")

    core.set_global_option('DERTYPE', 'FIRST')
    core.set_local_option('DFOCC', 'DO_SCS', 'FALSE')
    core.set_local_option('DFOCC', 'DO_SOS', 'FALSE')
    core.set_local_option('SCF', 'DF_INTS_IO', 'SAVE')

    if name in ["mp2.5", "mp3"] and not core.has_global_option_changed("MP_TYPE"):
        core.print_out(f" Information: {name.upper()} default algorithm changed to DF in August 2020. Use `set mp_type conv` for previous behavior.\n")

    # Bypass the scf call if a reference wavefunction is given
    ref_wfn = kwargs.get('ref_wfn', None)
    if ref_wfn is None:
        ref_wfn = scf_helper(name, use_c1=True, **kwargs) # C1 certified
    elif ref_wfn.molecule().schoenflies_symbol() != 'c1':
        raise ValidationError(""" DFOCC does not make use of molecular symmetry: """
                              """reference wavefunction must be C1.\n""")

    core.print_out(" Constructing Basis Sets for DFOCC...\n\n")
    mol = ref_wfn.molecule()
    ref_wfn.set_basisset("DF_BASIS_SCF",
                         core.BasisSet.build(mol, "DF_BASIS_SCF",
                                             core.get_option("SCF", "DF_BASIS_SCF"),
                                             "JKFIT", core.get_global_option('BASIS'),
                                             puream=ref_wfn.basisset().has_puream()))
    ref_wfn.set_basisset("DF_BASIS_CC",
                         core.BasisSet.build(mol, "DF_BASIS_CC",
                                             core.get_global_option("DF_BASIS_CC"),
                                             "RIFIT", core.get_global_option("BASIS")))

    # DFOCC expects semicanonical orbitals for ROHF references.
    if core.get_option('SCF', 'REFERENCE') == 'ROHF':
        ref_wfn.semicanonicalize()

    dfocc_wfn = core.dfocc(ref_wfn)

    derivobj = core.Deriv(dfocc_wfn)
    derivobj.compute_df("DF_BASIS_SCF", "DF_BASIS_CC")
    dfocc_wfn.set_variable(f"{name.upper()} TOTAL GRADIENT", dfocc_wfn.gradient())

    # Shove variables into global space
    if name in ['mp2', 'mp2.5', 'mp3', 'lccd', 'ccsd', 'omp2']:
        for qcvar, value in dfocc_wfn.variables().items():
            core.set_variable(qcvar, value)

    optstash.restore()
    return dfocc_wfn
def run_dfocc_property(name, **kwargs):
    """Function encoding sequence of PSI module calls for
    a density-fitted (non-)orbital-optimized MPN or CC computation
    of one-electron properties.

    Fixes over previous revision: the unknown-method message was built as
    ``'Unidentified method ' % (name)`` (no conversion specifier), which
    raised ``TypeError`` instead of the intended ``ValidationError``; and
    ``DO_SCS``/``DO_SOS`` are now stashed (matching ``run_dfocc``) so their
    forced FALSE values no longer leak past this call.
    """
    optstash = p4util.OptionsState(
        ['SCF', 'DF_INTS_IO'],
        ['DFOCC', 'WFN_TYPE'],
        ['DFOCC', 'ORB_OPT'],
        ['DFOCC', 'DO_SCS'],
        ['DFOCC', 'DO_SOS'],
        ['DFOCC', 'OEPROP'])

    # Select the DFOCC wavefunction type for the requested method.
    if name in ['mp2', 'omp2']:
        core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP2')
    elif name in ['omp3']:
        core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP3')
    elif name in ['omp2.5']:
        core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OMP2.5')
    elif name in ['olccd']:
        core.set_local_option('DFOCC', 'WFN_TYPE', 'DF-OLCCD')
    else:
        raise ValidationError('Unidentified method %s' % (name))
    proc_util.check_disk_df(name.upper(), optstash)

    # conventional vs. optimized orbitals
    if name in ['mp2']:
        core.set_local_option('DFOCC', 'ORB_OPT', 'FALSE')
    elif name in ['omp2', 'omp3', 'omp2.5', 'olccd']:
        core.set_local_option('DFOCC', 'ORB_OPT', 'TRUE')

    core.set_local_option('DFOCC', 'OEPROP', 'TRUE')
    core.set_local_option('DFOCC', 'DO_SCS', 'FALSE')
    core.set_local_option('DFOCC', 'DO_SOS', 'FALSE')
    core.set_local_option('SCF', 'DF_INTS_IO', 'SAVE')

    # Bypass the scf call if a reference wavefunction is given
    ref_wfn = kwargs.get('ref_wfn', None)
    if ref_wfn is None:
        ref_wfn = scf_helper(name, use_c1=True, **kwargs) # C1 certified
    else:
        if ref_wfn.molecule().schoenflies_symbol() != 'c1':
            raise ValidationError(""" DFOCC does not make use of molecular symmetry: """
                                  """reference wavefunction must be C1.\n""")

    # Build the SCF (JKFIT) and correlation (RIFIT) fitting bases.
    core.print_out(" Constructing Basis Sets for DFOCC...\n\n")
    scf_aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SCF",
                                        core.get_option("SCF", "DF_BASIS_SCF"),
                                        "JKFIT", core.get_global_option('BASIS'),
                                        puream=ref_wfn.basisset().has_puream())
    ref_wfn.set_basisset("DF_BASIS_SCF", scf_aux_basis)
    aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_CC",
                                    core.get_global_option("DF_BASIS_CC"),
                                    "RIFIT", core.get_global_option("BASIS"))
    ref_wfn.set_basisset("DF_BASIS_CC", aux_basis)

    # DFOCC expects semicanonical orbitals for ROHF references.
    if core.get_option('SCF', 'REFERENCE') == 'ROHF':
        ref_wfn.semicanonicalize()

    dfocc_wfn = core.dfocc(ref_wfn)

    # Shove variables into global space
    # TODO: Make other methods in DFOCC update all variables, then add them to the list. Adding now, risks setting outdated information.
    if name in ['mp2', 'omp2']:
        for k, v in dfocc_wfn.variables().items():
            core.set_variable(k, v)

    optstash.restore()
    return dfocc_wfn
def run_qchf(name, **kwargs):
    """Function encoding sequence of PSI module calls for
    a density-fitted quadratically-convergent Hartree-Fock (QCHF)
    computation, run through the DFOCC module.

    Parameters
    ----------
    name : str
        Lowercase method name, forwarded to :py:func:`scf_helper`.
    kwargs
        May contain ``ref_wfn``, a converged C1 reference wavefunction,
        to bypass the preparatory SCF step.

    Returns
    -------
    Wavefunction
        The DFOCC wavefunction holding the QCHF results.
    """
    # Stash every option touched below so user settings survive this call.
    # NOTE: previously the stash recorded only the *global* MAXITER and
    # DIE_IF_NOT_CONVERGED while the SCF-local ones were set, and
    # restore() was never called at all, so SCF MAXITER=1 leaked into
    # subsequent computations. Both are fixed here.
    optstash = p4util.OptionsState(
        ['SCF', 'DF_INTS_IO'],
        ['SCF', 'DIE_IF_NOT_CONVERGED'],
        ['SCF', 'MAXITER'],
        ['DF_BASIS_SCF'],
        ['DIE_IF_NOT_CONVERGED'],
        ['MAXITER'],
        ['DFOCC', 'ORB_OPT'],
        ['DFOCC', 'WFN_TYPE'],
        ['DFOCC', 'QCHF'],
        ['DFOCC', 'E_CONVERGENCE'])

    core.set_local_option('DFOCC', 'ORB_OPT', 'TRUE')
    core.set_local_option('DFOCC', 'WFN_TYPE', 'QCHF')
    core.set_local_option('DFOCC', 'QCHF', 'TRUE')
    core.set_local_option('DFOCC', 'E_CONVERGENCE', 8)

    core.set_local_option('SCF', 'DF_INTS_IO', 'SAVE')
    # Let DFOCC drive convergence: run a single, non-fatal SCF iteration
    # just to produce starting orbitals.
    core.set_local_option('SCF', 'DIE_IF_NOT_CONVERGED', 'FALSE')
    core.set_local_option('SCF', 'MAXITER', 1)

    # Bypass the scf call if a reference wavefunction is given
    ref_wfn = kwargs.get('ref_wfn', None)
    if ref_wfn is None:
        ref_wfn = scf_helper(name, use_c1=True, **kwargs)  # C1 certified
    else:
        if ref_wfn.molecule().schoenflies_symbol() != 'c1':
            raise ValidationError(""" QCHF does not make use of molecular symmetry: """
                                  """reference wavefunction must be C1.\n""")

    # DFOCC expects semicanonical orbitals for ROHF references.
    if core.get_option('SCF', 'REFERENCE') == 'ROHF':
        ref_wfn.semicanonicalize()

    dfocc_wfn = core.dfocc(ref_wfn)

    optstash.restore()
    return dfocc_wfn
def run_occ(name, **kwargs):
    """Function encoding sequence of PSI module calls for
    a conventional integral (O)MPN computation through the OCC module.

    Parameters
    ----------
    name : str
        Lowercase method name; must be one of the keys tabulated below.
    kwargs
        May contain ``ref_wfn`` to bypass the SCF step.

    Returns
    -------
    Wavefunction
        The OCC wavefunction.

    Raises
    ------
    ValidationError
        If *name* is not an OCC-supported method.
    """
    # Stash these options so we can reload them at computation end.
    optstash = p4util.OptionsState(
        ['OCC', 'SPIN_SCALE_TYPE'],
        ['OCC', 'ORB_OPT'],
        ['OCC', 'WFN_TYPE'])

    # Per-method OCC module settings:
    # name -> (WFN_TYPE, ORB_OPT, SPIN_SCALE_TYPE)
    occ_modes = {
        'mp2':               ('OMP2',   'FALSE', 'NONE'),
        'scs-mp2':           ('OMP2',   'FALSE', 'SCS'),
        'scs(n)-mp2':        ('OMP2',   'FALSE', 'SCSN'),
        'scs-mp2-vdw':       ('OMP2',   'FALSE', 'SCSVDW'),
        'sos-mp2':           ('OMP2',   'FALSE', 'SOS'),
        'sos-pi-mp2':        ('OMP2',   'FALSE', 'SOSPI'),
        'custom-scs-mp2':    ('OMP2',   'FALSE', 'CUSTOM'),
        'omp2':              ('OMP2',   'TRUE',  'NONE'),
        'scs-omp2':          ('OMP2',   'TRUE',  'SCS'),
        'sos-omp2':          ('OMP2',   'TRUE',  'SOS'),
        'custom-scs-omp2':   ('OMP2',   'TRUE',  'CUSTOM'),
        'mp2.5':             ('OMP2.5', 'FALSE', 'NONE'),
        'custom-scs-mp2.5':  ('OMP2.5', 'FALSE', 'CUSTOM'),
        'omp2.5':            ('OMP2.5', 'TRUE',  'NONE'),
        'custom-scs-omp2.5': ('OMP2.5', 'TRUE',  'CUSTOM'),
        'mp3':               ('OMP3',   'FALSE', 'NONE'),
        'scs-mp3':           ('OMP3',   'FALSE', 'SCS'),
        'custom-scs-mp3':    ('OMP3',   'FALSE', 'CUSTOM'),
        'omp3':              ('OMP3',   'TRUE',  'NONE'),
        'scs-omp3':          ('OMP3',   'TRUE',  'SCS'),
        'sos-omp3':          ('OMP3',   'TRUE',  'SOS'),
        'custom-scs-omp3':   ('OMP3',   'TRUE',  'CUSTOM'),
        'lccd':              ('OCEPA',  'FALSE', 'NONE'),
        'custom-scs-lccd':   ('OCEPA',  'FALSE', 'CUSTOM'),
        'olccd':             ('OCEPA',  'TRUE',  'NONE'),
        'custom-scs-olccd':  ('OCEPA',  'TRUE',  'CUSTOM'),
    }
    try:
        wfn_type, orb_opt, spin_scale = occ_modes[name]
    except KeyError:
        raise ValidationError("""Invalid method %s""" % name)
    core.set_local_option('OCC', 'WFN_TYPE', wfn_type)
    core.set_local_option('OCC', 'ORB_OPT', orb_opt)
    core.set_local_option('OCC', 'SPIN_SCALE_TYPE', spin_scale)

    # Bypass the scf call if a reference wavefunction is given
    ref_wfn = kwargs.get('ref_wfn', None)
    if ref_wfn is None:
        ref_wfn = scf_helper(name, **kwargs)  # C1 certified

    # Ensure IWL files have been written
    proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), ref_wfn)

    # OCC requires semicanonical orbitals for an ROHF reference.
    if core.get_option('SCF', 'REFERENCE') == 'ROHF':
        ref_wfn.semicanonicalize()

    occ_wfn = core.occ(ref_wfn)

    # Shove variables into global space
    keep_custom_spin_scaling = core.has_option_changed("OCC", "SS_SCALE") or core.has_option_changed("OCC", "OS_SCALE")
    for k, v in occ_wfn.variables().items():
        # Custom spin component scaling variables are meaningless if custom scalings hasn't been set. Delete them.
        if k.startswith("CUSTOM SCS") and not keep_custom_spin_scaling:
            occ_wfn.del_variable(k)
        else:
            core.set_variable(k, v)

    optstash.restore()
    return occ_wfn
def run_occ_gradient(name, **kwargs):
    """Function encoding sequence of PSI module calls for
    a conventional integral (O)MPN gradient computation through OCC.

    Parameters
    ----------
    name : str
        Lowercase method name; must be one of the keys tabulated below.
    kwargs
        May contain ``ref_wfn`` to bypass the SCF step.

    Returns
    -------
    Wavefunction
        The OCC wavefunction with its gradient set.

    Raises
    ------
    ValidationError
        If *name* is unsupported or the SCF reference is not conventional.
    """
    # NOTE: the stash previously listed DO_SCS/DO_SOS (never touched here)
    # but omitted SPIN_SCALE_TYPE, which *is* set below — that change
    # leaked past restore(). SPIN_SCALE_TYPE is now stashed as well.
    optstash = p4util.OptionsState(
        ['OCC', 'ORB_OPT'],
        ['OCC', 'WFN_TYPE'],
        ['OCC', 'SPIN_SCALE_TYPE'],
        ['OCC', 'DO_SCS'],
        ['OCC', 'DO_SOS'],
        ['GLOBALS', 'DERTYPE'])

    # Analytic OCC gradients are only coded for conventional integrals.
    if core.get_global_option('SCF_TYPE') in ['CD', 'DF', 'MEM_DF', 'DISK_DF']:
        raise ValidationError('OCC gradients need conventional SCF reference.')

    # name -> (WFN_TYPE, ORB_OPT)
    grad_modes = {
        'mp2':       ('OMP2',   'FALSE'),
        'omp2':      ('OMP2',   'TRUE'),
        'conv-omp2': ('OMP2',   'TRUE'),
        'mp2.5':     ('OMP2.5', 'FALSE'),
        'omp2.5':    ('OMP2.5', 'TRUE'),
        'mp3':       ('OMP3',   'FALSE'),
        'omp3':      ('OMP3',   'TRUE'),
        'lccd':      ('OCEPA',  'FALSE'),
        'olccd':     ('OCEPA',  'TRUE'),
    }
    try:
        wfn_type, orb_opt = grad_modes[name]
    except KeyError:
        raise ValidationError("""Invalid method %s""" % name)
    core.set_local_option('OCC', 'WFN_TYPE', wfn_type)
    core.set_local_option('OCC', 'ORB_OPT', orb_opt)

    core.set_global_option('DERTYPE', 'FIRST')

    # locking out SCS through explicit keyword setting
    # * so that current energy must match call
    # * since grads not avail for scs
    core.set_local_option('OCC', 'SPIN_SCALE_TYPE', 'NONE')

    # Bypass the scf call if a reference wavefunction is given
    ref_wfn = kwargs.get('ref_wfn', None)
    if ref_wfn is None:
        ref_wfn = scf_helper(name, **kwargs)  # C1 certified

    # Ensure IWL files have been written
    proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), ref_wfn)

    # OCC requires semicanonical orbitals for an ROHF reference.
    if core.get_option('SCF', 'REFERENCE') == 'ROHF':
        ref_wfn.semicanonicalize()

    occ_wfn = core.occ(ref_wfn)

    derivobj = core.Deriv(occ_wfn)
    grad = derivobj.compute()

    occ_wfn.set_gradient(grad)
    occ_wfn.set_variable(f"{name.upper()} TOTAL GRADIENT", grad)

    # Shove variables into global space
    keep_custom_spin_scaling = core.has_option_changed("OCC", "SS_SCALE") or core.has_option_changed("OCC", "OS_SCALE")
    for k, v in occ_wfn.variables().items():
        # Custom spin component scaling variables are meaningless if custom scalings hasn't been set. Delete them.
        if k.startswith("CUSTOM SCS") and not keep_custom_spin_scaling:
            occ_wfn.del_variable(k)
        else:
            core.set_variable(k, v)

    optstash.restore()
    return occ_wfn
def run_scf(name, **kwargs):
    """Function encoding sequence of PSI module calls for
    a self-consistent-field theory (HF & DFT) calculation.

    For double-hybrid functionals a DFMP2 correlation correction is
    computed on the converged SCF orbitals and folded into the returned
    energy; for MP2D the MP2/dispersion variables are regrouped.

    Returns the SCF wavefunction with energy and QC variables set.
    """
    # Stash MP2-side options that the double-hybrid branch below may alter.
    optstash_mp2 = p4util.OptionsState(
        ['DF_BASIS_MP2'],
        ['DFMP2', 'MP2_OS_SCALE'],
        ['DFMP2', 'MP2_SS_SCALE'])
    dft_func = False
    if "dft_functional" in kwargs:
        dft_func = True
    optstash_scf = proc_util.scf_set_reference_local(name, is_dft=dft_func)
    # See if we're doing TDSCF after, keep JK if so
    if sum(core.get_option("SCF", "TDSCF_STATES")) > 0:
        core.set_local_option("SCF", "SAVE_JK", True)
    # Alter default algorithm
    if not core.has_global_option_changed('SCF_TYPE'):
        core.set_global_option('SCF_TYPE', 'DF')
    scf_wfn = scf_helper(name, post_scf=False, **kwargs)
    returnvalue = scf_wfn.energy()
    ssuper = scf_wfn.functional()
    # Double-hybrid functional: add a (possibly spin-component-scaled)
    # DFMP2 correlation correction on top of the converged SCF energy.
    if ssuper.is_c_hybrid():
        core.tstart()
        aux_basis = core.BasisSet.build(scf_wfn.molecule(), "DF_BASIS_MP2",
                                        core.get_option("DFMP2", "DF_BASIS_MP2"),
                                        "RIFIT", core.get_global_option('BASIS'),
                                        puream=-1)
        scf_wfn.set_basisset("DF_BASIS_MP2", aux_basis)
        if ssuper.is_c_scs_hybrid():
            # SCS double hybrid: forward the functional's own OS/SS
            # coefficients to DFMP2.
            core.set_local_option('DFMP2', 'MP2_OS_SCALE', ssuper.c_os_alpha())
            core.set_local_option('DFMP2', 'MP2_SS_SCALE', ssuper.c_ss_alpha())
            dfmp2_wfn = core.dfmp2(scf_wfn)
            dfmp2_wfn.compute_energy()
            vdh = dfmp2_wfn.variable('CUSTOM SCS-MP2 CORRELATION ENERGY')
        else:
            dfmp2_wfn = core.dfmp2(scf_wfn)
            dfmp2_wfn.compute_energy()
            # Uniform scaling of the MP2 correlation energy by c_alpha.
            vdh = ssuper.c_alpha() * dfmp2_wfn.variable('MP2 CORRELATION ENERGY')
        # remove misleading MP2 psivars computed with DFT, not HF, reference
        for var in dfmp2_wfn.variables():
            if var.startswith('MP2 ') and ssuper.name() not in ['MP2D']:
                scf_wfn.del_variable(var)
        scf_wfn.set_variable("DOUBLE-HYBRID CORRECTION ENERGY", vdh) # P::e SCF
        scf_wfn.set_variable("{} DOUBLE-HYBRID CORRECTION ENERGY".format(ssuper.name()), vdh)
        returnvalue += vdh
        scf_wfn.set_variable("DFT TOTAL ENERGY", returnvalue) # P::e SCF
        # for-else: label the total energy with the functional+dispersion
        # name when such a variable exists, else with the bare functional.
        for pv, pvv in scf_wfn.variables().items():
            if pv.endswith('DISPERSION CORRECTION ENERGY') and pv.startswith(ssuper.name()):
                fctl_plus_disp_name = pv.split()[0]
                scf_wfn.set_variable(fctl_plus_disp_name + ' TOTAL ENERGY', returnvalue)
                break
        else:
            scf_wfn.set_variable('{} TOTAL ENERGY'.format(ssuper.name()), returnvalue)
        scf_wfn.set_variable('CURRENT ENERGY', returnvalue)
        scf_wfn.set_energy(returnvalue)
        core.print_out('\n\n')
        core.print_out(' %s Energy Summary\n' % (name.upper()))
        core.print_out(' ' + '-' * (15 + len(name)) + '\n')
        core.print_out(' DFT Reference Energy = %22.16lf\n' % (returnvalue - vdh))
        core.print_out(' Scaled MP2 Correlation = %22.16lf\n' % (vdh))
        core.print_out(' @Final double-hybrid DFT total energy = %22.16lf\n\n' % (returnvalue))
        core.tstop()
        # MP2D masquerades as a double hybrid: keep the MP2 variables,
        # drop the DFT-flavored ones, and regroup dispersion with MP2.
        if ssuper.name() == 'MP2D':
            for pv, pvv in dfmp2_wfn.variables().items():
                scf_wfn.set_variable(pv, pvv)
            # Conversely, remove DFT qcvars from MP2D
            for var in scf_wfn.variables():
                if 'DFT ' in var or 'DOUBLE-HYBRID ' in var:
                    scf_wfn.del_variable(var)
            # DFT groups dispersion with SCF. Reshuffle so dispersion with MP2 for MP2D.
            for pv in ['SCF TOTAL ENERGY', 'SCF ITERATION ENERGY', 'MP2 TOTAL ENERGY']:
                scf_wfn.set_variable(pv, scf_wfn.variable(pv) - scf_wfn.variable('DISPERSION CORRECTION ENERGY'))
            scf_wfn.set_variable('MP2D CORRELATION ENERGY', scf_wfn.variable('MP2 CORRELATION ENERGY') + scf_wfn.variable('DISPERSION CORRECTION ENERGY'))
            scf_wfn.set_variable('MP2D TOTAL ENERGY', scf_wfn.variable('MP2D CORRELATION ENERGY') + scf_wfn.variable('HF TOTAL ENERGY'))
            scf_wfn.set_variable('CURRENT ENERGY', scf_wfn.variable('MP2D TOTAL ENERGY'))
            scf_wfn.set_variable('CURRENT CORRELATION ENERGY', scf_wfn.variable('MP2D CORRELATION ENERGY'))
            scf_wfn.set_variable('CURRENT REFERENCE ENERGY', scf_wfn.variable('SCF TOTAL ENERGY'))
    # Shove variables into global space
    for k, v in scf_wfn.variables().items():
        core.set_variable(k, v)
    optstash_scf.restore()
    optstash_mp2.restore()
    return scf_wfn
def run_scf_gradient(name, **kwargs):
    """Function encoding sequence of PSI module calls for
    a SCF gradient calculation.

    Computes the analytic SCF gradient, augments it numerically with ECP
    terms when the basis carries an ECP, and stores the result on the
    returned wavefunction and in the SCF TOTAL/HF TOTAL/DFT TOTAL
    GRADIENT variables.
    """
    dft_func = False
    if "dft_functional" in kwargs:
        dft_func = True
    optstash = proc_util.scf_set_reference_local(name, is_dft=dft_func)
    # Bypass the scf call if a reference wavefunction is given
    ref_wfn = kwargs.get('ref_wfn', None)
    if ref_wfn is None:
        ref_wfn = run_scf(name, **kwargs)
    # Gradient code expects canonical (semicanonicalized) orbitals for
    # ROHF/CUHF references.
    if core.get_option('SCF', 'REFERENCE') in ['ROHF', 'CUHF']:
        ref_wfn.semicanonicalize()
    if hasattr(ref_wfn, "_disp_functor"):
        # Empirical dispersion gradient is computed separately and stashed
        # on the wavefunction for scfgrad to pick up.
        disp_grad = ref_wfn._disp_functor.compute_gradient(ref_wfn.molecule(), ref_wfn)
        ref_wfn.set_variable("-D Gradient", disp_grad)
    grad = core.scfgrad(ref_wfn)
    if ref_wfn.basisset().has_ECP():
        core.print_out("\n\n ==> Adding ECP gradient terms (computed numerically) <==\n")
        # Build a map of atom->ECP number
        old_print = ref_wfn.get_print()
        ref_wfn.set_print(0)
        # Finite-difference step (bohr) for the numerical ECP derivative.
        delta = 0.0001
        natom = ref_wfn.molecule().natom()
        mints = core.MintsHelper(ref_wfn)
        ecpgradmat = core.Matrix("ECP Gradient", natom, 3)
        ecpgradmat.zero()
        ecpgrad = np.asarray(ecpgradmat)
        # Total (alpha+beta) AO density, traced against the displaced ECP
        # integrals to obtain the ECP energy at each displaced geometry.
        Dmat = ref_wfn.Da_subset("AO")
        Dmat.add(ref_wfn.Db_subset("AO"))
        def displaced_energy(atom, displacement):
            """Return Tr[D * V_ecp] with *atom*'s basis shifted by *displacement*."""
            mints.basisset().move_atom(atom, displacement)
            E = Dmat.vector_dot(mints.ao_ecp())
            mints.basisset().move_atom(atom, -1*displacement)
            return E
        for atom in range(natom):
            for xyz in range(3):
                transvec = core.Vector3(0.0)
                transvec[xyz] += delta
                # +1 displacement
                Ep1 = displaced_energy(atom, 1*transvec)
                # -1 displacement
                Em1 = displaced_energy(atom, -1*transvec)
                # +2 displacement
                Ep2 = displaced_energy(atom, 2*transvec)
                # -2 displacement
                Em2 = displaced_energy(atom, -2*transvec)
                # Evaluate via the five-point central-difference stencil:
                # f' ~ (f(-2h) - 8 f(-h) + 8 f(h) - f(2h)) / (12 h)
                ecpgrad[atom, xyz] = (Em2 + 8*Ep1 - 8*Em1 - Ep2) / (12*delta)
        ecpgradmat.symmetrize_gradient(ref_wfn.molecule())
        ecpgradmat.print_atom_vector()
        grad.add(ecpgradmat)
        grad.print_atom_vector()
        ref_wfn.set_print(old_print)
    ref_wfn.set_gradient(grad)
    ref_wfn.set_variable("SCF TOTAL GRADIENT", grad) # P::e SCF
    if ref_wfn.functional().needs_xc():
        ref_wfn.set_variable("DFT TOTAL GRADIENT", grad) # overwritten later for DH -- TODO when DH gradients # P::e SCF
    else:
        ref_wfn.set_variable("HF TOTAL GRADIENT", grad) # P::e SCF
    # Shove variables into global space
    for k, v in ref_wfn.variables().items():
        core.set_variable(k, v)
    optstash.restore()
    return ref_wfn
def run_scf_hessian(name, **kwargs):
    """Function encoding sequence of PSI module calls for
    an SCF hessian calculation.

    Supports RHF/UHF references with integral algorithms other than
    CD/OUT_OF_CORE; stores the Hessian on the returned wavefunction and
    in the HF/SCF TOTAL HESSIAN variables.
    """
    optstash = proc_util.scf_set_reference_local(name)

    # Bypass the scf call if a reference wavefunction is given
    ref_wfn = kwargs.get('ref_wfn', None)
    if ref_wfn is None:
        ref_wfn = run_scf(name, **kwargs)

    unsupported_ref = core.get_option('SCF', 'REFERENCE') in ['ROHF', 'CUHF', 'UKS']
    unsupported_int = core.get_global_option('SCF_TYPE') in ['CD', 'OUT_OF_CORE']
    if unsupported_ref or unsupported_int:
        raise ValidationError("Only RHF/UHF Hessians are currently implemented. SCF_TYPE either CD or OUT_OF_CORE not supported")

    if hasattr(ref_wfn, "_disp_functor"):
        # Stash the empirical dispersion Hessian for scfhess to pick up.
        disp_hess = ref_wfn._disp_functor.compute_hessian(ref_wfn.molecule(), ref_wfn)
        ref_wfn.set_variable("-D Hessian", disp_hess)

    hessian = core.scfhess(ref_wfn)
    ref_wfn.set_hessian(hessian)

    # Clearly, add some logic when the reach of this fn expands
    for hess_var in ("HF TOTAL HESSIAN", "SCF TOTAL HESSIAN"):
        ref_wfn.set_variable(hess_var, hessian)  # P::e SCF
    core.set_variable("SCF TOTAL HESSIAN", hessian)  # P::e SCF

    # Shove variables into global space
    for key, val in ref_wfn.variables().items():
        core.set_variable(key, val)

    optstash.restore()
    return ref_wfn
def run_mcscf(name, **kwargs):
    """Function encoding sequence of PSI module calls for
    a multiconfigurational self-consistent-field calculation.
    """
    # Make sure the molecule the user provided is the active one
    molecule = kwargs.get('molecule', core.get_active_molecule())
    molecule.update_geometry()

    # MCSCF builds its own orbitals; a pre-converged reference is not accepted.
    if 'ref_wfn' in kwargs:
        raise ValidationError("It is not possible to pass run_mcscf a reference wavefunction")

    base_wfn = core.Wavefunction.build(molecule, core.get_global_option('BASIS'))
    return core.mcscf(base_wfn)
def run_dfmp2_gradient(name, **kwargs):
    """Function encoding sequence of PSI module calls for
    a DFMP2 gradient calculation.

    Requires a density-fitted SCF reference; returns the DFMP2
    wavefunction with its gradient and current-energy variables set.
    """
    optstash = p4util.OptionsState(
        ['DF_BASIS_SCF'],
        ['DF_BASIS_MP2'],
        ['SCF_TYPE'])

    # Alter default algorithm: DF-MP2 gradients require a DF reference.
    if not core.has_global_option_changed('SCF_TYPE'):
        core.set_global_option('SCF_TYPE', 'DF')
        core.print_out(""" SCF Algorithm Type (re)set to DF.\n""")

    if "DF" not in core.get_global_option('SCF_TYPE'):
        raise ValidationError('DF-MP2 gradients need DF-SCF reference.')

    # Bypass the scf call if a reference wavefunction is given
    ref_wfn = kwargs.get('ref_wfn')
    if ref_wfn is None:
        ref_wfn = scf_helper(name, **kwargs)  # C1 certified

    if ref_wfn.basisset().has_ECP():
        raise ValidationError('DF-MP2 gradients with an ECP are not yet available. Use dertype=0 to select numerical gradients.')

    core.tstart()
    core.print_out('\n')
    p4util.banner('DFMP2')
    core.print_out('\n')

    # Attach the MP2 fitting basis before invoking the module.
    ri_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_MP2",
                                   core.get_option("DFMP2", "DF_BASIS_MP2"),
                                   "RIFIT", core.get_global_option('BASIS'))
    ref_wfn.set_basisset("DF_BASIS_MP2", ri_basis)

    dfmp2_wfn = core.dfmp2(ref_wfn)
    mp2_grad = dfmp2_wfn.compute_gradient()
    dfmp2_wfn.set_gradient(mp2_grad)

    # Shove variables into global space
    dfmp2_wfn.set_variable("MP2 TOTAL GRADIENT", mp2_grad)  # P::e DFMP2
    dfmp2_wfn.set_variable('CURRENT ENERGY', dfmp2_wfn.variable('MP2 TOTAL ENERGY'))
    dfmp2_wfn.set_variable('CURRENT CORRELATION ENERGY', dfmp2_wfn.variable('MP2 CORRELATION ENERGY'))
    for qcvar, value in dfmp2_wfn.variables().items():
        core.set_variable(qcvar, value)

    optstash.restore()
    core.tstop()
    return dfmp2_wfn
def run_dfmp2d_gradient(name, **kwargs):
    """Encode MP2-D method."""
    # MP2-D gradient = DF-MP2 gradient + -D dispersion-correction gradient.
    dfmp2_wfn = run_dfmp2_gradient('mp2', **kwargs)

    total_grad = dfmp2_wfn.gradient().clone()
    _, disp_functor = build_disp_functor('MP2D', restricted=True)
    # Computing the dispersion gradient also deposits the dispersion
    # energy variable consumed below.
    total_grad.add(disp_functor.compute_gradient(dfmp2_wfn.molecule(), dfmp2_wfn))
    dfmp2_wfn.set_gradient(total_grad)

    dfmp2_wfn.set_variable('MP2D CORRELATION ENERGY', dfmp2_wfn.variable('MP2 CORRELATION ENERGY') + dfmp2_wfn.variable('DISPERSION CORRECTION ENERGY'))
    dfmp2_wfn.set_variable('MP2D TOTAL ENERGY', dfmp2_wfn.variable('MP2D CORRELATION ENERGY') + dfmp2_wfn.variable('HF TOTAL ENERGY'))
    dfmp2_wfn.set_variable('CURRENT ENERGY', dfmp2_wfn.variable('MP2D TOTAL ENERGY'))
    dfmp2_wfn.set_variable('CURRENT CORRELATION ENERGY', dfmp2_wfn.variable('MP2D CORRELATION ENERGY'))

    # Shove variables into global space
    for qcvar, value in dfmp2_wfn.variables().items():
        core.set_variable(qcvar, value)
    return dfmp2_wfn
def run_ccenergy(name, **kwargs):
    """Function encoding sequence of PSI module calls for
    a CCSD, CC2, and CC3 calculation.
    """
    optstash = p4util.OptionsState(
        ['TRANSQT2', 'WFN'],
        ['CCSORT', 'WFN'],
        ['CCENERGY', 'WFN'])

    # Wavefunction keyword each CC module should see, per method name.
    wfn_by_method = {
        'ccsd': 'CCSD',
        'ccsd(t)': 'CCSD_T',
        'a-ccsd(t)': 'CCSD_AT',
        'cc2': 'CC2',
        'cc3': 'CC3',
        'eom-cc2': 'EOM_CC2',
        'eom-ccsd': 'EOM_CCSD',
    }
    # Call a plain energy('ccenergy') and have full control over options,
    # incl. wfn -- in that case no WFN keywords are touched.
    if name in wfn_by_method:
        wfn_kwd = wfn_by_method[name]
        for module in ('TRANSQT2', 'CCSORT', 'CCTRANSORT', 'CCENERGY'):
            core.set_local_option(module, 'WFN', wfn_kwd)
        if name == 'a-ccsd(t)':
            # Lambda equations are solved for the asymmetric triples correction.
            core.set_local_option('CCHBAR', 'WFN', 'CCSD_AT')
            core.set_local_option('CCLAMBDA', 'WFN', 'CCSD_AT')

    # Bypass routine scf if user did something special to get it to converge
    ref_wfn = kwargs.get('ref_wfn', None)
    if ref_wfn is None:
        ref_wfn = scf_helper(name, **kwargs)  # C1 certified

    if core.get_global_option("CC_TYPE") == "DF":
        aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_CC",
                                        core.get_global_option("DF_BASIS_CC"),
                                        "RIFIT", core.get_global_option("BASIS"))
        ref_wfn.set_basisset("DF_BASIS_CC", aux_basis)

    # Ensure IWL files have been written
    proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), ref_wfn)

    # Obtain semicanonical orbitals
    if core.get_option('SCF', 'REFERENCE') == 'ROHF' and (
            (name in ['ccsd(t)', 'a-ccsd(t)', 'cc2', 'cc3', 'eom-cc2', 'eom-cc3'])
            or core.get_option('CCTRANSORT', 'SEMICANONICAL')):
        ref_wfn.semicanonicalize()

    if core.get_global_option('RUN_CCTRANSORT'):
        core.cctransort(ref_wfn)
    else:
        try:
            from psi4.driver.pasture import addins
            addins.ccsort_transqt2(ref_wfn)
        except Exception:
            raise PastureRequiredError("RUN_CCTRANSORT")

    ccwfn = core.ccenergy(ref_wfn)

    if core.get_global_option('PE'):
        # Carry the polarizable-embedding state through to the CC wfn.
        ccwfn.pe_state = ref_wfn.pe_state

    if name == 'a-ccsd(t)':
        # The a-(T) correction needs Hbar and the Lambda amplitudes.
        core.cchbar(ref_wfn)
        lambdawfn = core.cclambda(ref_wfn)
        for k, v in lambdawfn.variables().items():
            ccwfn.set_variable(k, v)

    optstash.restore()
    return ccwfn
def run_ccenergy_gradient(name, **kwargs):
    """Function encoding sequence of PSI module calls for
    a CCSD and CCSD(T) gradient calculation.
    """
    optstash = p4util.OptionsState(
        ['GLOBALS', 'DERTYPE'],
        ['CCLAMBDA', 'WFN'],
        ['CCDENSITY', 'WFN'])

    core.set_global_option('DERTYPE', 'FIRST')

    if core.get_global_option('FREEZE_CORE') not in ["FALSE", "0"]:
        raise ValidationError('Frozen core is not available for the CC gradients.')

    ccwfn = run_ccenergy(name, **kwargs)

    # Point the lambda/density (and, for CC2, Hbar) modules at the
    # wavefunction type matching the requested method.
    density_wfn = {'cc2': 'CC2', 'ccsd': 'CCSD', 'ccsd(t)': 'CCSD_T'}
    if name in density_wfn:
        if name == 'cc2':
            core.set_local_option('CCHBAR', 'WFN', 'CC2')
        core.set_local_option('CCLAMBDA', 'WFN', density_wfn[name])
        core.set_local_option('CCDENSITY', 'WFN', density_wfn[name])

    core.cchbar(ccwfn)
    core.cclambda(ccwfn)
    core.ccdensity(ccwfn)

    derivobj = core.Deriv(ccwfn)
    grad = derivobj.compute()
    del derivobj

    ccwfn.set_gradient(grad)
    ccwfn.set_variable(f"{name.upper()} TOTAL GRADIENT", grad)
    core.set_variable(f"{name.upper()} TOTAL GRADIENT", grad)
    core.set_variable("CURRENT GRADIENT", grad)

    optstash.restore()
    return ccwfn
def run_bccd(name, **kwargs):
    """Function encoding sequence of PSI module calls for
    a Brueckner CCD calculation.

    Iterates integral sorting + CC energy solution, rotating orbitals
    between passes, until the Brueckner condition is met or
    BCCD_MAXITER passes have been made. For 'bccd(t)' the perturbative
    triples correction is evaluated on the converged Brueckner orbitals.
    """
    optstash = p4util.OptionsState(
        ['TRANSQT2', 'WFN'],
        ['CCSORT', 'WFN'],
        ['CCENERGY', 'WFN'])
    if name == 'bccd':
        core.set_local_option('TRANSQT2', 'WFN', 'BCCD')
        core.set_local_option('CCSORT', 'WFN', 'BCCD')
        core.set_local_option('CCTRANSORT', 'WFN', 'BCCD')
        core.set_local_option('CCENERGY', 'WFN', 'BCCD')
    elif name == 'bccd(t)':
        core.set_local_option('TRANSQT2', 'WFN', 'BCCD_T')
        core.set_local_option('CCSORT', 'WFN', 'BCCD_T')
        core.set_local_option('CCENERGY', 'WFN', 'BCCD_T')
        core.set_local_option('CCTRANSORT', 'WFN', 'BCCD_T')
        core.set_local_option('CCTRIPLES', 'WFN', 'BCCD_T')
    else:
        raise ValidationError("proc.py:run_bccd name %s not recognized" % name)
    # Bypass routine scf if user did something special to get it to converge
    ref_wfn = kwargs.get('ref_wfn', None)
    if ref_wfn is None:
        ref_wfn = scf_helper(name, **kwargs)  # C1 certified
    # Needed for (T).
    if (core.get_option('SCF', 'REFERENCE') == 'ROHF'):
        ref_wfn.semicanonicalize()
    # Ensure IWL files have been written
    proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), ref_wfn)
    # Keep the two-electron integrals around between Brueckner passes.
    core.set_local_option('CCTRANSORT', 'DELETE_TEI', 'false')
    bcc_iter_cnt = 0
    # Choose the integral sorting backend once, outside the loop.
    if (core.get_global_option("RUN_CCTRANSORT")):
        sort_func = core.cctransort
    else:
        try:
            from psi4.driver.pasture import addins
            core.set_local_option('TRANSQT2', 'DELETE_TEI', 'false')
            sort_func = addins.ccsort_transqt2
        except Exception:
            raise PastureRequiredError("RUN_CCTRANSORT")
    # Brueckner loop: re-sort integrals over the rotated orbitals and
    # re-solve the CC equations until T1 vanishes (BRUECKNER CONVERGED)
    # or the pass limit is hit.
    while True:
        sort_func(ref_wfn)
        ref_wfn = core.ccenergy(ref_wfn)
        core.print_out('Brueckner convergence check: %s\n' % bool(core.variable('BRUECKNER CONVERGED')))
        if core.variable('BRUECKNER CONVERGED'):
            break
        if bcc_iter_cnt >= core.get_option('CCENERGY', 'BCCD_MAXITER'):
            core.print_out("\n\nWarning! BCCD did not converge within the maximum number of iterations.")
            core.print_out("You can increase the number of BCCD iterations by changing BCCD_MAXITER.\n\n")
            break
        bcc_iter_cnt += 1
    if name == 'bccd(t)':
        # (T) correction on the converged Brueckner orbitals.
        core.cctriples(ref_wfn)
    optstash.restore()
    return ref_wfn
def run_tdscf_excitations(wfn, **kwargs):
    """Run a TDSCF excited-state calculation on top of the converged SCF
    wavefunction *wfn*, as configured by the SCF TDSCF_* options.

    Observables are stashed on *wfn* by the response driver and mirrored
    into the global variable space; *wfn* itself is returned.
    """
    requested_states = core.get_option("SCF", "TDSCF_STATES")

    # some sanity checks
    if sum(requested_states) == 0:
        raise ValidationError("TDSCF: No states requested in TDSCF_STATES")

    # unwrap 1-membered list of states, regardless of symmetry
    # we will apportion states per irrep later on
    states = requested_states[0] if len(requested_states) == 1 else requested_states

    # Tie TDSCF_R_CONVERGENCE to D_CONVERGENCE in SCF reference
    if core.has_option_changed('SCF', 'TDSCF_R_CONVERGENCE'):
        r_convergence = core.get_option('SCF', 'TDSCF_R_CONVERGENCE')
    else:
        r_convergence = min(1.e-4, core.get_option('SCF', 'D_CONVERGENCE') * 1.e2)

    # "anonymous" return value, as we stash observables in the passed Wavefunction object internally
    _ = response.scf_response.tdscf_excitations(wfn,
                                                states=states,
                                                triplets=core.get_option("SCF", "TDSCF_TRIPLETS"),
                                                tda=core.get_option("SCF", "TDSCF_TDA"),
                                                r_convergence=r_convergence,
                                                maxiter=core.get_option("SCF", "TDSCF_MAXITER"),
                                                guess=core.get_option("SCF", "TDSCF_GUESS"),
                                                verbose=core.get_option("SCF", "TDSCF_PRINT"),
                                                coeff_cutoff=core.get_option("SCF", "TDSCF_COEFF_CUTOFF"),
                                                tdm_print=core.get_option("SCF", "TDSCF_TDM_PRINT"))

    # Shove variables into global space
    for qcvar, value in wfn.variables().items():
        core.set_variable(qcvar, value)
    return wfn
def run_tdscf_energy(name, **kwargs):
    """Compute a ground-state SCF reference (unless one is supplied via
    ``ref_wfn``) and then run TDSCF excited states on top of it.

    The ground-state method is obtained by removing the leading "td-"
    prefix from *name* (e.g. ``td-b3lyp`` -> ``b3lyp``).
    """
    # Get a wfn in case we aren't given one
    ref_wfn = kwargs.get('ref_wfn', None)
    if ref_wfn is None:
        if name is None:
            raise ValidationError("TDSCF: No reference wave function!")
        else:
            # BUGFIX: name.strip('td-') strips any of the characters
            # {t, d, -} from *both* ends, mangling functional names such
            # as 'td-wb97x-d' -> 'wb97x'. Remove only the 'td-' prefix.
            scf_name = name[len('td-'):] if name.startswith('td-') else name
            ref_wfn = run_scf(scf_name, **kwargs)

    return run_tdscf_excitations(ref_wfn, **kwargs)
def run_scf_property(name, **kwargs):
    """Function encoding sequence of PSI module calls for
    SCF calculations. This is a simple alias to :py:func:`~proc.run_scf`
    since SCF properties all handled through oeprop.

    Requested properties (``kwargs['properties']``) are partitioned into
    one-electron (OEProp) and linear-response (CPSCF) sets; unknown
    names raise with near-match suggestions.
    """
    core.tstart()
    optstash = proc_util.scf_set_reference_local(name)
    properties = kwargs.pop('properties')

    # What response do we need?
    response_list_vals = list(response.scf_response.property_dicts)
    oeprop_list_vals = core.OEProp.valid_methods
    oe_properties = []
    linear_response = []
    unknown_property = []
    # Partition each requested property into OEProp vs linear response.
    for prop in properties:
        prop = prop.upper()
        if prop in response_list_vals:
            linear_response.append(prop)
        elif (prop in oeprop_list_vals) or ("MULTIPOLE(" in prop):
            oe_properties.append(prop)
        else:
            unknown_property.append(prop)

    # The dipole is always computed.
    if "DIPOLE" not in oe_properties:
        oe_properties.append("DIPOLE")

    # Throw if we dont know what something is
    if len(unknown_property):
        complete_options = oeprop_list_vals + response_list_vals
        alt_method_name = p4util.text.find_approximate_string_matches(unknown_property[0],
                                                                      complete_options, 2)
        alternatives = ""
        if len(alt_method_name) > 0:
            alternatives = " Did you mean? %s" % (" ".join(alt_method_name))
        raise ValidationError("SCF Property: Feature '%s' is not recognized. %s" % (unknown_property[0], alternatives))

    # Validate OEProp
    if len(oe_properties):
        proc_util.oeprop_validator(oe_properties)

    # Linear response needs the JK object kept alive after SCF.
    if len(linear_response):
        optstash_jk = p4util.OptionsState(["SAVE_JK"])
        core.set_global_option("SAVE_JK", True)

    # Compute the Wavefunction
    scf_wfn = run_scf(name, scf_do_properties=False, do_timer=False, **kwargs)

    # Run OEProp
    oe = core.OEProp(scf_wfn)
    oe.set_title(name.upper())
    for prop in oe_properties:
        oe.add(prop.upper())
    oe.compute()
    scf_wfn.oeprop = oe

    # Always must set SCF dipole (retire components at v1.5)
    with warnings.catch_warnings():
        # Reading the deprecated per-component dipole variables warns;
        # suppress while mirroring them to their SCF aliases.
        warnings.simplefilter("ignore")
        for cart in ["X", "Y", "Z"]:
            core.set_variable("SCF DIPOLE " + cart, core.variable(name + " DIPOLE " + cart))
    core.set_variable("SCF DIPOLE", core.variable(name + " DIPOLE")) # P::e SCF

    # Run Linear Respsonse
    if len(linear_response):
        core.prepare_options_for_module("SCF")
        ret = response.scf_response.cpscf_linear_response(scf_wfn, *linear_response,
                                                          conv_tol = core.get_global_option("SOLVER_CONVERGENCE"),
                                                          max_iter = core.get_global_option("SOLVER_MAXITER"),
                                                          print_lvl = (core.get_global_option("PRINT") + 1))
        optstash_jk.restore()

    core.tstop()
    optstash.restore()
    return scf_wfn
def run_cc_property(name, **kwargs):
    """Function encoding sequence of PSI module calls for
    all CC property calculations.

    Sorts the requested ``properties`` (a required kwarg) into one-electron,
    two-electron, linear-response, and excited-state categories, runs the
    needed CC modules (ccenergy, cchbar, cclambda, ccdensity, ccresponse,
    cceom), and evaluates one-electron properties through OEProp.

    Parameters
    ----------
    name : str
        Lowercase method name; one of 'ccsd', 'cc2', 'eom-ccsd', 'eom-cc2'.
    kwargs
        Must contain 'properties', a list of property name strings.

    Returns
    -------
    Wavefunction or None
        The CC wavefunction, or None when the distributed ROA job path
        is taken (run_roa handles everything itself).

    Raises
    ------
    ValidationError
        On a missing 'properties' keyword, an unsupported method name, or an
        unsupported method/property/reference combination.
    """
    optstash = p4util.OptionsState(
        ['WFN'],
        ['DERTYPE'],
        ['ONEPDM'],
        ['PROPERTY'],
        ['CCLAMBDA', 'R_CONVERGENCE'],
        ['CCEOM', 'R_CONVERGENCE'],
        ['CCEOM', 'E_CONVERGENCE'])  # yapf:disable

    oneel_properties = core.OEProp.valid_methods
    twoel_properties = []
    response_properties = ['POLARIZABILITY', 'ROTATION', 'ROA', 'ROA_TENSOR']
    excited_properties = ['OSCILLATOR_STRENGTH', 'ROTATIONAL_STRENGTH']

    one = []
    two = []
    response = []
    excited = []
    invalid = []

    if 'properties' in kwargs:
        properties = kwargs['properties']
        # Uppercase each requested property and sort it into its category.
        for prop in properties:
            prop = prop.upper()
            if prop in oneel_properties:
                one.append(prop)
            elif prop in twoel_properties:
                two.append(prop)
            elif prop in response_properties:
                response.append(prop)
            elif prop in excited_properties:
                excited.append(prop)
            else:
                invalid.append(prop)
    else:
        raise ValidationError("""The "properties" keyword is required with the property() function.""")

    # People are used to requesting dipole/quadrupole and getting dipole,quadrupole,mulliken_charges and NO_occupations
    if ('DIPOLE' in one) or ('QUADRUPOLE' in one):
        one = list(set(one + ['DIPOLE', 'QUADRUPOLE', 'MULLIKEN_CHARGES', 'NO_OCCUPATIONS']))

    n_one = len(one)
    n_two = len(two)
    n_response = len(response)
    n_excited = len(excited)
    n_invalid = len(invalid)

    if n_invalid > 0:
        print("""The following properties are not currently supported: %s""" % invalid)

    if n_excited > 0 and (name not in ['eom-ccsd', 'eom-cc2']):
        raise ValidationError("""Excited state CC properties require EOM-CC2 or EOM-CCSD.""")

    if (name in ['eom-ccsd', 'eom-cc2']) and n_response > 0:
        raise ValidationError("""Cannot (yet) compute response properties for excited states.""")

    # NOTE: entries in `response` were uppercased above, so the membership
    # test must use 'ROA' (previously 'roa', which never matched, so the
    # distributed ROA driver was silently skipped).
    if 'ROA' in response:
        # Perform distributed roa job
        run_roa(name, **kwargs)
        return  # Don't do anything further

    if (n_one > 0 or n_two > 0) and (n_response > 0):
        print("""Computing both density- and response-based properties.""")

    if n_response > 0:
        if ("ref_wfn" in kwargs and not kwargs["ref_wfn"].same_a_b_orbs()) or core.get_option('SCF', 'REFERENCE') != 'RHF':
            raise ValidationError("Non-RHF CC response properties are not implemented.")

    if name in ['ccsd', 'cc2', 'eom-ccsd', 'eom-cc2']:
        this_name = name.upper().replace('-', '_')
        core.set_global_option('WFN', this_name)
        ccwfn = run_ccenergy(name, **kwargs)
        # run_ccenergy may reset WFN; restore it for the modules below.
        core.set_global_option('WFN', this_name)
    else:
        raise ValidationError(f"CC property name {name.upper()} not recognized")

    # Need cchbar for everything
    core.cchbar(ccwfn)

    # Need ccdensity at this point only for density-based props
    if n_one > 0 or n_two > 0:
        if name == 'eom-ccsd':
            core.set_global_option('WFN', 'EOM_CCSD')
            core.set_global_option('DERTYPE', 'NONE')
            core.cceom(ccwfn)
        elif name == 'eom-cc2':
            core.set_global_option('WFN', 'EOM_CC2')
            core.set_global_option('DERTYPE', 'NONE')
            core.cceom(ccwfn)
        core.set_global_option('DERTYPE', 'NONE')
        if core.get_option('CCDENSITY', 'OPDM_RELAX') or n_two > 0:
            # WARNING!!! A one-particle property computed _with_ a two-particle property will differ
            # from a one-particle property computed by itself. There are no two-particle properties at
            # present, so we can kick the issue further down the road.
            core.set_global_option('OPDM_ONLY', 'FALSE')
        else:
            core.set_global_option('OPDM_ONLY', 'TRUE')
        core.cclambda(ccwfn)
        core.ccdensity(ccwfn)

    # Need ccresponse only for response-type props
    if n_response > 0:
        core.set_global_option('DERTYPE', 'RESPONSE')
        core.cclambda(ccwfn)
        for prop in response:
            core.set_global_option('PROPERTY', prop)
            core.ccresponse(ccwfn)

    # Excited-state transition properties
    if n_excited > 0:
        if name == 'eom-ccsd':
            core.set_global_option('WFN', 'EOM_CCSD')
        elif name == 'eom-cc2':
            core.set_global_option('WFN', 'EOM_CC2')
        else:
            raise ValidationError("""Unknown excited-state CC wave function.""")
        core.set_global_option('DERTYPE', 'NONE')
        if core.get_option('CCDENSITY', 'OPDM_RELAX'):
            core.set_global_option('OPDM_ONLY', 'FALSE')
        else:
            core.set_global_option('OPDM_ONLY', 'TRUE')
        # Tight convergence unnecessary for transition properties
        core.set_local_option('CCLAMBDA', 'R_CONVERGENCE', 1e-4)
        core.set_local_option('CCEOM', 'R_CONVERGENCE', 1e-4)
        core.set_local_option('CCEOM', 'E_CONVERGENCE', 1e-5)
        core.cceom(ccwfn)
        core.cclambda(ccwfn)
        core.ccdensity(ccwfn)

    if n_one > 0:
        # call oe prop for GS density
        oe = core.OEProp(ccwfn)
        oe.set_title(name.upper())
        for oe_name in one:
            oe.add(oe_name.upper())
        oe.compute()

        # call oe prop for each ES density
        if name.startswith('eom'):
            # copy GS CC DIP/QUAD ... to CC ROOT 0 DIP/QUAD ... if we are doing multiple roots
            # NOTE: `one` holds uppercased names, so test 'DIPOLE'/'QUADRUPOLE'
            # (previously lowercase, which never matched and skipped the copies).
            # retire components at v1.5
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                if 'DIPOLE' in one:
                    core.set_variable("CC ROOT 0 DIPOLE X", core.variable("CC DIPOLE X"))
                    core.set_variable("CC ROOT 0 DIPOLE Y", core.variable("CC DIPOLE Y"))
                    core.set_variable("CC ROOT 0 DIPOLE Z", core.variable("CC DIPOLE Z"))
                if 'QUADRUPOLE' in one:
                    core.set_variable("CC ROOT 0 QUADRUPOLE XX", core.variable("CC QUADRUPOLE XX"))
                    core.set_variable("CC ROOT 0 QUADRUPOLE XY", core.variable("CC QUADRUPOLE XY"))
                    core.set_variable("CC ROOT 0 QUADRUPOLE XZ", core.variable("CC QUADRUPOLE XZ"))
                    core.set_variable("CC ROOT 0 QUADRUPOLE YY", core.variable("CC QUADRUPOLE YY"))
                    core.set_variable("CC ROOT 0 QUADRUPOLE YZ", core.variable("CC QUADRUPOLE YZ"))
                    core.set_variable("CC ROOT 0 QUADRUPOLE ZZ", core.variable("CC QUADRUPOLE ZZ"))
            if 'DIPOLE' in one:
                core.set_variable("CC ROOT 0 DIPOLE", core.variable("CC DIPOLE"))
                # core.set_variable("CC ROOT n DIPOLE", core.variable("CC DIPOLE"))  # P::e CCENERGY
            if 'QUADRUPOLE' in one:
                core.set_variable("CC ROOT 0 QUADRUPOLE", core.variable("CC QUADRUPOLE"))
                # core.set_variable("CC ROOT n QUADRUPOLE", core.variable("CC QUADRUPOLE"))  # P::e CCENERGY

            n_root = sum(core.get_global_option("ROOTS_PER_IRREP"))
            for rn in range(n_root):
                oe.set_title("CC ROOT {}".format(rn + 1))
                Da = ccwfn.variable("CC ROOT {} Da".format(rn + 1))
                oe.set_Da_so(Da)
                if core.get_global_option("REFERENCE") == "UHF":
                    Db = ccwfn.variable("CC ROOT {} Db".format(rn + 1))
                    oe.set_Db_so(Db)
                oe.compute()

    core.set_global_option('WFN', 'SCF')
    core.revoke_global_option_changed('WFN')
    core.set_global_option('DERTYPE', 'NONE')
    core.revoke_global_option_changed('DERTYPE')

    optstash.restore()
    return ccwfn
def run_dfmp2_property(name, **kwargs):
    """Function encoding sequence of PSI module calls for
    a DFMP2 property calculation.

    Requires the 'properties' kwarg (consumed here); computes the relaxed
    MP2 one-particle density via a gradient evaluation, then runs OEProp
    over the requested one-electron properties.

    Parameters
    ----------
    name : str
        'mp2' or 'scs-mp2'; selects which flavor becomes CURRENT ENERGY.
    kwargs
        Must contain 'properties'; may contain 'ref_wfn' to skip the SCF.

    Returns
    -------
    Wavefunction
        The DFMP2 wavefunction with an attached ``oeprop`` object.

    Raises
    ------
    ValidationError
        If the SCF reference is not density-fitted.
    """
    optstash = p4util.OptionsState(
        ['DF_BASIS_SCF'],
        ['DF_BASIS_MP2'],
        ['ONEPDM'],
        ['OPDM_RELAX'],
        ['SCF_TYPE'])

    # A relaxed one-particle density is required for properties.
    core.set_global_option('ONEPDM', 'TRUE')
    core.set_global_option('OPDM_RELAX', 'TRUE')

    # Alter default algorithm
    if not core.has_global_option_changed('SCF_TYPE'):
        core.set_global_option('SCF_TYPE', 'DF')  # local set insufficient b/c SCF option read in DFMP2
        core.print_out(""" SCF Algorithm Type (re)set to DF.\n""")

    if 'DF' not in core.get_global_option('SCF_TYPE'):
        raise ValidationError('DF-MP2 properties need DF-SCF reference.')

    properties = kwargs.pop('properties')
    proc_util.oeprop_validator(properties)

    # Bypass the scf call if a reference wavefunction is given
    ref_wfn = kwargs.get('ref_wfn', None)
    if ref_wfn is None:
        ref_wfn = scf_helper(name, scf_do_properties=False, use_c1=True, **kwargs)  # C1 certified

    aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_MP2",
                                    core.get_option("DFMP2", "DF_BASIS_MP2"),
                                    "RIFIT", core.get_global_option('BASIS'))
    ref_wfn.set_basisset("DF_BASIS_MP2", aux_basis)

    core.tstart()
    core.print_out('\n')
    p4util.banner('DFMP2')
    core.print_out('\n')

    dfmp2_wfn = core.dfmp2(ref_wfn)
    # The gradient itself is discarded; the call is needed because it builds
    # the relaxed one-particle density that OEProp reads below.
    dfmp2_wfn.compute_gradient()

    # Alias the requested flavor's energies onto the CURRENT variables.
    if name == 'scs-mp2':
        dfmp2_wfn.set_variable('CURRENT ENERGY', dfmp2_wfn.variable('SCS-MP2 TOTAL ENERGY'))
        dfmp2_wfn.set_variable('CURRENT CORRELATION ENERGY', dfmp2_wfn.variable('SCS-MP2 CORRELATION ENERGY'))
    elif name == 'mp2':
        dfmp2_wfn.set_variable('CURRENT ENERGY', dfmp2_wfn.variable('MP2 TOTAL ENERGY'))
        dfmp2_wfn.set_variable('CURRENT CORRELATION ENERGY', dfmp2_wfn.variable('MP2 CORRELATION ENERGY'))

    # Run OEProp
    oe = core.OEProp(dfmp2_wfn)
    oe.set_title(name.upper())
    for prop in properties:
        oe.add(prop.upper())
    oe.compute()
    dfmp2_wfn.oeprop = oe

    # Shove variables into global space
    for k, v in dfmp2_wfn.variables().items():
        core.set_variable(k, v)

    optstash.restore()
    core.tstop()
    return dfmp2_wfn
def _clean_detci(keep: bool = True):
    """Close any DETCI scratch (CI vector) files that are still open.

    Parameters
    ----------
    keep
        When True, closed files are kept on disk; when False they are
        deleted on close.
    """
    psio = core.IO.shared_object()
    cifl = core.get_option("DETCI", "CI_FILE_START")
    # DETCI uses four consecutive unit numbers starting at CI_FILE_START.
    for fl in range(cifl, cifl + 4):
        if psio.open_check(fl):
            psio.close(fl, keep)
def run_detci_property(name, **kwargs):
    """Function encoding sequence of PSI module calls for
    a configuration interaction calculation, namely FCI,
    CIn, MPn, and ZAPTn, computing properties.

    Splits the requested 'properties' kwarg into one-electron and
    transition-moment lists, runs DETCI (or DETCAS), then evaluates
    properties for the ground state, every averaged root, and the
    0 -> n transition densities.

    Returns
    -------
    Wavefunction
        The CI wavefunction with an attached ``oeprop`` object.
    """
    optstash = p4util.OptionsState(
        ['OPDM'],
        ['TDM'])

    # Find valid properties
    valid_transition = ['TRANSITION_DIPOLE', 'TRANSITION_QUADRUPOLE']

    ci_prop = []
    ci_trans = []
    properties = kwargs.pop('properties')
    for prop in properties:
        if prop.upper() in valid_transition:
            ci_trans.append(prop)
        else:
            ci_prop.append(prop)

    proc_util.oeprop_validator(ci_prop)

    # Densities (and transition densities, if needed) must be produced.
    core.set_global_option('OPDM', 'TRUE')
    if len(ci_trans):
        core.set_global_option('TDM', 'TRUE')

    # Compute
    if name in ['mcscf', 'rasscf', 'casscf']:
        ciwfn = run_detcas(name, **kwargs)
    else:
        ciwfn = run_detci(name, **kwargs)

    # All property names are just CI
    if 'CI' in name.upper():
        name = 'CI'

    states = core.get_global_option('avg_states')
    nroots = core.get_global_option('num_roots')
    # Fall back to all roots when the averaged-state list does not cover them.
    if len(states) != nroots:
        states = range(nroots)

    # Run OEProp
    oe = core.OEProp(ciwfn)
    oe.set_title(name.upper())
    for prop in ci_prop:
        oe.add(prop.upper())

    # Compute "the" CI density
    oe.compute()
    ciwfn.oeprop = oe

    # If we have more than one root, compute all data
    if nroots > 1:
        core.print_out("\n ===> %s properties for all CI roots <=== \n\n" % name.upper())
        for root in states:
            oe.set_title("%s ROOT %d" % (name.upper(), root))
            # Alpha density is always set; beta is only needed when the
            # alpha and beta densities differ (open-shell case).
            oe.set_Da_mo(ciwfn.get_opdm(root, root, "A", True))
            if not ciwfn.same_a_b_dens():
                oe.set_Db_mo(ciwfn.get_opdm(root, root, "B", True))
            oe.compute()

    # Transition density matrices
    if (nroots > 1) and len(ci_trans):
        oe.clear()
        for tprop in ci_trans:
            oe.add(tprop.upper())
        core.print_out("\n ===> %s properties for all CI transition density matrices <=== \n\n" % name.upper())
        for root in states[1:]:
            oe.set_title("%s ROOT %d -> ROOT %d" % (name.upper(), 0, root))
            oe.set_Da_mo(ciwfn.get_opdm(0, root, "A", True))
            if not ciwfn.same_a_b_dens():
                oe.set_Db_mo(ciwfn.get_opdm(0, root, "B", True))
            oe.compute()

    _clean_detci()
    optstash.restore()
    return ciwfn
def run_eom_cc(name, **kwargs):
    """Function encoding sequence of PSI module calls for
    an EOM-CC calculation, namely EOM-CC2, EOM-CCSD, and EOM-CC3.

    Sets the WFN keyword for every CC module, runs the ground-state CC
    energy, then cchbar and cceom.

    Raises
    ------
    ValidationError
        For an unrecognized method name or an unsupported reference for
        EOM-CC2. (Previously an unrecognized name fell through to a
        NameError on the undefined ``ref_wfn``.)
    """
    optstash = p4util.OptionsState(
        ['TRANSQT2', 'WFN'],
        ['CCSORT', 'WFN'],
        ['CCENERGY', 'WFN'],
        ['CCHBAR', 'WFN'],
        ['CCEOM', 'WFN'])

    if name == 'eom-cc2':
        # EOM-CC2 is available only for RHF and UHF references.
        user_ref = core.get_option('CCENERGY', 'REFERENCE')
        if (user_ref != 'RHF') and (user_ref != 'UHF'):
            raise ValidationError('Reference %s for EOM-CC2 is not available.' % user_ref)

    # Map the driver name to (WFN keyword, ground-state method for ccenergy).
    eom_map = {
        'eom-ccsd': ('EOM_CCSD', 'ccsd'),
        'eom-cc2': ('EOM_CC2', 'cc2'),
        'eom-cc3': ('EOM_CC3', 'cc3'),
    }
    if name not in eom_map:
        raise ValidationError(f"EOM-CC method name {name.upper()} not recognized")
    wfn_keyword, ground_name = eom_map[name]
    for module in ['TRANSQT2', 'CCSORT', 'CCENERGY', 'CCHBAR', 'CCEOM']:
        core.set_local_option(module, 'WFN', wfn_keyword)
    ref_wfn = run_ccenergy(ground_name, **kwargs)

    core.cchbar(ref_wfn)
    core.cceom(ref_wfn)

    optstash.restore()
    return ref_wfn
# TODO ask if all these cc modules not actually changing wfn
def run_eom_cc_gradient(name, **kwargs):
    """Function encoding sequence of PSI module calls for
    an EOM-CCSD gradient calculation.

    Runs the EOM-CC energy, then two lambda/density passes (xi amplitudes
    first, zeta second) before assembling the gradient with core.Deriv.
    """
    optstash = p4util.OptionsState(
        ['CCDENSITY', 'XI'],
        ['CCDENSITY', 'ZETA'],
        ['CCLAMBDA', 'ZETA'],
        ['DERTYPE'],
        ['CCDENSITY', 'WFN'],
        ['CCLAMBDA', 'WFN'])

    core.set_global_option('DERTYPE', 'FIRST')

    # Only the EOM-CCSD gradient sequence is implemented.
    if name != 'eom-ccsd':
        core.print_out('DGAS: proc.py:1599 hitting an undefined sequence')
        core.clean()
        raise ValueError('Hit a wall in proc.py:1599')
    core.set_local_option('CCLAMBDA', 'WFN', 'EOM_CCSD')
    core.set_local_option('CCDENSITY', 'WFN', 'EOM_CCSD')
    eom_wfn = run_eom_cc(name, **kwargs)

    # Pass 1: xi amplitudes (ZETA off); pass 2: zeta amplitudes (XI off).
    for zeta_flag, xi_flag in (('FALSE', 'TRUE'), ('TRUE', 'FALSE')):
        core.set_local_option('CCLAMBDA', 'ZETA', zeta_flag)
        core.set_local_option('CCDENSITY', 'ZETA', zeta_flag)
        core.set_local_option('CCDENSITY', 'XI', xi_flag)
        core.cclambda(eom_wfn)
        core.ccdensity(eom_wfn)

    deriv_builder = core.Deriv(eom_wfn)
    eom_wfn.set_gradient(deriv_builder.compute())

    optstash.restore()
    return eom_wfn
def run_adc_deprecated(*args, **kwargs):
    """Deprecated alias for ADC(2); forwards to :func:`select_adc2`.

    Emits a DeprecationWarning; the 'adc' method key is slated for removal
    in Psi4 1.6.
    """
    # Message fixed: the implicit string concatenation previously produced
    # "instead.The method" (missing space) and "removed Psi4 1.6".
    warnings.warn("The method 'adc' has been deprecated, please use 'adc2' instead. "
                  "The method key 'adc' will be removed in Psi4 1.6.", DeprecationWarning)
    return select_adc2(*args, **kwargs)
def run_adc(name, **kwargs):
    """Function encoding sequence of PSI module calls for
    an algebraic diagrammatic construction calculation.

    .. caution:: Get rid of active molecule lines- should be handled in energy.
    """
    # The built-in ADC module supports closed-shell (RHF) references only.
    if core.get_option('ADC', 'REFERENCE') != 'RHF':
        raise ValidationError('ADC requires reference RHF')

    # Run an SCF first unless the caller supplied a reference wavefunction.
    ref_wfn = kwargs.get('ref_wfn')
    if ref_wfn is None:
        ref_wfn = scf_helper(name, **kwargs)

    # Ensure IWL files have been written
    proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), ref_wfn)

    warnings.warn("Using built-in `adc` module instead of add-on `adcc` interface is deprecated due "
                  "to certain wrong results, and in 1.7, it will stop working.", category=FutureWarning)
    deprecation_banner = ("\n\t\t\t\t!!!!! WARNING !!!!!\n" +
                          "\t\tThe built-in ADC(2) method may give incorrect results if\n"
                          "\t\tmultiple roots are requested, due to an error in the Davidson solver,\n"
                          "\t\tand is no longer maintained. It is slated for removal in Psi4 1.7.\n"
                          "\t\tUse of the Psi interface to `adcc` instead is strongly recommended.\n")

    # Print the banner both before and after the run so it is hard to miss.
    core.print_out(deprecation_banner)
    adc_wfn = core.adc(ref_wfn)
    core.print_out(deprecation_banner)
    return adc_wfn
def run_adcc(name, **kwargs):
    """Prepare and run an ADC calculation in adcc, interpret the result and return
    as a wavefunction.

    Translates Psi4 ADC options into adcc keyword arguments, dispatches to
    the matching adcc driver function (adc1..adc3 and CVS variants), then
    copies ground-state (MPn) and excitation results onto a fresh
    Wavefunction. The adcc ExcitedStates object is attached as
    ``adc_wfn.adcc_state`` for downstream property evaluation.

    Raises ValidationError if adcc is unavailable, the reference is not
    RHF/UHF, the method name is unrecognized, or options are inconsistent.
    """
    # TODO Maybe it would improve readability if this function was spilt
    # up and the whole thing went to a separate file (like for sapt,
    # interface_cfour.py, ...

    try:
        import adcc
        from adcc.exceptions import InvalidReference
    except ModuleNotFoundError:
        raise ValidationError("adcc extras qc_module not available. Try installing "
                              "via 'pip install adcc' or 'conda install -c adcc adcc'.")

    if core.get_option('ADC', 'REFERENCE') not in ["RHF", "UHF"]:
        raise ValidationError('adcc requires reference RHF or UHF')

    # Bypass the scf call if a reference wavefunction is given
    ref_wfn = kwargs.pop('ref_wfn', None)
    if ref_wfn is None:
        ref_wfn = scf_helper(name, use_c1=True, **kwargs)

    # Start timer
    do_timer = kwargs.pop("do_timer", True)
    if do_timer:
        core.tstart()

    #
    # Build kwargs for adcc
    #
    kwargs.pop("molecule", None)
    # Frozen-core / frozen-virtual counts come from the reference wavefunction.
    if ref_wfn.frzcpi()[0] > 0:
        kwargs["frozen_core"] = ref_wfn.frzcpi()[0]
    if ref_wfn.frzvpi()[0] > 0:
        kwargs["frozen_virtual"] = ref_wfn.frzvpi()[0]
    if core.get_option("ADC", "NUM_CORE_ORBITALS"):
        kwargs["core_orbitals"] = core.get_option("ADC", "NUM_CORE_ORBITALS")

    # Tie the solver tolerance to the SCF accuracy unless explicitly set
    # (negative R_CONVERGENCE means "use the default").
    scf_accuracy = max(core.get_option("SCF", "E_CONVERGENCE"),
                       core.get_option("SCF", "D_CONVERGENCE"))
    if core.get_option("ADC", "R_CONVERGENCE") < 0:
        kwargs["conv_tol"] = max(100 * scf_accuracy, 1e-6)
    else:
        kwargs["conv_tol"] = core.get_option("ADC", "R_CONVERGENCE")

    # adcc runs in C1, so only a single irrep entry makes sense.
    n_roots = core.get_option('ADC', 'ROOTS_PER_IRREP')
    if len(n_roots) > 1:
        raise ValidationError("adcc can only deal with a single irrep.")
    kwargs["n_states"] = n_roots[0]

    if core.get_option("ADC", "NUM_GUESSES") > 0:
        kwargs["n_guesses"] = core.get_option("ADC", "NUM_GUESSES")
    if core.get_option("ADC", "MAX_NUM_VECS") > 0:
        kwargs["max_subspace"] = core.get_option("ADC", "MAX_NUM_VECS")

    # Validate the excitation "kind" against the reference type.
    kind = core.get_option("ADC", "KIND").lower()
    if isinstance(ref_wfn, core.UHF):
        if not core.has_option_changed("ADC", "KIND"):
            kind = "any"
        elif kind not in ["any", "spin_flip"]:
            raise ValidationError("For UHF references the only valid values for 'KIND' are "
                                  "'SPIN_FLIP' or 'ANY' and not '{}.".format(kind.upper()))
    elif kind not in ["singlet", "triplet", "any"]:
        raise ValidationError("For RHF references the value '{}' for 'KIND' is "
                              "not supported.".format(kind.upper()))
    kwargs["kind"] = kind
    kwargs["max_iter"] = core.get_option("ADC", "MAXITER")

    #
    # Determine ADC function method from adcc to run ADC
    #
    adcrunner = {
        "cvs-adc(1)": adcc.cvs_adc1, "cvs-adc(2)": adcc.cvs_adc2,
        "cvs-adc(2)-x": adcc.cvs_adc2x, "cvs-adc(3)": adcc.cvs_adc3,
        "adc(1)": adcc.adc1, "adc(2)": adcc.adc2,
        "adc(2)-x": adcc.adc2x, "adc(3)": adcc.adc3,
    }
    if name not in adcrunner:
        raise ValidationError(f"Unsupported ADC method: {name}")

    # CVS variants require (and non-CVS variants forbid) core orbitals.
    if "cvs" in name and "core_orbitals" not in kwargs:
        raise ValidationError("If a CVS-ADC method is requested, the NUM_CORE_ORBITALS option "
                              "needs to be set.")
    if "core_orbitals" in kwargs and "cvs" not in name:
        raise ValidationError("The NUM_CORE_ORBITALS option needs to be set to '0' or absent "
                              "unless a CVS ADC method is requested.")
    if "cvs" in name and kwargs["kind"] in ["spin_flip"]:
        raise ValidationError("Spin-flip for CVS-ADC variants is not available.")

    #
    # Check for unsupported options
    #
    # These built-in-ADC options have no adcc equivalent; reject rather
    # than silently ignore them.
    for option in ["PR", "NORM_TOLERANCE", "POLE_MAXITER", "SEM_MAXITER",
                   "NEWTON_CONVERGENCE", "MEMORY", "CACHELEVEL", "NUM_AMPS_PRINT"]:
        if core.has_option_changed("ADC", option):
            raise ValidationError(f"ADC backend adcc does not support option '{option}'")

    #
    # Launch the rocket
    #
    # Copy thread setup from psi4
    try:
        adcc.set_n_threads(core.get_num_threads())
    except AttributeError:
        # Before adcc 0.13.3:
        adcc.thread_pool.reinit(core.get_num_threads(), core.get_num_threads())

    # Hack to direct the stream-like interface adcc expects to the string interface of Psi4 core
    class CoreStream:
        def write(self, text):
            core.print_out(text)

    core.print_out("\n" + adcc.banner(colour=False) + "\n")
    try:
        state = adcrunner[name](ref_wfn, **kwargs, output=CoreStream())
    except InvalidReference as ex:
        raise ValidationError("Cannot run adcc because the passed reference wavefunction is "
                              "not supported in adcc. Check Psi4 SCF parameters. adcc reports: "
                              f"{ex}")
    except Exception as ex:
        raise ValidationError("Unknown exception occured while "
                              f"running adcc: '{ex}' ({type(ex).__name__})")
    core.print_out("\n")
    # TODO Should a non-converged calculation throw?

    #
    # Interpret results
    #
    # Note: This wavefunction is not consistent ... the density
    # is e.g. not the proper one (i.e. not the MP(n) one)
    adc_wfn = core.Wavefunction(ref_wfn.molecule(), ref_wfn.basisset())
    adc_wfn.shallow_copy(ref_wfn)
    adc_wfn.set_reference_wavefunction(ref_wfn)
    adc_wfn.set_name(name)
    adc_wfn.set_module("adcc")

    # MP(3) energy for CVS-ADC(3) calculations is still a missing feature in adcc
    # ... we store this variant here to be able to fall back to MP(2) energies.
    is_cvs_adc3 = state.method.level >= 3 and state.ground_state.has_core_occupied_space

    # Ground-state energies
    mp = state.ground_state
    mp_energy = mp.energy(state.method.level if not is_cvs_adc3 else 2)
    mp_corr = 0.0
    if state.method.level > 1:
        core.print_out("Ground state energy breakdown:\n")
        core.print_out(" Energy SCF {0:15.8g} [Eh]\n".format(ref_wfn.energy()))
        for level in range(2, state.method.level + 1):
            # Skip the unavailable MP(3) correction for CVS-ADC(3).
            if level >= 3 and is_cvs_adc3:
                continue
            energy = mp.energy_correction(level)
            mp_corr += energy
            adc_wfn.set_variable(f"MP{level} CORRELATION ENERGY", energy)
            adc_wfn.set_variable(f"MP{level} TOTAL ENERGY", mp.energy(level))
            core.print_out(f" Energy correlation MP{level} {energy:15.8g} [Eh]\n")
        core.print_out(" Energy total {0:15.8g} [Eh]\n".format(mp_energy))
    adc_wfn.set_variable("CURRENT CORRELATION ENERGY", mp_corr)  # P::e ADC
    adc_wfn.set_variable("CURRENT ENERGY", mp_energy)  # P::e ADC

    # Set results of excited-states computation
    # TODO Does not work: Can't use strings
    # adc_wfn.set_variable("excitation kind", state.kind)
    adc_wfn.set_variable("ADC ITERATIONS", state.n_iter)  # P::e ADC
    adc_wfn.set_variable(name + " excitation energies",
                         core.Matrix.from_array(state.excitation_energy.reshape(-1, 1)))
    adc_wfn.set_variable("number of excited states", len(state.excitation_energy))

    core.print_out("\n\n ==> Excited states summary <== \n")
    core.print_out("\n" + state.describe(oscillator_strengths=False) + "\n")

    # TODO Setting the excitation amplitude elements inside the wavefunction is a little
    # challenging, since for each excitation vector one needs to extract the elements
    # and map the indices from the adcc to the Psi4 convention. For this reason it
    # is not yet done.
    core.print_out("\n ==> Dominant amplitudes per state <== \n\n")
    tol_ampl = core.get_option("ADC", "CUTOFF_AMPS_PRINT")
    core.print_out(state.describe_amplitudes(tolerance=tol_ampl) + "\n\n")

    # Shove variables into global space
    for k, v in adc_wfn.variables().items():
        core.set_variable(k, v)

    if do_timer:
        core.tstop()

    # Keep the adcc state around for property evaluation (run_adcc_property).
    adc_wfn.adcc_state = state
    return adc_wfn
def run_adcc_property(name, **kwargs):
    """Run a ADC excited-states property calculation in adcc
    and return the resulting properties.

    Validates the requested 'properties' kwarg against the supported set
    (DIPOLE, OSCILLATOR_STRENGTH, TRANSITION_DIPOLE, ROTATIONAL_STRENGTH),
    runs :func:`run_adcc`, then prints ground- and excited-state values and
    stores them as wavefunction/global variables.
    """
    # TODO Things available in ADCC, but not yet implemented here:
    # Export of difference and transition density matrices for all states
    properties = [prop.upper() for prop in kwargs.pop('properties')]
    valid_properties = ['DIPOLE', 'OSCILLATOR_STRENGTH', 'TRANSITION_DIPOLE',
                        'ROTATIONAL_STRENGTH']
    unknown_properties = [prop for prop in properties if prop not in valid_properties]
    if unknown_properties:
        # Offer close matches for the first unrecognized property.
        alternatives = ""
        alt_method_name = p4util.text.find_approximate_string_matches(unknown_properties[0],
                                                                      valid_properties, 2)
        if alt_method_name:
            alternatives = " Did you mean? " + " ".join(alt_method_name)
        raise ValidationError("ADC property: Feature '{}' is not recognized. {}"
                              "".format(unknown_properties[0], alternatives))

    # Start timer
    do_timer = kwargs.pop("do_timer", True)
    if do_timer:
        core.tstart()

    adc_wfn = run_adcc(name, do_timer=False, **kwargs)
    state = adc_wfn.adcc_state
    hf = state.reference_state
    mp = state.ground_state

    # Formats and indention
    ind = " "

    def format_vector(label, data):
        # Render a 1-D vector as a fixed-width label plus aligned numbers.
        assert data.ndim == 1
        return f"{label:<40s} " + " ".join(f"{d:12.6g}" for d in data)

    if "DIPOLE" in properties:
        lines = ["\nGround state properties"]
        lines += [ind + "Hartree-Fock (HF)"]
        lines += [ind + ind + format_vector("Dipole moment (in a.u.)", hf.dipole_moment)]
        if state.method.level > 1:
            # MP2 dipole is only meaningful beyond ADC(1).
            lines += [ind + "Møller Plesset 2nd order (MP2)"]
            lines += [ind + ind + format_vector("Dipole moment (in a.u.)", mp.dipole_moment(2))]
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                for i, cart in enumerate(["X", "Y", "Z"]):
                    # retire components at v1.5
                    adc_wfn.set_variable("MP2 dipole " + cart, mp.dipole_moment(2)[i])
                    adc_wfn.set_variable("current dipole " + cart, mp.dipole_moment(2)[i])
            adc_wfn.set_variable("MP2 dipole", mp.dipole_moment(2))
            adc_wfn.set_variable("current dipole", mp.dipole_moment(2))
        lines += [""]
        core.print_out("\n".join(lines) + "\n")

    # Map the GAUGE option onto the short tag used in variable names.
    gauge = core.get_option("ADC", "GAUGE").lower()
    if gauge == "velocity":
        gauge_short = "VEL"
    elif gauge == "length":
        gauge_short = "LEN"
    else:
        raise ValidationError(f"Gauge {gauge} not recognised for ADC calculations.")

    # Collect excited-state data keyed by the printed label.
    computed = {}
    if any(prop in properties for prop in ("TRANSITION_DIPOLE", "OSCILLATOR_STRENGTH")):
        data = state.transition_dipole_moment
        computed["Transition dipole moment (in a.u.)"] = data
        adc_wfn.set_variable(f"{name} transition dipoles", core.Matrix.from_array(data))

    if "OSCILLATOR_STRENGTH" in properties:
        if gauge == "velocity":
            data = state.oscillator_strength_velocity.reshape(-1, 1)
        else:
            data = state.oscillator_strength.reshape(-1, 1)
        computed[f"Oscillator strength ({gauge} gauge)"] = data
        adc_wfn.set_variable(f"{name} oscillator strengths ({gauge_short})",
                             core.Matrix.from_array(data))

    if "ROTATIONAL_STRENGTH" in properties:
        # adcc provides rotatory strengths in the velocity gauge only.
        data = state.rotatory_strength.reshape(-1, 1)
        computed["Rotational strength (velocity gauge)"] = data
        adc_wfn.set_variable(f"{name} rotational strengths (VEL)",
                             core.Matrix.from_array(data))

    if "DIPOLE" in properties:
        data = state.state_dipole_moment
        computed["State dipole moment (in a.u.)"] = data
        adc_wfn.set_variable(f"{name} state dipoles", core.Matrix.from_array(data))

    core.print_out("\nExcited state properties:\n")
    n_states = adc_wfn.variable("number of excited states")
    for i in range(int(n_states)):
        lines = [ind + f"Excited state {i}"]
        for prop, data in sorted(computed.items()):
            lines += [ind + ind + format_vector(prop, data[i])]
        core.print_out("\n".join(lines) + "\n")

    # Shove variables into global space
    for k, v in adc_wfn.variables().items():
        core.set_variable(k, v)

    if do_timer:
        core.tstop()
    return adc_wfn
def run_detci(name, **kwargs):
    """Function encoding sequence of PSI module calls for
    a configuration interaction calculation, namely FCI,
    CIn, MPn, and ZAPTn.

    Parameters
    ----------
    name : str
        One of 'zapt', 'mp', 'mp2'-'mp4', 'ccsd', 'fci', 'cisd', 'cisdt',
        'cisdtq', 'ci', or 'detci' (options preset by caller).
    kwargs
        May contain 'ref_wfn' to skip the SCF, and 'level' for the
        variable-order 'zapt'/'mp'/'ci' methods.

    Returns
    -------
    Wavefunction
        The DETCI wavefunction.

    Raises
    ------
    ValidationError
        For references other than RHF/ROHF.
    """
    optstash = p4util.OptionsState(
        ['DETCI', 'WFN'],
        ['DETCI', 'MAX_NUM_VECS'],
        ['DETCI', 'MPN_ORDER_SAVE'],
        ['DETCI', 'MPN'],
        ['DETCI', 'FCI'],
        ['DETCI', 'EX_LEVEL'])

    def _set_mpn_series_options(level):
        """Set vector-storage options for an order-`level` perturbation series."""
        # Shared by the 'zapt' and 'mp*' branches (previously duplicated).
        maxnvect = int((level + 1) / 2) + (level + 1) % 2
        core.set_local_option('DETCI', 'MAX_NUM_VECS', maxnvect)
        # Odd/even target order determines how many wavefunction orders to keep.
        if (level + 1) % 2:
            core.set_local_option('DETCI', 'MPN_ORDER_SAVE', 2)
        else:
            core.set_local_option('DETCI', 'MPN_ORDER_SAVE', 1)

    if core.get_option('DETCI', 'REFERENCE') not in ['RHF', 'ROHF']:
        raise ValidationError('Reference %s for DETCI is not available.' %
                              core.get_option('DETCI', 'REFERENCE'))

    if name == 'zapt':
        core.set_local_option('DETCI', 'WFN', 'ZAPTN')
        _set_mpn_series_options(kwargs['level'])
    elif name in ['mp', 'mp2', 'mp3', 'mp4']:
        core.set_local_option('DETCI', 'WFN', 'DETCI')
        core.set_local_option('DETCI', 'MPN', 'TRUE')
        if name == 'mp2':
            level = 2
        elif name == 'mp3':
            level = 3
        elif name == 'mp4':
            level = 4
        else:
            level = kwargs['level']
        _set_mpn_series_options(level)
    elif name == 'ccsd':
        # untested
        core.set_local_option('DETCI', 'WFN', 'DETCI')
        core.set_local_option('DETCI', 'CC', 'TRUE')
        core.set_local_option('DETCI', 'CC_EX_LEVEL', 2)
    elif name == 'fci':
        core.set_local_option('DETCI', 'WFN', 'DETCI')
        core.set_local_option('DETCI', 'FCI', 'TRUE')
    elif name == 'cisd':
        core.set_local_option('DETCI', 'WFN', 'DETCI')
        core.set_local_option('DETCI', 'EX_LEVEL', 2)
    elif name == 'cisdt':
        core.set_local_option('DETCI', 'WFN', 'DETCI')
        core.set_local_option('DETCI', 'EX_LEVEL', 3)
    elif name == 'cisdtq':
        core.set_local_option('DETCI', 'WFN', 'DETCI')
        core.set_local_option('DETCI', 'EX_LEVEL', 4)
    elif name == 'ci':
        core.set_local_option('DETCI', 'WFN', 'DETCI')
        core.set_local_option('DETCI', 'EX_LEVEL', kwargs['level'])
    elif name == 'detci':
        # Caller has preset all DETCI options directly.
        pass

    # Bypass the scf call if a reference wavefunction is given
    ref_wfn = kwargs.get('ref_wfn', None)
    if ref_wfn is None:
        ref_wfn = scf_helper(name, **kwargs)  # C1 certified

    # Ensure IWL files have been written
    proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), ref_wfn)

    ciwfn = core.detci(ref_wfn)

    # Shove variables into global space
    for k, v in ciwfn.variables().items():
        core.set_variable(k, v)

    print_nos = False
    if core.get_option("DETCI", "NAT_ORBS"):
        ciwfn.ci_nat_orbs()
        print_nos = True

    proc_util.print_ci_results(ciwfn, name.upper(), ciwfn.variable("HF TOTAL ENERGY"), ciwfn.variable("CURRENT ENERGY"), print_nos)

    core.print_out("\t\t \"A good bug is a dead bug\" \n\n")
    core.print_out("\t\t\t - Starship Troopers\n\n")
    core.print_out("\t\t \"I didn't write FORTRAN. That's the problem.\"\n\n")
    core.print_out("\t\t\t - Edward Valeev\n")

    if core.get_global_option("DIPMOM") and ("mp" not in name.lower()):
        # We always would like to print a little dipole information
        oeprop = core.OEProp(ciwfn)
        oeprop.set_title(name.upper())
        oeprop.add("DIPOLE")
        oeprop.compute()
        ciwfn.oeprop = oeprop
        # retire components in v1.5
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            core.set_variable("CURRENT DIPOLE X", core.variable(name.upper() + " DIPOLE X"))
            core.set_variable("CURRENT DIPOLE Y", core.variable(name.upper() + " DIPOLE Y"))
            core.set_variable("CURRENT DIPOLE Z", core.variable(name.upper() + " DIPOLE Z"))
        core.set_variable("CURRENT DIPOLE", core.variable(name.upper() + " DIPOLE"))

    ciwfn.cleanup_ci()
    ciwfn.cleanup_dpd()
    _clean_detci()

    optstash.restore()
    return ciwfn
def run_dfmp2(name, **kwargs):
    """Function encoding sequence of PSI module calls for
    a density-fitted MP2 calculation.

    Runs (or reuses) an SCF reference, attaches the RI fitting basis,
    computes the DF-MP2 energy, and aliases the requested flavor onto the
    CURRENT ENERGY variables.
    """
    optstash = p4util.OptionsState(
        ['DF_BASIS_MP2'],
        ['SCF_TYPE'])

    # Default to a DF reference when the user has not chosen an SCF algorithm.
    if not core.has_global_option_changed('SCF_TYPE'):
        core.set_global_option('SCF_TYPE', 'DF')
        core.print_out(""" SCF Algorithm Type (re)set to DF.\n""")

    # Reuse a caller-supplied reference wavefunction when available.
    ref_wfn = kwargs.get('ref_wfn')
    if ref_wfn is None:
        ref_wfn = scf_helper(name, **kwargs)  # C1 certified

    core.tstart()
    core.print_out('\n')
    p4util.banner('DFMP2')
    core.print_out('\n')

    # ROHF orbitals must be semicanonicalized before DFMP2.
    if core.get_global_option('REFERENCE') == "ROHF":
        ref_wfn.semicanonicalize()

    aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_MP2",
                                    core.get_option("DFMP2", "DF_BASIS_MP2"),
                                    "RIFIT", core.get_global_option('BASIS'))
    ref_wfn.set_basisset("DF_BASIS_MP2", aux_basis)

    dfmp2_wfn = core.dfmp2(ref_wfn)
    dfmp2_wfn.compute_energy()

    # Mark the flavor the caller asked for as the "current" energy.
    flavor = {'scs-mp2': 'SCS-MP2', 'mp2': 'MP2'}.get(name)
    if flavor is not None:
        dfmp2_wfn.set_variable('CURRENT ENERGY', dfmp2_wfn.variable(flavor + ' TOTAL ENERGY'))
        dfmp2_wfn.set_variable('CURRENT CORRELATION ENERGY', dfmp2_wfn.variable(flavor + ' CORRELATION ENERGY'))

    # Shove variables into global space
    for varname, varval in dfmp2_wfn.variables().items():
        core.set_variable(varname, varval)

    optstash.restore()
    core.tstop()
    return dfmp2_wfn
def run_dfep2(name, **kwargs):
    """Function encoding sequence of PSI module calls for
    a density-fitted EP2 (second-order electron propagator) calculation.

    Runs (or reuses) an RHF reference, builds the EP2 auxiliary basis,
    computes EP2-corrected orbital energies for the requested
    ionizations/attachments, prints a Koopmans-vs-EP2 table, and sets
    the EP2 IP/EA psivars. RHF references only.
    """
    core.tstart()

    optstash = p4util.OptionsState(
        ['DF_BASIS_MP2'],
        ['SCF_TYPE'])

    # Alter default algorithm
    if not core.has_global_option_changed('SCF_TYPE'):
        core.set_global_option('SCF_TYPE', 'DF')
        core.print_out(""" SCF Algorithm Type (re)set to DF.\n""")

    # Bypass the scf call if a reference wavefunction is given
    ref_wfn = kwargs.get('ref_wfn', None)
    if ref_wfn is None:
        ref_wfn = scf_helper(name, **kwargs)  # C1 certified

    if core.get_global_option('REFERENCE') != "RHF":
        # Fixed: interpolate the reference name into the message; it was
        # previously passed as a stray second argument and never formatted.
        raise ValidationError("DF-EP2 is not available for %s references." %
                              core.get_global_option('REFERENCE'))

    # Build the wavefunction
    aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_EP2",
                                    core.get_option("DFEP2", "DF_BASIS_EP2"),
                                    "RIFIT", core.get_global_option('BASIS'))
    ref_wfn.set_basisset("DF_BASIS_EP2", aux_basis)
    dfep2_wfn = core.DFEP2Wavefunction(ref_wfn)

    # Figure out which orbitals to correct.
    # NOTE(review): the guard checks the *local* DFEP2 option while the
    # value is read from the *global* scope — confirm this is intended.
    if core.has_option_changed('DFEP2', 'EP2_ORBITALS'):
        ep2_input = core.get_global_option("EP2_ORBITALS")
    else:
        # Default: the n_ip highest-occupied and n_ea lowest-virtual
        # orbitals across all irreps, expressed as per-irrep index lists.
        n_ip = core.get_global_option("EP2_NUM_IP")
        n_ea = core.get_global_option("EP2_NUM_EA")

        eps = np.hstack(dfep2_wfn.epsilon_a().nph)
        irrep_map = np.hstack([np.ones_like(dfep2_wfn.epsilon_a().nph[x]) * x for x in range(dfep2_wfn.nirrep())])
        sort = np.argsort(eps)

        ip_map = sort[dfep2_wfn.nalpha() - n_ip:dfep2_wfn.nalpha()]
        ea_map = sort[dfep2_wfn.nalpha():dfep2_wfn.nalpha() + n_ea]

        ep2_input = [[] for x in range(dfep2_wfn.nirrep())]
        nalphapi = tuple(dfep2_wfn.nalphapi())

        # Add IP info
        ip_info = np.unique(irrep_map[ip_map], return_counts=True)
        for irrep, cnt in zip(*ip_info):
            irrep = int(irrep)
            ep2_input[irrep].extend(range(nalphapi[irrep] - cnt, nalphapi[irrep]))

        # Add EA info
        ea_info = np.unique(irrep_map[ea_map], return_counts=True)
        for irrep, cnt in zip(*ea_info):
            irrep = int(irrep)
            ep2_input[irrep].extend(range(nalphapi[irrep], nalphapi[irrep] + cnt))

    # Compute
    ret = dfep2_wfn.compute(ep2_input)

    # Flatten to [irrep, EP2, EP2 pole strength, Koopmans, orbital index]
    # and sort by the Koopmans (SCF) orbital energy for printing.
    ret_eps = []
    for h in range(dfep2_wfn.nirrep()):
        ep2_data = ret[h]
        inp_data = ep2_input[h]
        for i in range(len(ep2_data)):
            tmp = [h, ep2_data[i][0], ep2_data[i][1], dfep2_wfn.epsilon_a().get(h, inp_data[i]), inp_data[i]]
            ret_eps.append(tmp)

    ret_eps.sort(key=lambda x: x[3])

    h2ev = constants.hartree2ev
    irrep_labels = dfep2_wfn.molecule().irrep_labels()

    core.print_out(" ==> Results <==\n\n")
    core.print_out(" %8s %12s %12s %8s\n" % ("Orbital", "Koopmans (eV)", "EP2 (eV)", "EP2 PS"))
    core.print_out(" ----------------------------------------------\n")
    for irrep, ep2, ep2_ps, kt, pos in ret_eps:
        label = str(pos + 1) + irrep_labels[irrep]
        core.print_out(" %8s % 12.3f % 12.3f % 6.3f\n" % (label, (kt * h2ev), (ep2 * h2ev), ep2_ps))
        core.set_variable("EP2 " + label.upper() + " ENERGY", ep2)
    core.print_out(" ----------------------------------------------\n\n")

    # Figure out the IP and EA: negative EP2 energies are ionizations,
    # positive ones are attachments (sorted ascending by Koopmans above).
    sorted_vals = np.array([x[1] for x in ret_eps])
    ip_vals = sorted_vals[sorted_vals < 0]
    ea_vals = sorted_vals[sorted_vals > 0]

    if len(ip_vals):
        core.set_variable("EP2 IONIZATION POTENTIAL", ip_vals[-1])
        core.set_variable("CURRENT ENERGY", ip_vals[-1])

    if len(ea_vals):
        core.set_variable("EP2 ELECTRON AFFINITY", ea_vals[0])
        # Only promote the EA to CURRENT ENERGY when no IP was set above
        if core.variable("EP2 IONIZATION POTENTIAL") == 0.0:
            core.set_variable("CURRENT ENERGY", ea_vals[0])

    core.print_out(" EP2 has completed successfully!\n\n")

    # Fixed: the option stash was previously never restored, leaking the
    # SCF_TYPE/DF_BASIS_MP2 changes into subsequent computations.
    optstash.restore()
    core.tstop()
    return dfep2_wfn
def run_dlpnomp2(name, **kwargs):
    """Function encoding sequence of PSI module calls for
    a DLPNO-MP2 calculation.

    Restricted to RHF references, C1 symmetry, and density fitting;
    raises ValidationError otherwise.
    """
    optstash = p4util.OptionsState(
        ['DF_BASIS_MP2'],
        ['SCF_TYPE'])

    # Alter default algorithm
    if not core.has_global_option_changed('SCF_TYPE'):
        core.set_global_option('SCF_TYPE', 'DF')
        core.print_out(""" SCF Algorithm Type (re)set to DF.\n""")

    # DLPNO-MP2 is only DF
    if core.get_global_option('MP2_TYPE') != "DF":
        raise ValidationError(""" DLPNO-MP2 is only implemented with density fitting.\n"""
                              """ 'mp2_type' must be set to 'DF'.\n""")

    # Bypass the scf call if a reference wavefunction is given
    ref_wfn = kwargs.get('ref_wfn', None)
    if ref_wfn is None:
        ref_wfn = scf_helper(name, use_c1=True, **kwargs)  # C1 certified
    elif ref_wfn.molecule().schoenflies_symbol() != 'c1':
        raise ValidationError(""" DLPNO-MP2 does not make use of molecular symmetry: """
                              """reference wavefunction must be C1.\n""")

    if core.get_global_option('REFERENCE') != "RHF":
        # Fixed: interpolate the reference name into the message; it was
        # previously passed as a stray second argument and never formatted.
        raise ValidationError("DLPNO-MP2 is not available for %s references." %
                              core.get_global_option('REFERENCE'))

    core.tstart()
    core.print_out('\n')
    p4util.banner('DLPNO-MP2')
    core.print_out('\n')

    # Attach the auxiliary (RI) basis for the MP2 fit
    aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_MP2",
                                    core.get_option("DLPNO", "DF_BASIS_MP2"),
                                    "RIFIT", core.get_global_option('BASIS'))
    ref_wfn.set_basisset("DF_BASIS_MP2", aux_basis)

    dlpnomp2_wfn = core.dlpno(ref_wfn)
    dlpnomp2_wfn.compute_energy()

    # Promote the requested flavor's totals to the CURRENT psivars
    if name == 'scs-dlpno-mp2':
        dlpnomp2_wfn.set_variable('CURRENT ENERGY', dlpnomp2_wfn.variable('SCS-MP2 TOTAL ENERGY'))
        dlpnomp2_wfn.set_variable('CURRENT CORRELATION ENERGY', dlpnomp2_wfn.variable('SCS-MP2 CORRELATION ENERGY'))
    elif name == 'dlpno-mp2':
        dlpnomp2_wfn.set_variable('CURRENT ENERGY', dlpnomp2_wfn.variable('MP2 TOTAL ENERGY'))
        dlpnomp2_wfn.set_variable('CURRENT CORRELATION ENERGY', dlpnomp2_wfn.variable('MP2 CORRELATION ENERGY'))

    # Shove variables into global space
    for k, v in dlpnomp2_wfn.variables().items():
        core.set_variable(k, v)

    optstash.restore()
    core.tstop()
    return dlpnomp2_wfn
def run_dmrgscf(name, **kwargs):
    """Drive a DMRG-SCF computation (optionally with a DMRG-CASPT2
    correction, selected through the method name).
    """
    optstash = p4util.OptionsState(
        ['SCF_TYPE'],
        ['DMRG', 'DMRG_CASPT2_CALC'])

    # Reuse a caller-supplied reference wavefunction, otherwise run SCF
    scf_wfn = kwargs.get('ref_wfn', None)
    if scf_wfn is None:
        scf_wfn = scf_helper(name, **kwargs)

    # DMRG consumes IWL integrals; make sure they exist for this SCF type
    proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), scf_wfn)

    # A CASPT2 correction is requested via the method name
    if 'CASPT2' in name.upper():
        core.set_local_option("DMRG", "DMRG_CASPT2_CALC", True)

    dmrg_wfn = core.dmrg(scf_wfn)
    optstash.restore()

    # Mirror wavefunction psivars into the global scope
    for key, val in dmrg_wfn.variables().items():
        core.set_variable(key, val)

    return dmrg_wfn
def run_dmrgci(name, **kwargs):
    """Drive a DMRG-CI computation: DMRG with the orbital-optimization
    loop capped at a single iteration.
    """
    optstash = p4util.OptionsState(
        ['SCF_TYPE'],
        ['DMRG', 'DMRG_SCF_MAX_ITER'])

    # Reuse a caller-supplied reference wavefunction, otherwise run SCF
    scf_wfn = kwargs.get('ref_wfn', None)
    if scf_wfn is None:
        scf_wfn = scf_helper(name, **kwargs)

    # DMRG consumes IWL integrals; make sure they exist for this SCF type
    proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), scf_wfn)

    # CI only: one macro-iteration keeps the orbitals fixed
    core.set_local_option('DMRG', 'DMRG_SCF_MAX_ITER', 1)

    dmrg_wfn = core.dmrg(scf_wfn)
    optstash.restore()

    # Mirror wavefunction psivars into the global scope
    for key, val in dmrg_wfn.variables().items():
        core.set_variable(key, val)

    return dmrg_wfn
def run_psimrcc(name, **kwargs):
    """Run a PSIMRCC computation starting from an MCSCF reference."""
    mcscf_ref = run_mcscf(name, **kwargs)
    mrcc_wfn = core.psimrcc(mcscf_ref)

    # Mirror wavefunction psivars into the global scope
    for key, val in mrcc_wfn.variables().items():
        core.set_variable(key, val)

    return mrcc_wfn
def run_psimrcc_scf(name, **kwargs):
    """Run a PSIMRCC computation starting from an SCF reference."""
    # Reuse a caller-supplied reference wavefunction, otherwise run SCF
    scf_ref = kwargs.get('ref_wfn', None)
    if scf_ref is None:
        scf_ref = scf_helper(name, **kwargs)

    mrcc_wfn = core.psimrcc(scf_ref)

    # Mirror wavefunction psivars into the global scope
    for key, val in mrcc_wfn.variables().items():
        core.set_variable(key, val)

    return mrcc_wfn
def run_sapt(name, **kwargs):
    """Function encoding sequence of PSI module calls for
    a SAPT calculation of any level.

    Workflow: carve the dimer into monomers, run three RHF computations
    (dimer, monomer A, monomer B) — sharing saved DF integrals when the
    monomers use the dimer basis — optionally accumulate a delta-MP2
    correction (method names ending in ``dmp2``), then call the SAPT
    module on the three wavefunctions. Returns the dimer wavefunction.
    """
    optstash = p4util.OptionsState(['SCF_TYPE'])

    # Alter default algorithm
    if not core.has_global_option_changed('SCF_TYPE'):
        core.set_global_option('SCF_TYPE', 'DF')

    # Get the molecule of interest; a supplied ref_wfn contributes only
    # its molecule, not its orbitals
    ref_wfn = kwargs.get('ref_wfn', None)
    if ref_wfn is None:
        sapt_dimer = kwargs.pop('molecule', core.get_active_molecule())
    else:
        core.print_out('Warning! SAPT argument "ref_wfn" is only able to use molecule information.')
        sapt_dimer = ref_wfn.molecule()

    # Split the dimer into its two monomers per the requested basis scheme
    sapt_basis = kwargs.pop('sapt_basis', 'dimer')
    sapt_dimer, monomerA, monomerB = proc_util.prepare_sapt_molecule(sapt_dimer, sapt_basis)

    # Need to ensure consistent orbital freezing
    # between monomer and dimer computations
    monomerA_basis = core.BasisSet.build(monomerA, "BASIS", core.get_global_option("BASIS"))
    monomerB_basis = core.BasisSet.build(monomerB, "BASIS", core.get_global_option("BASIS"))
    nfc_ab = monomerA_basis.n_frozen_core() + monomerB_basis.n_frozen_core()

    if (core.get_option('SCF', 'REFERENCE') != 'RHF') and (name.upper() != "SAPT0"):
        raise ValidationError('Only SAPT0 supports a reference different from \"reference rhf\".')

    # Method-name suffixes select the delta-MP2 and empirical -D variants
    do_delta_mp2 = True if name.endswith('dmp2') else False
    do_empirical_disp = True if '-d' in name.lower() else False

    if do_empirical_disp:
        # Make sure we are turning SAPT0 dispersion off;
        # the empirical -D correction replaces it below
        core.set_local_option('SAPT', 'SAPT0_E10', True)
        core.set_local_option('SAPT', 'SAPT0_E20IND', True)
        core.set_local_option('SAPT', 'SAPT0_E20Disp', False)

    ri = core.get_global_option('SCF_TYPE')
    df_ints_io = core.get_option('SCF', 'DF_INTS_IO')
    # inquire if above at all applies to dfmp2

    core.IO.set_default_namespace('dimer')
    core.print_out('\n')
    p4util.banner('Dimer HF')
    core.print_out('\n')

    # Compute dimer wavefunction; save DF ints so the monomer SCFs can
    # reuse them (only valid when monomers run in the dimer basis)
    if (sapt_basis == 'dimer') and (ri == 'DF'):
        core.set_global_option('DF_INTS_IO', 'SAVE')

    # Freeze the combined monomer core count in the dimer computation
    optstash2 = p4util.OptionsState(['NUM_FROZEN_DOCC'])
    core.set_global_option("NUM_FROZEN_DOCC", nfc_ab)

    core.timer_on("SAPT: Dimer SCF")
    dimer_wfn = scf_helper('RHF', molecule=sapt_dimer, **kwargs)
    core.timer_off("SAPT: Dimer SCF")

    if do_delta_mp2:
        # Start the delta-MP2 accumulator with the dimer correlation energy
        select_mp2(name, ref_wfn=dimer_wfn, **kwargs)
        mp2_corl_interaction_e = core.variable('MP2 CORRELATION ENERGY')
    optstash2.restore()

    if (sapt_basis == 'dimer') and (ri == 'DF'):
        core.set_global_option('DF_INTS_IO', 'LOAD')

    # Compute Monomer A wavefunction
    if (sapt_basis == 'dimer') and (ri == 'DF'):
        # file 97 presumably holds the saved DF integrals — TODO confirm
        core.IO.change_file_namespace(97, 'dimer', 'monomerA')
    core.IO.set_default_namespace('monomerA')
    core.print_out('\n')
    p4util.banner('Monomer A HF')
    core.print_out('\n')
    core.timer_on("SAPT: Monomer A SCF")
    monomerA_wfn = scf_helper('RHF', molecule=monomerA, **kwargs)
    core.timer_off("SAPT: Monomer A SCF")

    if do_delta_mp2:
        select_mp2(name, ref_wfn=monomerA_wfn, **kwargs)
        mp2_corl_interaction_e -= core.variable('MP2 CORRELATION ENERGY')

    # Compute Monomer B wavefunction
    if (sapt_basis == 'dimer') and (ri == 'DF'):
        core.IO.change_file_namespace(97, 'monomerA', 'monomerB')
    core.IO.set_default_namespace('monomerB')
    core.print_out('\n')
    p4util.banner('Monomer B HF')
    core.print_out('\n')
    core.timer_on("SAPT: Monomer B SCF")
    monomerB_wfn = scf_helper('RHF', molecule=monomerB, **kwargs)
    core.timer_off("SAPT: Monomer B SCF")

    # Delta MP2: dimer minus both monomers gives the interaction piece
    if do_delta_mp2:
        select_mp2(name, ref_wfn=monomerB_wfn, **kwargs)
        mp2_corl_interaction_e -= core.variable('MP2 CORRELATION ENERGY')
        core.set_variable("SAPT MP2 CORRELATION ENERGY", mp2_corl_interaction_e)  # P::e SAPT
    core.set_global_option('DF_INTS_IO', df_ints_io)

    # Point the SAPT monomer files back at the dimer namespace
    if core.get_option('SCF', 'REFERENCE') == 'RHF':
        core.IO.change_file_namespace(psif.PSIF_SAPT_MONOMERA, 'monomerA', 'dimer')
        core.IO.change_file_namespace(psif.PSIF_SAPT_MONOMERB, 'monomerB', 'dimer')

    core.IO.set_default_namespace('dimer')
    # NOTE(review): 10e-10 == 1e-9; confirm the intended threshold
    core.set_local_option('SAPT', 'E_CONVERGENCE', 10e-10)
    core.set_local_option('SAPT', 'D_CONVERGENCE', 10e-10)

    # Translate the method name into SAPT level / dispersion options
    if name in ['sapt0', 'ssapt0']:
        core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT0')
    elif name == 'sapt2':
        core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2')
    elif name in ['sapt2+', 'sapt2+dmp2']:
        core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+')
        core.set_local_option('SAPT', 'DO_CCD_DISP', False)
    elif name in ['sapt2+(3)', 'sapt2+(3)dmp2']:
        core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+3')
        core.set_local_option('SAPT', 'DO_THIRD_ORDER', False)
        core.set_local_option('SAPT', 'DO_CCD_DISP', False)
    elif name in ['sapt2+3', 'sapt2+3dmp2']:
        core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+3')
        core.set_local_option('SAPT', 'DO_THIRD_ORDER', True)
        core.set_local_option('SAPT', 'DO_CCD_DISP', False)
    elif name in ['sapt2+(ccd)', 'sapt2+(ccd)dmp2']:
        core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+')
        core.set_local_option('SAPT', 'DO_CCD_DISP', True)
    elif name in ['sapt2+(3)(ccd)', 'sapt2+(3)(ccd)dmp2']:
        core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+3')
        core.set_local_option('SAPT', 'DO_THIRD_ORDER', False)
        core.set_local_option('SAPT', 'DO_CCD_DISP', True)
    elif name in ['sapt2+3(ccd)', 'sapt2+3(ccd)dmp2']:
        core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+3')
        core.set_local_option('SAPT', 'DO_THIRD_ORDER', True)
        core.set_local_option('SAPT', 'DO_CCD_DISP', True)

    # Make sure we are not going to run CPHF on ROHF, since its MO Hessian
    # is not SPD
    if core.get_option('SCF', 'REFERENCE') == 'ROHF':
        core.set_local_option('SAPT', 'COUPLED_INDUCTION', False)
        core.print_out(' Coupled induction not available for ROHF.\n')
        core.print_out(' Proceeding with uncoupled induction only.\n')

    # Attach the SAPT and electrostatics fitting bases to the dimer wfn
    core.print_out(" Constructing Basis Sets for SAPT...\n\n")
    aux_basis = core.BasisSet.build(dimer_wfn.molecule(), "DF_BASIS_SAPT", core.get_global_option("DF_BASIS_SAPT"),
                                    "RIFIT", core.get_global_option("BASIS"))
    dimer_wfn.set_basisset("DF_BASIS_SAPT", aux_basis)
    if core.get_global_option("DF_BASIS_ELST") == "":
        dimer_wfn.set_basisset("DF_BASIS_ELST", aux_basis)
    else:
        aux_basis = core.BasisSet.build(dimer_wfn.molecule(), "DF_BASIS_ELST", core.get_global_option("DF_BASIS_ELST"),
                                        "RIFIT", core.get_global_option("BASIS"))
        dimer_wfn.set_basisset("DF_BASIS_ELST", aux_basis)

    core.print_out('\n')
    p4util.banner(name.upper())
    core.print_out('\n')
    e_sapt = core.sapt(dimer_wfn, monomerA_wfn, monomerB_wfn)
    dimer_wfn.set_module("sapt")

    from psi4.driver.qcdb.psivardefs import sapt_psivars
    p4util.expand_psivars(sapt_psivars())
    optstash.restore()

    # Get the SAPT name right if doing empirical dispersion:
    # the module ran plain SAPT0, so harvest its psivars
    if do_empirical_disp:
        sapt_name = "sapt0"
    else:
        sapt_name = name

    # Make sure we got induction, otherwise replace it with uncoupled induction
    which_ind = 'IND'
    target_ind = 'IND'
    if not core.has_variable(' '.join((sapt_name.upper(), which_ind, 'ENERGY'))):
        which_ind = 'IND,U'

    # Copy the level-specific psivars to the generic SAPT names
    for term in ['ELST', 'EXCH', 'DISP', 'TOTAL']:
        core.set_variable(' '.join(['SAPT', term, 'ENERGY']),
                          core.variable(' '.join([sapt_name.upper(), term, 'ENERGY'])))
    # Special induction case
    core.set_variable(' '.join(['SAPT', target_ind, 'ENERGY']),
                      core.variable(' '.join([sapt_name.upper(), which_ind, 'ENERGY'])))
    core.set_variable('CURRENT ENERGY', core.variable('SAPT TOTAL ENERGY'))

    # Empirical dispersion
    if do_empirical_disp:
        proc_util.sapt_empirical_dispersion(name, dimer_wfn)

    return dimer_wfn
def run_sapt_ct(name, **kwargs):
    """Function encoding sequence of PSI module calls for
    a charge-transfer SAPT calculation of any level.

    Runs SAPT twice — once with monomers in the dimer basis and once in
    their own monomer bases — and reports the difference of the two
    'SAPT CT ENERGY' values as the charge-transfer energy. RHF only.
    Returns the dimer wavefunction.
    """
    optstash = p4util.OptionsState(
        ['SCF_TYPE'])

    if 'ref_wfn' in kwargs:
        core.print_out('\nWarning! Argument ref_wfn is not valid for sapt computations\n')

    # Alter default algorithm
    if not core.has_global_option_changed('SCF_TYPE'):
        core.set_global_option('SCF_TYPE', 'DF')

    # Get the molecule of interest; a supplied ref_wfn contributes only
    # its molecule, not its orbitals
    ref_wfn = kwargs.get('ref_wfn', None)
    if ref_wfn is None:
        sapt_dimer = kwargs.pop('molecule', core.get_active_molecule())
    else:
        core.print_out('Warning! SAPT argument "ref_wfn" is only able to use molecule information.')
        sapt_dimer = ref_wfn.molecule()

    # Dimer-basis monomers from the standard SAPT preparation ...
    sapt_dimer, monomerA, monomerB = proc_util.prepare_sapt_molecule(sapt_dimer, "dimer")

    # ... plus monomer-basis copies (no ghost atoms) for the second pass
    monomerAm = sapt_dimer.extract_subsets(1)
    monomerAm.set_name('monomerAm')

    monomerBm = sapt_dimer.extract_subsets(2)
    monomerBm.set_name('monomerBm')

    if core.get_option('SCF', 'REFERENCE') != 'RHF':
        raise ValidationError('SAPT requires requires \"reference rhf\".')

    ri = core.get_global_option('SCF_TYPE')
    df_ints_io = core.get_option('SCF', 'DF_INTS_IO')
    # inquire if above at all applies to dfmp2

    core.IO.set_default_namespace('dimer')
    core.print_out('\n')
    p4util.banner('Dimer HF')
    core.print_out('\n')
    # Save DF ints from the dimer SCF so the dimer-basis monomer SCFs
    # can reload them
    core.set_global_option('DF_INTS_IO', 'SAVE')
    dimer_wfn = scf_helper('RHF', molecule=sapt_dimer, **kwargs)
    core.set_global_option('DF_INTS_IO', 'LOAD')

    if (ri == 'DF'):
        # file 97 presumably holds the saved DF integrals — TODO confirm
        core.IO.change_file_namespace(97, 'dimer', 'monomerA')
    core.IO.set_default_namespace('monomerA')
    core.print_out('\n')
    p4util.banner('Monomer A HF (Dimer Basis)')
    core.print_out('\n')
    monomerA_wfn = scf_helper('RHF', molecule=monomerA, **kwargs)

    if (ri == 'DF'):
        core.IO.change_file_namespace(97, 'monomerA', 'monomerB')
    core.IO.set_default_namespace('monomerB')
    core.print_out('\n')
    p4util.banner('Monomer B HF (Dimer Basis)')
    core.print_out('\n')
    monomerB_wfn = scf_helper('RHF', molecule=monomerB, **kwargs)
    core.set_global_option('DF_INTS_IO', df_ints_io)

    # Monomer-basis SCFs (no DF-int reuse possible here)
    core.IO.set_default_namespace('monomerAm')
    core.print_out('\n')
    p4util.banner('Monomer A HF (Monomer Basis)')
    core.print_out('\n')
    monomerAm_wfn = scf_helper('RHF', molecule=monomerAm, **kwargs)

    core.IO.set_default_namespace('monomerBm')
    core.print_out('\n')
    p4util.banner('Monomer B HF (Monomer Basis)')
    core.print_out('\n')
    monomerBm_wfn = scf_helper('RHF', molecule=monomerBm, **kwargs)

    core.IO.set_default_namespace('dimer')
    # NOTE(review): 10e-10 == 1e-9; confirm the intended threshold
    core.set_local_option('SAPT', 'E_CONVERGENCE', 10e-10)
    core.set_local_option('SAPT', 'D_CONVERGENCE', 10e-10)

    # Translate the method name into SAPT level / dispersion options
    if name == 'sapt0-ct':
        core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT0')
    elif name == 'sapt2-ct':
        core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2')
    elif name == 'sapt2+-ct':
        core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+')
    elif name == 'sapt2+(3)-ct':
        core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+3')
        core.set_local_option('SAPT', 'DO_THIRD_ORDER', False)
    elif name == 'sapt2+3-ct':
        core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+3')
        core.set_local_option('SAPT', 'DO_THIRD_ORDER', True)
    elif name == 'sapt2+(ccd)-ct':
        core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+')
        core.set_local_option('SAPT', 'DO_CCD_DISP', True)
    elif name == 'sapt2+(3)(ccd)-ct':
        core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+3')
        core.set_local_option('SAPT', 'DO_THIRD_ORDER', False)
        core.set_local_option('SAPT', 'DO_CCD_DISP', True)
    elif name == 'sapt2+3(ccd)-ct':
        core.set_local_option('SAPT', 'SAPT_LEVEL', 'SAPT2+3')
        core.set_local_option('SAPT', 'DO_THIRD_ORDER', True)
        core.set_local_option('SAPT', 'DO_CCD_DISP', True)

    # Attach the SAPT and electrostatics fitting bases to the dimer wfn
    core.print_out('\n')
    aux_basis = core.BasisSet.build(dimer_wfn.molecule(), "DF_BASIS_SAPT",
                                    core.get_global_option("DF_BASIS_SAPT"),
                                    "RIFIT", core.get_global_option("BASIS"))
    dimer_wfn.set_basisset("DF_BASIS_SAPT", aux_basis)
    if core.get_global_option("DF_BASIS_ELST") == "":
        dimer_wfn.set_basisset("DF_BASIS_ELST", aux_basis)
    else:
        aux_basis = core.BasisSet.build(dimer_wfn.molecule(), "DF_BASIS_ELST",
                                        core.get_global_option("DF_BASIS_ELST"),
                                        "RIFIT", core.get_global_option("BASIS"))
        dimer_wfn.set_basisset("DF_BASIS_ELST", aux_basis)

    core.print_out('\n')
    p4util.banner('SAPT Charge Transfer')
    core.print_out('\n')

    # Pass 1: SAPT with dimer-basis monomers
    core.print_out('\n')
    p4util.banner('Dimer Basis SAPT')
    core.print_out('\n')
    core.IO.change_file_namespace(psif.PSIF_SAPT_MONOMERA, 'monomerA', 'dimer')
    core.IO.change_file_namespace(psif.PSIF_SAPT_MONOMERB, 'monomerB', 'dimer')
    e_sapt = core.sapt(dimer_wfn, monomerA_wfn, monomerB_wfn)
    CTd = core.variable('SAPT CT ENERGY')
    dimer_wfn.set_module("sapt")

    # Pass 2: SAPT with monomer-basis monomers
    core.print_out('\n')
    p4util.banner('Monomer Basis SAPT')
    core.print_out('\n')
    core.IO.change_file_namespace(psif.PSIF_SAPT_MONOMERA, 'monomerAm', 'dimer')
    core.IO.change_file_namespace(psif.PSIF_SAPT_MONOMERB, 'monomerBm', 'dimer')
    e_sapt = core.sapt(dimer_wfn, monomerAm_wfn, monomerBm_wfn)
    CTm = core.variable('SAPT CT ENERGY')
    # Charge transfer = dimer-basis induction minus monomer-basis induction
    CT = CTd - CTm

    # Report in milli-Hartree, kcal/mol, and kJ/mol
    units = (1000.0, constants.hartree2kcalmol, constants.hartree2kJmol)
    core.print_out('\n\n')
    core.print_out('    SAPT Charge Transfer Analysis\n')
    core.print_out('  ------------------------------------------------------------------------------------------------\n')
    core.print_out('    SAPT Induction (Dimer Basis)  %12.4lf [mEh] %12.4lf [kcal/mol] %12.4lf [kJ/mol]\n' %
                   tuple(CTd * u for u in units))
    core.print_out('    SAPT Induction (Monomer Basis)%12.4lf [mEh] %12.4lf [kcal/mol] %12.4lf [kJ/mol]\n' %
                   tuple(CTm * u for u in units))
    core.print_out('    SAPT Charge Transfer          %12.4lf [mEh] %12.4lf [kcal/mol] %12.4lf [kJ/mol]\n\n' %
                   tuple(CT * u for u in units))
    core.set_variable("SAPT CT ENERGY", CT)  # P::e SAPT

    optstash.restore()
    return dimer_wfn
def run_fisapt(name, **kwargs):
    """Function encoding sequence of PSI module calls for
    an F/ISAPT0 computation

    Runs (or reuses) an RHF dimer wavefunction in C1 symmetry, attaches
    the DF-SCF, DF-SAPT, and MinAO basis sets, then drives the FISAPT
    module. Names containing ``-d`` disable FISAPT's own dispersion and
    append an empirical -D correction instead. Returns ``ref_wfn``.
    """
    optstash = p4util.OptionsState(['SCF_TYPE'])

    # Alter default algorithm
    if not core.has_global_option_changed('SCF_TYPE'):
        core.set_global_option('SCF_TYPE', 'DF')

    # Get the molecule of interest; a supplied ref_wfn contributes only
    # its molecule, not its orbitals
    ref_wfn = kwargs.get('ref_wfn', None)
    if ref_wfn is None:
        sapt_dimer = kwargs.pop('molecule', core.get_active_molecule())
    else:
        core.print_out('Warning! FISAPT argument "ref_wfn" is only able to use molecule information.')
        sapt_dimer = ref_wfn.molecule()
    sapt_dimer.update_geometry()  # make sure since mol from wfn, kwarg, or P::e

    # Shifting to C1 so we need to copy the active molecule
    if sapt_dimer.schoenflies_symbol() != 'c1':
        core.print_out(' FISAPT does not make use of molecular symmetry, further calculations in C1 point group.\n')
        sapt_dimer = sapt_dimer.clone()
        sapt_dimer.reset_point_group('c1')
        # Lock orientation/COM so the cloned geometry is not re-standardized
        sapt_dimer.fix_orientation(True)
        sapt_dimer.fix_com(True)
        sapt_dimer.update_geometry()

    if core.get_option('SCF', 'REFERENCE') != 'RHF':
        raise ValidationError('FISAPT requires requires \"reference rhf\".')

    if ref_wfn is None:
        core.timer_on("FISAPT: Dimer SCF")
        ref_wfn = scf_helper('RHF', molecule=sapt_dimer, **kwargs)
        core.timer_off("FISAPT: Dimer SCF")

    core.print_out(" Constructing Basis Sets for FISAPT...\n\n")
    # JKFIT auxiliary basis for the SCF-level fits
    scf_aux_basis = core.BasisSet.build(ref_wfn.molecule(),
                                        "DF_BASIS_SCF",
                                        core.get_option("SCF", "DF_BASIS_SCF"),
                                        "JKFIT",
                                        core.get_global_option('BASIS'),
                                        puream=ref_wfn.basisset().has_puream())
    ref_wfn.set_basisset("DF_BASIS_SCF", scf_aux_basis)

    # RIFIT auxiliary basis for the SAPT terms
    sapt_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SAPT", core.get_global_option("DF_BASIS_SAPT"),
                                     "RIFIT", core.get_global_option("BASIS"),
                                     ref_wfn.basisset().has_puream())
    ref_wfn.set_basisset("DF_BASIS_SAPT", sapt_basis)

    # Minimal AO basis used by the F/I partitioning
    minao = core.BasisSet.build(ref_wfn.molecule(), "BASIS", core.get_global_option("MINAO_BASIS"))
    ref_wfn.set_basisset("MINAO", minao)

    # Turn off FISAPT's own F-SAPT dispersion when an empirical -D
    # correction is requested instead
    if "-d" in name.lower():
        core.set_local_option("FISAPT", "FISAPT_DO_FSAPT_DISP", False)

    fisapt_wfn = core.FISAPT(ref_wfn)
    # NOTE(review): side-effect import — presumably registers extra FISAPT
    # processing methods before compute_energy; confirm before removing
    from .sapt import fisapt_proc
    fisapt_wfn.compute_energy(external_potentials=kwargs.get("external_potentials", None))

    # Compute -D dispersion
    if "-d" in name.lower():
        proc_util.sapt_empirical_dispersion(name, ref_wfn)

    optstash.restore()
    return ref_wfn
def run_mrcc(name, **kwargs):
    """Function that prepares environment and input files
    for a calculation calling Kallay's MRCC code.

    Requires ``kwargs['level']``, a dict of settings passed to
    ``core.mrcc_generate_input``. Pass ``keep=True`` or ``path=...`` to
    preserve the MRCC scratch directory. Returns the reference
    wavefunction; energies are harvested into psivars from MRCC's
    ``iface`` file.
    """
    # Check to see if we really need to run the SCF code.
    ref_wfn = kwargs.get('ref_wfn', None)
    if ref_wfn is None:
        ref_wfn = scf_helper(name, **kwargs)
    vscf = ref_wfn.variable('SCF TOTAL ENERGY')

    # The parse_arbitrary_order method provides us the following information
    # We require that level be provided. level is a dictionary
    # of settings to be passed to core.mrcc
    if 'level' not in kwargs:
        raise ValidationError('level parameter was not provided.')
    level = kwargs['level']

    # Fullname is the string we need to search for in iface.
    # NOTE(review): currently unused below — the iface scan harvests every
    # method line generically; retained for parity with the caller contract.
    fullname = level['fullname']

    # User can provide 'keep' to the method.
    # When provided, do not delete the MRCC scratch directory.
    keep = kwargs.get('keep', False)

    # Save current directory location
    current_directory = os.getcwd()

    # Find environment by merging PSIPATH and PATH environment variables
    lenv = {
        'PATH': ':'.join([os.path.abspath(x) for x in os.environ.get('PSIPATH', '').split(':') if x != '']) +
                ':' + os.environ.get('PATH'),
        'LD_LIBRARY_PATH': os.environ.get('LD_LIBRARY_PATH')
    }
    # Filter out None values as subprocess will fault on them
    lenv = {k: v for k, v in lenv.items() if v is not None}

    # Need to move to the scratch directory, preferably into a separate directory in that location
    psi_io = core.IOManager.shared_object()
    os.chdir(psi_io.get_default_path())

    # Make new directory specifically for mrcc
    mrcc_tmpdir = 'mrcc_' + str(os.getpid())
    if 'path' in kwargs:
        mrcc_tmpdir = kwargs['path']

    # Check to see if directory already exists, if not, create.
    if not os.path.exists(mrcc_tmpdir):
        os.mkdir(mrcc_tmpdir)

    # Move into the new directory
    os.chdir(mrcc_tmpdir)

    # Generate integrals and input file (dumps files to the current directory)
    core.mrcc_generate_input(ref_wfn, level)

    # Load the fort.56 file and dump a copy into the outfile
    # (context manager closes the handle; the original leaked it)
    core.print_out('\n===== Begin fort.56 input for MRCC ======\n')
    with open('fort.56', 'r') as fort56:
        core.print_out(fort56.read())
    core.print_out('===== End fort.56 input for MRCC ======\n')

    # Modify the environment:
    #    PGI Fortan prints warning to screen if STOP is used
    lenv['NO_STOP_MESSAGE'] = '1'

    # Obtain the number of threads MRCC should use
    lenv['OMP_NUM_THREADS'] = str(core.get_num_threads())

    # If the user provided MRCC_OMP_NUM_THREADS set the environ to it
    if core.has_option_changed('MRCC', 'MRCC_OMP_NUM_THREADS'):
        lenv['OMP_NUM_THREADS'] = str(core.get_option('MRCC', 'MRCC_OMP_NUM_THREADS'))

    # Call dmrcc, directing all screen output to the output file
    external_exe = 'dmrcc'
    try:
        retcode = subprocess.Popen([external_exe], bufsize=0, stdout=subprocess.PIPE, env=lenv)
    except OSError as e:
        sys.stderr.write('Program %s not found in path or execution failed: %s\n' % (external_exe, e.strerror))
        core.print_out('Program %s not found in path or execution failed: %s\n' % (external_exe, e.strerror))
        message = ("Program %s not found in path or execution failed: %s\n" % (external_exe, e.strerror))
        raise ValidationError(message)

    # Stream MRCC's stdout into the Psi4 output file as it arrives
    c4out = ''
    while True:
        data = retcode.stdout.readline()
        if not data:
            break
        core.print_out(data.decode('utf-8'))
        c4out += data.decode('utf-8')

    # Read iface once (it is echoed again after cleanup below), then scan
    # it and set a psivar for every method line; the file handle is now
    # closed properly instead of leaked.
    with open('iface', 'r') as iface:
        iface_contents = iface.read()

    ene = 0.0
    for line in iface_contents.splitlines():
        fields = line.split()
        m = fields[1]
        try:
            ene = float(fields[5])
            if m == "MP(2)":
                m = "MP2"
            core.set_variable(m + ' TOTAL ENERGY', ene)
            core.set_variable(m + ' CORRELATION ENERGY', ene - vscf)
        except ValueError:
            # Header/non-numeric lines are skipped
            continue

    # The last 'ene' in iface is the one the user requested.
    core.set_variable('CURRENT ENERGY', ene)
    core.set_variable('CURRENT CORRELATION ENERGY', ene - vscf)

    # Delete mrcc tempdir
    os.chdir('..')
    try:
        # Delete unless we're told not to
        if keep is False and 'path' not in kwargs:
            shutil.rmtree(mrcc_tmpdir)
    except OSError as e:
        print('Unable to remove MRCC temporary directory %s' % e, file=sys.stderr)
        # was the site builtin exit(); sys.exit is safe in non-interactive runs
        sys.exit(1)

    # Return to submission directory
    os.chdir(current_directory)

    # If we're told to keep the files or the user provided a path, do nothing.
    if keep or ('path' in kwargs):
        core.print_out('\nMRCC scratch files have been kept.\n')
        core.print_out('They can be found in ' + mrcc_tmpdir)

    # Dump iface contents to output
    core.print_out('\n')
    p4util.banner('Full results from MRCC')
    core.print_out('\n')
    core.print_out(iface_contents)

    return ref_wfn
def run_fnodfcc(name, **kwargs):
    """Encode the sequence of PSI module calls for density-fitted /
    Cholesky-decomposed CCSD and CCSD(T), optionally with frozen
    natural orbitals.

    >>> set cc_type df
    >>> energy('fno-ccsd(t)')
    """
    kwargs = p4util.kwargs_lower(kwargs)

    # stash user options
    optstash = p4util.OptionsState(
        ['FNOCC', 'COMPUTE_TRIPLES'],
        ['FNOCC', 'DFCC'],
        ['FNOCC', 'NAT_ORBS'],
        ['FNOCC', 'RUN_CEPA'],
        ['FNOCC', 'DF_BASIS_CC'],
        ['SCF', 'DF_BASIS_SCF'],
        ['SCF', 'DF_INTS_IO'])

    core.set_local_option('FNOCC', 'DFCC', True)
    core.set_local_option('FNOCC', 'RUN_CEPA', False)

    # open-shell references are not supported by this path
    if core.get_option('SCF', 'REFERENCE') != 'RHF':
        raise ValidationError(f"""Error: {name} requires 'reference rhf'.""")

    def _configure_factorization(type_keyword):
        """Configure CD vs DF factorization from the given *_TYPE option."""
        algorithm = core.get_global_option(type_keyword)
        if algorithm == 'CD':
            core.set_local_option('FNOCC', 'DF_BASIS_CC', 'CHOLESKY')
            # Alter default algorithm
            if not core.has_global_option_changed('SCF_TYPE'):
                optstash.add_option(['SCF_TYPE'])
                core.set_global_option('SCF_TYPE', 'CD')
                core.print_out(""" SCF Algorithm Type (re)set to CD.\n""")
        elif algorithm in ['DISK_DF', 'DF']:
            if core.get_option('FNOCC', 'DF_BASIS_CC') == 'CHOLESKY':
                core.set_local_option('FNOCC', 'DF_BASIS_CC', '')
            proc_util.check_disk_df(name.upper(), optstash)
        else:
            raise ValidationError("""Invalid type '%s' for DFCC""" % algorithm)

    # method name -> (compute (T) triples?, use frozen natural orbitals?)
    method_flags = {
        'ccsd': (False, False),
        'ccsd(t)': (True, False),
        'fno-ccsd': (False, True),
        'fno-ccsd(t)': (True, True),
    }
    if name in method_flags:
        triples, natural_orbs = method_flags[name]
        core.set_local_option('FNOCC', 'COMPUTE_TRIPLES', triples)
        if natural_orbs:
            core.set_local_option('FNOCC', 'NAT_ORBS', True)
        _configure_factorization('CC_TYPE')

    if core.get_global_option('SCF_TYPE') not in ['CD', 'DISK_DF']:
        raise ValidationError("""Invalid scf_type for DFCC.""")

    # save DF or CD ints generated by SCF for use in CC
    core.set_local_option('SCF', 'DF_INTS_IO', 'SAVE')

    # Reuse a caller-supplied C1 reference, otherwise run SCF in C1
    ref_wfn = kwargs.get('ref_wfn', None)
    if ref_wfn is None:
        ref_wfn = scf_helper(name, use_c1=True, **kwargs)  # C1 certified
    else:
        if ref_wfn.molecule().schoenflies_symbol() != 'c1':
            raise ValidationError(""" FNOCC does not make use of molecular symmetry: """
                                  """reference wavefunction must be C1.\n""")

    core.print_out(" Constructing Basis Sets for FNOCC...\n\n")
    jk_fit_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SCF",
                                       core.get_option("SCF", "DF_BASIS_SCF"),
                                       "JKFIT", core.get_global_option('BASIS'),
                                       puream=ref_wfn.basisset().has_puream())
    ref_wfn.set_basisset("DF_BASIS_SCF", jk_fit_basis)

    ri_fit_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_CC",
                                       core.get_global_option("DF_BASIS_CC"),
                                       "RIFIT", core.get_global_option("BASIS"))
    ref_wfn.set_basisset("DF_BASIS_CC", ri_fit_basis)

    # Decontracted basis for scalar-relativistic Hamiltonians
    if core.get_global_option("RELATIVISTIC") in ["X2C", "DKH"]:
        rel_bas = core.BasisSet.build(ref_wfn.molecule(), "BASIS_RELATIVISTIC",
                                      core.get_option("SCF", "BASIS_RELATIVISTIC"),
                                      "DECON", core.get_global_option('BASIS'),
                                      puream=ref_wfn.basisset().has_puream())
        ref_wfn.set_basisset('BASIS_RELATIVISTIC', rel_bas)

    fnocc_wfn = core.fnocc(ref_wfn)

    # Mirror wavefunction psivars into the global scope
    for key, val in fnocc_wfn.variables().items():
        core.set_variable(key, val)

    optstash.restore()
    return fnocc_wfn
def run_fnocc(name, **kwargs):
    """Drive a conventional FNOCC-module computation: QCISD(T), CCSD(T),
    MP2.5, MP3, MP4 and their frozen-natural-orbital (fno-) variants.

    >>> energy('fno-ccsd(t)')

    """
    kwargs = p4util.kwargs_lower(kwargs)
    level = kwargs.get('level', 0)

    # stash user options so they can be restored on exit
    optstash = p4util.OptionsState(
        ['TRANSQT2', 'WFN'],
        ['FNOCC', 'RUN_MP2'],
        ['FNOCC', 'RUN_MP3'],
        ['FNOCC', 'RUN_MP4'],
        ['FNOCC', 'RUN_CCSD'],
        ['FNOCC', 'COMPUTE_TRIPLES'],
        ['FNOCC', 'COMPUTE_MP4_TRIPLES'],
        ['FNOCC', 'DFCC'],
        ['FNOCC', 'RUN_CEPA'],
        ['FNOCC', 'USE_DF_INTS'],
        ['FNOCC', 'NAT_ORBS'])

    # this is the conventional (non-DF, non-CEPA) entry point
    for keyword in ('DFCC', 'RUN_CEPA', 'USE_DF_INTS'):
        core.set_local_option('FNOCC', keyword, False)

    # per-method FNOCC keyword settings (same effect as the original
    # if/elif chain; an unrecognized name sets nothing, as before)
    method_options = {
        'ccsd':         {'COMPUTE_TRIPLES': False, 'RUN_CCSD': True},
        'ccsd(t)':      {'COMPUTE_TRIPLES': True, 'RUN_CCSD': True},
        'fno-ccsd':     {'COMPUTE_TRIPLES': False, 'RUN_CCSD': True, 'NAT_ORBS': True},
        'fno-ccsd(t)':  {'COMPUTE_TRIPLES': True, 'RUN_CCSD': True, 'NAT_ORBS': True},
        'qcisd':        {'COMPUTE_TRIPLES': False, 'RUN_CCSD': False},
        'qcisd(t)':     {'COMPUTE_TRIPLES': True, 'RUN_CCSD': False},
        'fno-qcisd':    {'COMPUTE_TRIPLES': False, 'RUN_CCSD': False, 'NAT_ORBS': True},
        'fno-qcisd(t)': {'COMPUTE_TRIPLES': True, 'NAT_ORBS': True, 'RUN_CCSD': False},
        'mp2':          {'RUN_MP2': True},
        'fno-mp3':      {'RUN_MP3': True, 'NAT_ORBS': True},
        'fno-mp4':      {'RUN_MP4': True, 'COMPUTE_MP4_TRIPLES': True,
                         'COMPUTE_TRIPLES': True, 'NAT_ORBS': True},
        'mp4(sdq)':     {'RUN_MP4': True, 'COMPUTE_MP4_TRIPLES': False,
                         'COMPUTE_TRIPLES': False},
        'fno-mp4(sdq)': {'RUN_MP4': True, 'COMPUTE_MP4_TRIPLES': False,
                         'COMPUTE_TRIPLES': False, 'NAT_ORBS': True},
        'mp3':          {'RUN_MP3': True},
        'mp4':          {'RUN_MP4': True, 'COMPUTE_MP4_TRIPLES': True,
                         'COMPUTE_TRIPLES': True},
    }
    for keyword, value in method_options.get(name, {}).items():
        core.set_local_option('FNOCC', keyword, value)

    # open-shell references are not supported by this code path
    if core.get_option('SCF', 'REFERENCE') != 'RHF':
        raise ValidationError(f"""Error: {name} requires 'reference rhf'.""")

    # bypass the scf call if a reference wavefunction is given
    ref_wfn = kwargs.get('ref_wfn', None)
    if ref_wfn is None:
        ref_wfn = scf_helper(name, **kwargs)  # C1 certified

    if core.get_option('FNOCC', 'USE_DF_INTS'):
        # build the SCF fitting basis needed for the DF integral path
        core.print_out("  Constructing Basis Sets for FNOCC...\n\n")
        scf_aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SCF",
                                            core.get_option("SCF", "DF_BASIS_SCF"),
                                            "JKFIT", core.get_global_option('BASIS'),
                                            puream=ref_wfn.basisset().has_puream())
        ref_wfn.set_basisset("DF_BASIS_SCF", scf_aux_basis)
    else:
        # ensure IWL files have been written for the conventional path
        proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), ref_wfn)

    if core.get_global_option("RELATIVISTIC") in ["X2C", "DKH"]:
        # decontracted basis for the scalar-relativistic Hamiltonian
        rel_bas = core.BasisSet.build(ref_wfn.molecule(), "BASIS_RELATIVISTIC",
                                      core.get_option("SCF", "BASIS_RELATIVISTIC"),
                                      "DECON", core.get_global_option('BASIS'),
                                      puream=ref_wfn.basisset().has_puream())
        ref_wfn.set_basisset('BASIS_RELATIVISTIC', rel_bas)

    fnocc_wfn = core.fnocc(ref_wfn)

    # set current correlation energy and total energy. only need to treat mpn here.
    mpn_prefix = {
        'mp3': 'MP3', 'fno-mp3': 'MP3',
        'mp4(sdq)': 'MP4(SDQ)', 'fno-mp4(sdq)': 'MP4(SDQ)',
        'mp4': 'MP4', 'fno-mp4': 'MP4',
    }
    if name in mpn_prefix:
        prefix = mpn_prefix[name]
        fnocc_wfn.set_variable("CURRENT ENERGY", fnocc_wfn.variable(prefix + " TOTAL ENERGY"))
        fnocc_wfn.set_variable("CURRENT CORRELATION ENERGY", fnocc_wfn.variable(prefix + " CORRELATION ENERGY"))

    # Shove variables into global space
    for k, v in fnocc_wfn.variables().items():
        core.set_variable(k, v)

    optstash.restore()
    return fnocc_wfn
def run_cepa(name, **kwargs):
    """Drive a CEPA-family calculation (LCCD, LCCSD, CEPA(n), ACPF, AQCC,
    CISD and their fno- variants) through the FNOCC module.

    >>> energy('cepa(1)')

    """
    kwargs = p4util.kwargs_lower(kwargs)

    # save user options
    optstash = p4util.OptionsState(
        ['TRANSQT2', 'WFN'],
        ['FNOCC', 'NAT_ORBS'],
        ['FNOCC', 'RUN_CEPA'],
        ['FNOCC', 'USE_DF_INTS'],
        ['FNOCC', 'CEPA_NO_SINGLES'])

    core.set_local_option('FNOCC', 'RUN_CEPA', True)
    core.set_local_option('FNOCC', 'USE_DF_INTS', False)

    # map the requested method onto the FNOCC CEPA_LEVEL keyword
    level_for_name = {
        'lccd': 'cepa(0)', 'fno-lccd': 'cepa(0)',
        'cepa(0)': 'cepa(0)', 'fno-cepa(0)': 'cepa(0)',
        'lccsd': 'cepa(0)', 'fno-lccsd': 'cepa(0)',
        'cepa(1)': 'cepa(1)', 'fno-cepa(1)': 'cepa(1)',
        'cepa(3)': 'cepa(3)', 'fno-cepa(3)': 'cepa(3)',
        'acpf': 'acpf', 'fno-acpf': 'acpf',
        'aqcc': 'aqcc', 'fno-aqcc': 'aqcc',
        'cisd': 'cisd', 'fno-cisd': 'cisd',
    }
    if name not in level_for_name:
        raise ValidationError("""Error: %s not implemented\n""" % name)
    cepa_level = level_for_name[name]

    # LCCD is CEPA(0) without singles; the other CEPA(0) spellings keep them
    if name in ('lccd', 'fno-lccd'):
        core.set_local_option('FNOCC', 'CEPA_NO_SINGLES', True)
    elif cepa_level == 'cepa(0)':
        core.set_local_option('FNOCC', 'CEPA_NO_SINGLES', False)

    core.set_local_option('FNOCC', 'CEPA_LEVEL', cepa_level.upper())

    # all valid fno- spellings request frozen natural orbitals
    if name.startswith('fno-'):
        core.set_local_option('FNOCC', 'NAT_ORBS', True)

    # open-shell references are not supported by this code path
    if core.get_option('SCF', 'REFERENCE') != 'RHF':
        raise ValidationError("""Error: %s requires 'reference rhf'.""" % name)
    reference = core.get_option('SCF', 'REFERENCE')

    if core.get_global_option('CC_TYPE') != "CONV":
        raise ValidationError("""CEPA methods from FNOCC module require 'cc_type conv'.""")

    # bypass the scf call if a reference wavefunction is given
    ref_wfn = kwargs.get('ref_wfn', None)
    if ref_wfn is None:
        ref_wfn = scf_helper(name, **kwargs)  # C1 certified

    if core.get_option('FNOCC', 'USE_DF_INTS'):
        # banner text says FISAPT upstream; reproduced byte-identically
        core.print_out("  Constructing Basis Sets for FISAPT...\n\n")
        scf_aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SCF",
                                            core.get_option("SCF", "DF_BASIS_SCF"),
                                            "JKFIT", core.get_global_option('BASIS'),
                                            puream=ref_wfn.basisset().has_puream())
        ref_wfn.set_basisset("DF_BASIS_SCF", scf_aux_basis)
    else:
        # ensure IWL files have been written for the conventional path
        proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), ref_wfn)

    fnocc_wfn = core.fnocc(ref_wfn)

    # one-electron properties
    if core.get_option('FNOCC', 'DIPMOM'):
        if cepa_level in ('cepa(1)', 'cepa(3)') or core.get_option('FNOCC', 'NAT_ORBS'):
            # same message for both unsupported cases, as upstream
            core.print_out("""\n    Error: one-electron properties not implemented for %s\n\n""" % name)
        else:
            p4util.oeprop(fnocc_wfn, 'DIPOLE', 'QUADRUPOLE', 'MULLIKEN_CHARGES', 'NO_OCCUPATIONS', title=cepa_level.upper())

    # Shove variables into global space
    for k, v in fnocc_wfn.variables().items():
        core.set_variable(k, v)

    optstash.restore()
    return fnocc_wfn
def run_detcas(name, **kwargs):
    """Function encoding sequence of PSI module calls for
    determinant-based multireference wavefunctions,
    namely CASSCF and RASSCF.

    Accepts ``ref_wfn`` in kwargs to bypass the reference computation.
    Returns the converged MCSCF wavefunction.
    """
    optstash = p4util.OptionsState(
        ['DETCI', 'WFN'],
        ['SCF_TYPE'],
        ['ONEPDM'],
        ['OPDM_RELAX']
    )

    # only closed- and open-shell restricted references are supported
    user_ref = core.get_option('DETCI', 'REFERENCE')
    if user_ref not in ['RHF', 'ROHF']:
        raise ValidationError('Reference %s for DETCI is not available.' % user_ref)

    if name == 'rasscf':
        core.set_local_option('DETCI', 'WFN', 'RASSCF')
    elif name == 'casscf':
        core.set_local_option('DETCI', 'WFN', 'CASSCF')
    else:
        raise ValidationError("Run DETCAS: Name %s not understood" % name)

    ref_wfn = kwargs.get('ref_wfn', None)
    if ref_wfn is None:
        # options touched only while generating the reference; restored below
        ref_optstash = p4util.OptionsState(
            ['SCF_TYPE'],
            ['DF_BASIS_SCF'],
            ['DF_BASIS_MP2'],
            ['ONEPDM'],
            ['OPDM_RELAX']
        )

        # No real reason to do a conventional guess
        if not core.has_global_option_changed('SCF_TYPE'):
            core.set_global_option('SCF_TYPE', 'DF')

        # If RHF get MP2 NO's
        # Why doesnt this work for conv?
        if (('DF' in core.get_global_option('SCF_TYPE')) and (user_ref == 'RHF') and
            (core.get_option('DETCI', 'MCSCF_TYPE') in ['DF', 'AO']) and
                (core.get_option("DETCI", "MCSCF_GUESS") == "MP2")):
            # unrelaxed MP2 one-particle density -> natural-orbital guess
            core.set_global_option('ONEPDM', True)
            core.set_global_option('OPDM_RELAX', False)
            ref_wfn = run_dfmp2_gradient(name, **kwargs)
        else:
            ref_wfn = scf_helper(name, **kwargs)

        # Ensure IWL files have been written
        if (core.get_option('DETCI', 'MCSCF_TYPE') == 'CONV'):
            mints = core.MintsHelper(ref_wfn.basisset())
            mints.set_print(1)
            mints.integrals()

        ref_optstash.restore()

    # The DF case
    if core.get_option('DETCI', 'MCSCF_TYPE') == 'DF':
        if not core.has_global_option_changed('SCF_TYPE'):
            core.set_global_option('SCF_TYPE', 'DF')

        core.print_out("  Constructing Basis Sets for MCSCF...\n\n")
        scf_aux_basis = core.BasisSet.build(ref_wfn.molecule(), "DF_BASIS_SCF",
                                            core.get_option("SCF", "DF_BASIS_SCF"),
                                            "JKFIT", core.get_global_option('BASIS'),
                                            puream=ref_wfn.basisset().has_puream())
        ref_wfn.set_basisset("DF_BASIS_SCF", scf_aux_basis)

    # The AO case
    elif core.get_option('DETCI', 'MCSCF_TYPE') == 'AO':
        if not core.has_global_option_changed('SCF_TYPE'):
            core.set_global_option('SCF_TYPE', 'DIRECT')

    # The conventional case
    elif core.get_option('DETCI', 'MCSCF_TYPE') == 'CONV':
        if not core.has_global_option_changed('SCF_TYPE'):
            core.set_global_option('SCF_TYPE', 'PK')

        # Ensure IWL files have been written
        proc_util.check_iwl_file_from_scf_type(core.get_global_option('SCF_TYPE'), ref_wfn)
    else:
        raise ValidationError("Run DETCAS: MCSCF_TYPE %s not understood." % str(core.get_option('DETCI', 'MCSCF_TYPE')))

    # Second-order SCF requires non-symmetric density matrix support
    if core.get_option('DETCI', 'MCSCF_ALGORITHM') in ['AH', 'OS']:
        proc_util.check_non_symmetric_jk_density("Second-order MCSCF")

    ciwfn = mcscf.mcscf_solver(ref_wfn)

    # We always would like to print a little dipole information
    oeprop = core.OEProp(ciwfn)
    oeprop.set_title(name.upper())
    oeprop.add("DIPOLE")
    oeprop.compute()
    ciwfn.oeprop = oeprop
    # retire components by v1.5
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        core.set_variable("CURRENT DIPOLE X", core.variable(name.upper() + " DIPOLE X"))
        core.set_variable("CURRENT DIPOLE Y", core.variable(name.upper() + " DIPOLE Y"))
        core.set_variable("CURRENT DIPOLE Z", core.variable(name.upper() + " DIPOLE Z"))
    core.set_variable("CURRENT DIPOLE", core.variable(name.upper() + " DIPOLE"))

    # Shove variables into global space
    for k, v in ciwfn.variables().items():
        core.set_variable(k, v)

    optstash.restore()
    return ciwfn
def run_efp(name, **kwargs):
    """Function encoding sequence of module calls for a pure EFP
    computation (ignore any QM atoms).

    Unlike most run_* functions here, returns the total EFP energy
    (a float) rather than a wavefunction.
    """
    efp_molecule = kwargs.get('molecule', core.get_active_molecule())
    try:
        efpobj = efp_molecule.EFP
    except AttributeError:
        raise ValidationError("""Method 'efp' not available without EFP fragments in molecule""")

    # print efp geom in [A]
    core.print_out(efpobj.banner())
    core.print_out(efpobj.geometry_summary(units_to_bohr=constants.bohr2angstroms))

    # set options
    # * 'chtr', 'qm_exch', 'qm_disp', 'qm_chtr' may be enabled in a future libefp release
    # only forward options the user actually changed
    efpopts = {}
    for opt in ['elst', 'exch', 'ind', 'disp',
                'elst_damping', 'ind_damping', 'disp_damping']:
        psiopt = 'EFP_' + opt.upper()
        if core.has_option_changed('EFP', psiopt):
            efpopts[opt] = core.get_option('EFP', psiopt)
    # QM-EFP coupling terms are irrelevant in a pure-EFP run
    efpopts['qm_elst'] = False
    efpopts['qm_ind'] = False
    efpobj.set_opts(efpopts, label='psi', append='psi')

    do_gradient = core.get_option('EFP', 'DERTYPE') == 'FIRST'

    # compute and report
    efpobj.compute(do_gradient=do_gradient)
    core.print_out(efpobj.energy_summary(label='psi'))

    # publish the EFP energy components as PSI variables
    ene = efpobj.get_energy(label='psi')
    core.set_variable('EFP ELST ENERGY', ene['electrostatic'] + ene['charge_penetration'] + ene['electrostatic_point_charges'])
    core.set_variable('EFP IND ENERGY', ene['polarization'])
    core.set_variable('EFP DISP ENERGY', ene['dispersion'])
    core.set_variable('EFP EXCH ENERGY', ene['exchange_repulsion'])
    core.set_variable('EFP TOTAL ENERGY', ene['total'])
    core.set_variable('CURRENT ENERGY', ene['total'])

    if do_gradient:
        core.print_out(efpobj.gradient_summary())
        # one 6-component row (force + torque) per fragment
        torq = efpobj.get_gradient()
        torq = core.Matrix.from_array(np.asarray(torq).reshape(-1, 6))
        core.set_variable("EFP TORQUE", torq)  # P::e EFP

    return ene['total']
|
psi4/psi4
|
psi4/driver/procrouting/proc.py
|
Python
|
lgpl-3.0
| 203,989
|
[
"Psi4"
] |
fc32899d9aa2f128f3f81d746496c93de54acb4553c73bbeeaaefc71ff58698a
|
# NOTE: This example uses the next generation Twilio helper library - for more
# information on how to download and install this version, visit
# https://www.twilio.com/docs/libraries/python
import os

from twilio.rest import Client

# Your Account Sid and Auth Token from twilio.com/user/account
# To set up environmental variables, see http://twil.io/secure
client = Client(os.environ['TWILIO_ACCOUNT_SID'], os.environ['TWILIO_AUTH_TOKEN'])

# Fetch every Notify credential on the account and print its friendly name.
for credential in client.notify.credentials.list():
    print(credential.friendly_name)
|
TwilioDevEd/api-snippets
|
notifications/rest/credentials/list-credential/list-credential.7.x.py
|
Python
|
mit
| 590
|
[
"VisIt"
] |
1ae7504e8f8a4d4d51b9ab268a946d86e4233437cf35fff73db734abab97fb7e
|
#!/usr/bin/env python
#
# Wrapper script for starting the biopet-vcffilter JAR package
#
# This script is written for use with the Conda package manager and is copied
# from the peptide-shaker wrapper. Only the parameters are changed.
# (https://github.com/bioconda/bioconda-recipes/blob/master/recipes/peptide-shaker/peptide-shaker.py)
#
# This file was automatically generated by the sbt-bioconda plugin.
import os
import subprocess
import sys
import shutil
from os import access
from os import getenv
from os import X_OK
# Name of the bundled assembly jar, located relative to this wrapper
# (or to --exec_dir when given).
jar_file = 'vcffilter-assembly-0.2.jar'
# JVM memory options used when the caller supplies none and _JAVA_OPTIONS
# is unset; empty means "accept the JVM defaults".
default_jvm_mem_opts = []

# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
    """Return the symlink-resolved, canonicalized directory-portion of path."""
    canonical = os.path.realpath(path)
    return os.path.dirname(canonical)
def java_executable():
    """Return the executable name of the Java interpreter.

    Prefers $JAVA_HOME/bin/java when it exists and is executable,
    otherwise falls back to plain 'java' from the PATH.
    """
    java_home = getenv('JAVA_HOME')
    java_bin = os.path.join('bin', 'java')
    if not java_home:
        return 'java'
    candidate = os.path.join(java_home, java_bin)
    if access(candidate, X_OK):
        return candidate
    return 'java'
def jvm_opts(argv):
    """Construct list of Java arguments based on our argument list.

    The argument list passed in argv must not include the script name.
    The return value is a 4-tuple of the form:
      (memory_options, prop_options, passthrough_options, exec_dir)
    where the first three elements are lists of strings and exec_dir is
    the directory named by --exec_dir= (or None when not supplied).
    """
    mem_opts = []
    prop_opts = []
    pass_args = []
    exec_dir = None

    for arg in argv:
        if arg.startswith('-D'):
            # Java system-property definitions
            prop_opts.append(arg)
        elif arg.startswith('-XX'):
            # advanced JVM flags are forwarded like properties
            prop_opts.append(arg)
        elif arg.startswith('-Xm'):
            # heap options (-Xms / -Xmx)
            mem_opts.append(arg)
        elif arg.startswith('--exec_dir='):
            exec_dir = arg.split('=')[1].strip('"').strip("'")
            if not os.path.exists(exec_dir):
                # first use: copy the distribution next to the writable dir
                shutil.copytree(real_dirname(sys.argv[0]), exec_dir, symlinks=False, ignore=None)
        else:
            pass_args.append(arg)

    # In the original shell script the test coded below read:
    # if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ]
    # To reproduce the behaviour of the above shell code fragment
    # it is important to explicitly check for equality with None
    # in the second condition, so a null envar value counts as True!
    if mem_opts == [] and getenv('_JAVA_OPTIONS') is None:
        mem_opts = default_jvm_mem_opts

    return (mem_opts, prop_opts, pass_args, exec_dir)
def main():
    """Assemble the java command line for the bundled jar and exec it.

    This wrapper was copied from the peptide-shaker recipe: the jar
    updates files relative to the path of the jar file, so in a
    multiuser setting the option --exec_dir="exec_dir" can be used as a
    writable location for the distribution.  If exec_dir does not
    exist, jvm_opts copies the jar file, lib, and resources there.
    """
    java = java_executable()
    (mem_opts, prop_opts, pass_args, exec_dir) = jvm_opts(sys.argv[1:])
    jar_dir = exec_dir if exec_dir else real_dirname(sys.argv[0])

    # A first passthrough argument starting with 'eu' is taken to be a main
    # class name inside the assembly, so run with -cp; otherwise run the jar.
    if pass_args != [] and pass_args[0].startswith('eu'):
        jar_arg = '-cp'
    else:
        jar_arg = '-jar'
    jar_path = os.path.join(jar_dir, jar_file)

    java_args = [java] + mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args

    # propagate the JVM's exit status to the caller
    sys.exit(subprocess.call(java_args))


if __name__ == '__main__':
    main()
|
joachimwolff/bioconda-recipes
|
recipes/biopet-vcffilter/biopet-vcffilter.py
|
Python
|
mit
| 3,369
|
[
"Bioconda"
] |
4adf52ae1bcfa74ef3f916035fc4caee619f3c98cddd06ddeeb28d358553e4d7
|
"""
This class brings together a L{solve.Solver} to choose a set of implmentations, a
L{fetch.Fetcher} to download additional components, and the user's configuration
settings.
@since: 0.53
"""
# Copyright (C) 2011, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import _, logger
import os, logging
from zeroinstall.injector import arch, reader
from zeroinstall.injector.model import network_offline
from zeroinstall.support import tasks
class Driver(object):
	"""Chooses a set of implementations based on a policy.
	Typical use:
	 1. Create a Driver object, giving it the requirements about the program to be run.
	 2. Call L{solve_with_downloads}. If more information is needed, a L{fetch.Fetcher} will be used to download it.
	 3. When all downloads are complete, the L{solver} contains the chosen versions.
	 4. Use L{get_uncached_implementations} to find where to get these versions and download them
	    using L{download_uncached_implementations}.
	@ivar target_arch: target architecture for binaries (deprecated)
	@type target_arch: L{arch.Architecture}
	@ivar solver: solver used to choose a set of implementations
	@type solver: L{solve.Solver}
	@ivar watchers: callbacks to invoke after solving
	"""
	__slots__ = ['watchers', 'requirements', 'config', 'target_arch', 'solver']

	def __init__(self, config, requirements):
		"""@param config: The configuration settings to use
		@type config: L{config.Config}
		@param requirements: Details about the program we want to run
		@type requirements: L{requirements.Requirements}
		@since: 0.53"""
		self.watchers = []

		assert config
		self.config = config

		assert requirements
		self.requirements = requirements

		self.target_arch = arch.get_architecture(requirements.os, requirements.cpu)

		# imported lazily here; presumably to avoid a circular import -- confirm
		from zeroinstall.injector.solver import DefaultSolver
		self.solver = DefaultSolver(self.config)

		logger.debug(_("Supported systems: '%s'"), arch.os_ranks)
		logger.debug(_("Supported processors: '%s'"), arch.machine_ranks)

		self.solver.extra_restrictions = requirements.get_extra_restrictions(self.config.iface_cache)

	def get_uncached_implementations(self):
		"""List all chosen implementations which aren't yet available locally.
		@rtype: [(L{model.Interface}, L{model.Implementation})]"""
		iface_cache = self.config.iface_cache
		stores = self.config.stores
		uncached = []
		for uri, selection in self.solver.selections.selections.items():
			impl = selection.impl
			assert impl, self.solver.selections
			if not impl.is_available(stores):
				uncached.append((iface_cache.get_interface(uri), impl))
		return uncached

	@tasks.async
	def solve_with_downloads(self, force = False, update_local = False):
		"""Run the solver, then download any feeds that are missing or
		that need to be updated. Each time a new feed is imported into
		the cache, the solver is run again, possibly adding new downloads.
		@param force: whether to download even if we're already ready to run.
		@type force: bool
		@param update_local: fetch PackageKit feeds even if we're ready to run.
		@type update_local: bool"""

		downloads_finished = set()		# Successful or otherwise
		downloads_in_progress = {}		# URL -> Download

		# There are three cases:
		# 1. We want to run immediately if possible. If not, download all the information we can.
		#    (force = False, update_local = False)
		# 2. We're in no hurry, but don't want to use the network unnecessarily.
		#    We should still update local information (from PackageKit).
		#    (force = False, update_local = True)
		# 3. The user explicitly asked us to refresh everything.
		#    (force = True)

		try_quick_exit = not (force or update_local)

		while True:
			self.solver.solve_for(self.requirements)
			for w in self.watchers: w()

			if try_quick_exit and self.solver.ready:
				break
			try_quick_exit = False

			# a failed solve forces a full refresh of all feeds below
			if not self.solver.ready:
				force = True

			for f in self.solver.feeds_used:
				if f in downloads_finished or f in downloads_in_progress:
					continue
				if os.path.isabs(f):
					# local feed file: reload from disk rather than the network
					if force:
						try:
							self.config.iface_cache.get_feed(f, force = True)
						except reader.MissingLocalFeed as ex:
							logger.warning("Reloading %s: %s", f, ex,
									exc_info = True if logger.isEnabledFor(logging.INFO) else None)

						downloads_in_progress[f] = tasks.IdleBlocker('Refresh local feed')
					continue
				elif f.startswith('distribution:'):
					# PackageKit-style feed: cheap to query, so also
					# refreshed when only update_local was requested
					if force or update_local:
						downloads_in_progress[f] = self.config.fetcher.download_and_import_feed(f, self.config.iface_cache)
				elif force and self.config.network_use != network_offline:
					downloads_in_progress[f] = self.config.fetcher.download_and_import_feed(f, self.config.iface_cache)
					# Once we've starting downloading some things,
					# we might as well get them all.
					force = True

			if not downloads_in_progress:
				if self.config.network_use == network_offline:
					logger.info(_("Can't choose versions and in off-line mode, so aborting"))
				break

			# Wait for at least one download to finish
			blockers = downloads_in_progress.values()
			yield blockers
			tasks.check(blockers, self.config.handler.report_error)

			for f in list(downloads_in_progress.keys()):
				if f in downloads_in_progress and downloads_in_progress[f].happened:
					del downloads_in_progress[f]
					downloads_finished.add(f)

					# Need to refetch any "distribution" feed that
					# depends on this one
					distro_feed_url = 'distribution:' + f
					if distro_feed_url in downloads_finished:
						downloads_finished.remove(distro_feed_url)
					if distro_feed_url in downloads_in_progress:
						del downloads_in_progress[distro_feed_url]

	@tasks.async
	def solve_and_download_impls(self, refresh = False, select_only = False):
		"""Run L{solve_with_downloads} and then get the selected implementations too.
		@type refresh: bool
		@type select_only: bool
		@raise SafeException: if we couldn't select a set of implementations
		@since: 0.40"""
		refreshed = self.solve_with_downloads(refresh)
		if refreshed:
			yield refreshed
			tasks.check(refreshed)

		if not self.solver.ready:
			raise self.solver.get_failure_reason()

		if not select_only:
			downloaded = self.download_uncached_implementations()
			if downloaded:
				yield downloaded
				tasks.check(downloaded)

	def need_download(self):
		"""Decide whether we need to download anything (but don't do it!)
		@return: true if we MUST download something (feeds or implementations)
		@rtype: bool"""
		self.solver.solve_for(self.requirements)
		for w in self.watchers: w()

		if not self.solver.ready:
			return True		# Maybe a newer version will work?

		if self.get_uncached_implementations():
			return True

		return False

	def download_uncached_implementations(self):
		"""Download all implementations chosen by the solver that are missing from the cache.
		@rtype: L{zeroinstall.support.tasks.Blocker}"""
		assert self.solver.ready, "Solver is not ready!\n%s" % self.solver.selections
		return self.solver.selections.download_missing(self.config, include_packages = True)
|
rammstein/0install
|
zeroinstall/injector/driver.py
|
Python
|
lgpl-2.1
| 7,097
|
[
"VisIt"
] |
386f635793f363f634c6d31939dd1be852812c94036b7f5e4516c73aa43a81b0
|
# https://flashair-developers.com/en/documents/api/
from __future__ import unicode_literals, print_function
import attr
from attr.validators import and_, instance_of
from constantly import FlagConstant, Flags
from twisted.python.url import URL
from twisted.python.filepath import FilePath
from twisted.internet.defer import inlineCallbacks
from twisted.web.http import OK
from twisted.web.client import PartialDownloadError, readBody
class FileAttributes(Flags):
    # Bit flags of the attribute field reported by the FlashAir file-list
    # API, declared least-significant bit first (FlagConstant auto-assigns
    # ascending powers of two).
    READONLY = FlagConstant()
    HIDDEN = FlagConstant()
    SYSTEM = FlagConstant()
    VOLUME = FlagConstant()
    # NOTE(review): "DIRECTLY" is presumably a typo for DIRECTORY (the
    # directory bit); renaming would break callers (e.g. visit()), so kept.
    DIRECTLY = FlagConstant()
    ARCHIVE = FlagConstant()
@attr.s(frozen=True)
class File(object):
    """One entry of a FlashAir directory listing (built by GetFileList)."""

    name = attr.ib(validator=instance_of(FilePath))  # full path on the card
    size = attr.ib(validator=instance_of(int))       # size as reported by the card
    # NOTE(review): two concerns here -- (1) attrs ignores a validator's
    # return value (validators signal by raising), so this membership test
    # never actually rejects anything; (2) even if it raised, a combined
    # flag built by lookupByValue (e.g. HIDDEN|ARCHIVE) is not a member of
    # iterconstants() and would be rejected.  Confirm intended behavior.
    attributes = attr.ib(
        validator=lambda self, attr, value: (
            value in FileAttributes.iterconstants()
        ),
    )
    date = attr.ib(validator=instance_of(int))       # raw integer date field from the listing
    time = attr.ib(validator=instance_of(int))       # raw integer time field from the listing
def has_attribute(which):
    """Build an attrs-style validator requiring flag ``which`` to be set
    on the ``attributes`` of the validated File value."""
    def validator(self, attr, file):
        present = file.attributes
        if not (present & which):
            raise ValueError(
                "{} missing required attribute {}".format(present, which)
            )
    return validator
@attr.s(frozen=True)
class DeleteFile(object):
    """upload.cgi DEL request: remove one file from the card.

    The target must be a File carrying the ARCHIVE attribute (a regular
    file, not a directory).
    """

    file = attr.ib(
        validator=and_(
            instance_of(File),
            has_attribute(FileAttributes.ARCHIVE),
        ),
    )

    def uri(self):
        # Relative URI; execute() grafts on the card's scheme/host/port.
        return URL(
            path=["upload.cgi"],
            query=[
                ("DEL", self.file.name.path),
            ],
        )

    def headers(self):
        # No extra request headers needed.
        return None

    def body(self):
        # The request is issued as a GET by execute(); no body.
        return None

    def process_response(self, response):
        # Read the body so any non-200 status can be reported with its text.
        d = readBody(response)

        def read(body):
            if response.code != OK:
                raise Exception(
                    "Unexpected response code {}:\n{}".format(response.code, body)
                )
            return None

        d.addCallback(read)
        return d
@attr.s(frozen=True)
class GetFileList(object):
    """command.cgi op=100 request: list the files in one directory."""

    # FlashAir API opcode for "get file list"
    opcode = 100
    # card firmware version this opcode requires
    minimum_version = "1.00.03"

    directory = attr.ib(validator=instance_of(FilePath))

    def uri(self):
        # Relative URI; execute() grafts on the card's scheme/host/port.
        return URL(
            path=["command.cgi"],
            query=[
                ("op", "{}".format(self.opcode)),
                ("DIR", self.directory.path),
            ],
        )

    def headers(self):
        # No extra request headers needed.
        return None

    def body(self):
        # The request is issued as a GET by execute(); no body.
        return None

    def process_response(self, response):
        d = readBody(response)

        def read(body):
            # NOTE(review): this function contains ``yield`` and so returns
            # a generator -- the status check and the parsing below run
            # lazily, only once the caller iterates the result.  Confirm
            # the deferred error reporting is intentional.
            if response.code != OK:
                raise Exception(
                    "Unexpected response code {}:\n{}".format(response.code, body)
                )
            # body: header line, then one CSV row per entry, CRLF-separated
            lines = body.decode("utf-8").split("\r\n")
            if lines[0] == "WLANSD_FILELIST":  # XXX???
                lines = lines[1:-1]
            else:
                raise Exception("Whuat? {}".format(lines))
            for line in lines:
                # each row: <dir>,<name>,<size>,<attributes>,<date>,<time>
                parts = line.split(",")
                if len(parts) != 6:
                    raise Exception("Uauaua {}".format(parts))
                yield File(
                    name=FilePath(parts[0] or "/").child(parts[1]),
                    size=int(parts[2]),
                    attributes=lookupByValue(FileAttributes, int(parts[3])),
                    date=int(parts[4]),
                    time=int(parts[5]),
                )

        # Treat a PartialDownloadError as success, substituting
        # (presumably) the partially-downloaded body -- confirm args[2].
        d.addErrback(lambda reason: reason.check(PartialDownloadError) and reason.value.args[2])
        d.addCallback(read)
        return d
def lookupByValue(constants, flags):
    """Return the union of declared constants whose bits are set in
    the integer ``flags``; raise ValueError when none match."""
    matching = [c for c in constants.iterconstants() if c.value & flags]
    if not matching:
        raise ValueError("File attribute unknown: {}".format(flags))
    combined = matching[0]
    for extra in matching[1:]:
        combined |= extra
    return combined
def get_file_list(treq, root, path):
    # List the directory ``path`` on the card rooted at URL ``root``.
    return execute(treq, root, GetFileList(directory=path))


def download_file(treq, root, path):
    # Fetch a file's raw contents directly by URL (no command.cgi involved).
    # NOTE(review): FilePath(b"/") is bytes-mode; if ``path`` is a text-mode
    # FilePath, segmentsFrom mixes bytes and text -- confirm callers pass
    # bytes paths here.
    uri = root.replace(path=path.segmentsFrom(FilePath(b"/")))
    url = uri.asURI().asText().encode("ascii")
    return treq.get(url)


def remove_file(treq, root, file):
    # Delete ``file`` from the card; the File must carry the ARCHIVE bit
    # (enforced by DeleteFile's validator).
    return execute(treq, root, DeleteFile(file=file))
def execute(treq, root, operation):
    """Send ``operation``'s request to the card at ``root`` and return a
    Deferred firing with the operation's processed response."""
    # Graft the operation's relative URI onto the card's network location.
    uri = operation.uri().replace(
        scheme=root.scheme,
        host=root.host,
        port=root.port,
    )
    headers = operation.headers()
    url = uri.asURI().asText().encode("ascii")
    # NOTE(review): debug print left in; consider switching to logging.
    print("Getting", url)
    d = treq.get(
        url,
        headers,
    )
    d.addCallback(operation.process_response)
    return d
@inlineCallbacks
def visit(treq, root_uri, root_directory, visitor):
    """Walk the card's directory tree starting at ``root_directory``,
    calling ``visitor(file)`` for every non-directory entry.

    Directories are pushed onto a work stack and listed in turn
    (iterative depth-first traversal).
    """
    work = [root_directory]
    while work:
        path = work.pop()
        files = yield get_file_list(treq, root_uri, path)
        for f in files:
            # DIRECTLY is (presumably) the directory bit -- see FileAttributes
            if f.attributes & FileAttributes.DIRECTLY:
                work.append(f.name)
            else:
                yield visitor(f)
|
exarkun/txflashair
|
src/txflashair/txflashair.py
|
Python
|
mit
| 5,107
|
[
"VisIt"
] |
61757556d1cb02cce82357f84dbfce30b99a3fc852f8f33571006c93eddb5c9d
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
===========================================
Converting the pixel format of video frames
===========================================
These components convert the pixel format of video frames, for example, from
interleaved RGB to planar YUV 420.
Example Usage
-------------
Decoding a Dirac encoded video file, then converting it to RGB for display on
a pygame display surface::
Pipeline( RateControlledFileReader("video.drc",readmode="bytes", rate=100000),
DiracDecoder(),
ToRGB_interleaved(),
VideoSurface(),
).run()
Which component for which conversion?
-------------------------------------
The components here are currently capable of the following pixel format
conversions:
===================== ===================== ===========================
From To Which component?
===================== ===================== ===========================
"RGB_interleaved" "RGB_interleaved" ToRGB_interleaved
"YUV420_planar" "RGB_interleaved" ToRGB_interleaved
"YUV422_planar" "RGB_interleaved" ToRGB_interleaved
"RGB_interleaved" "YUV420_planar" ToYUV420_planar
"YUV420_planar" "YUV420_planar" ToYUV420_planar
===================== ===================== ===========================
More details
------------
Send video frames to the "inbox" inbox of these components. They will be
converted to the destination pixel format and sent out of the "outbox" outbox.
Video frames are dictionaries, they must have the following keys:
* "rgb" or "yuv" -- containing the pixel data
* "pixformat" -- the pixel format
* "size" -- (width,height) in pixels
Any other fields will be transparently passed through, unmodified.
These components support sending data out of its outbox to a size limited inbox.
If the size limited inbox is full, these components will pause until able to
send out the data. Data will not be consumed from the inbox if these components
are waiting to send to the outbox.
If a producerFinished message is received on the "control" inbox, these components
will complete converting and frames pending in its inbox, and finish sending any
resulting data to its outbox. They will then send the producerFinished message
on out of its "signal" outbox and terminate.
If a shutdownMicroprocess message is received on the "control" inbox, these
components will immediately send it on out of its "signal" outbox and immediately
terminate. It will not complete processing, or sending on any pending data.
"""
from Axon.Component import component
from Axon.Ipc import producerFinished, shutdownMicroprocess
from Axon.AxonExceptions import noSpaceInBox
from pixConvert import rgbi_to_yuv420p # RGB_interleaved to YUV420_planar
from pixConvert import yuv420p_to_rgbi # YUV420_planar to RGB_interleaved
from pixConvert import yuv422p_to_rgbi # YUV422_planar to RGB_interleaved
class ToRGB_interleaved(component):
    """\
    ToRGB_interleaved() -> new ToRGB_interleaved component.

    Converts video frames sent to its "inbox" inbox, to "RGB_interleaved" pixel
    format and sends them out of its "outbox".

    Supports conversion from:
    * YUV420_planar
    * YUV422_planar
    * RGB_interleaved (passthrough)
    """

    Inboxes = { "inbox"   : "Video frame",
                "control" : "Shutdown signalling"
              }
    Outboxes = { "outbox" : "RGB_interleaved pixel format video frame",
                 "signal" : "Shutdown signalling",
               }

    class _Shutdown(Exception):
        # Internal control-flow exception used to unwind out of main().
        # Replaces the Python 2 string exception (raise "STOP"), which is a
        # TypeError under Python 3 (exceptions must derive from BaseException).
        pass

    def handleControl(self):
        """Collect any shutdown messages waiting on the "control" inbox.

        A shutdownMicroprocess always wins; a producerFinished is only
        recorded while no shutdownMicroprocess has been seen.
        """
        while self.dataReady("control"):
            msg = self.recv("control")
            if isinstance(msg, producerFinished) and not isinstance(self.shutdownMsg, shutdownMicroprocess):
                self.shutdownMsg = msg
            elif isinstance(msg, shutdownMicroprocess):
                self.shutdownMsg = msg

    def canStop(self):
        """True once any shutdown message has arrived (stop after flushing)."""
        self.handleControl()
        return isinstance(self.shutdownMsg, (producerFinished, shutdownMicroprocess))

    def mustStop(self):
        """True if an immediate shutdown (shutdownMicroprocess) was requested."""
        self.handleControl()
        return isinstance(self.shutdownMsg, shutdownMicroprocess)

    def waitSend(self, data, boxname):
        """Generator: retry sending to a size-limited box until it succeeds.

        Pauses (yielding control) while the destination inbox is full.
        Raises _Shutdown if an immediate shutdown is requested while waiting.
        """
        while 1:
            try:
                self.send(data, boxname)
                return
            except noSpaceInBox:
                if self.mustStop():
                    raise self._Shutdown()
                self.pause()
                yield 1
                if self.mustStop():
                    raise self._Shutdown()

    def main(self):
        """Main loop: convert each incoming frame to RGB_interleaved."""
        self.shutdownMsg = None
        try:
            while 1:
                while self.dataReady("inbox"):
                    frame = self.recv("inbox")
                    if frame['pixformat'] == "RGB_interleaved":
                        # Already the right format - pass through untouched.
                        # (The original also fell through and emitted the frame
                        # a second time; fixed by making this branch exclusive.)
                        for _ in self.waitSend(frame, "outbox"):
                            yield _
                    else:
                        # Only YUV frames carry a 'yuv' key; unpack here rather
                        # than unconditionally so RGB passthrough frames (which
                        # have 'rgb' instead) no longer raise KeyError.
                        W, H = frame['size']
                        Y, U, V = frame['yuv']
                        newframe = {
                            'size'      : (W, H),
                            "pixformat" : "RGB_interleaved",
                        }
                        if frame['pixformat'] == "YUV420_planar":
                            newframe["rgb"] = yuv420p_to_rgbi(Y, U, V, W, H)
                        elif frame['pixformat'] == "YUV422_planar":
                            newframe["rgb"] = yuv422p_to_rgbi(Y, U, V, W, H)
                        # Pass any other fields through, unmodified.
                        for key in frame.keys():
                            if key not in newframe:
                                newframe[key] = frame[key]
                        for _ in self.waitSend(newframe, "outbox"):
                            yield _

                if self.canStop():
                    raise self._Shutdown()

                self.pause()
                yield 1

        except self._Shutdown:
            self.send(self.shutdownMsg, "signal")
class ToYUV420_planar(component):
    """\
    ToYUV420_planar() -> new ToYUV420_planar component.

    Converts video frames sent to its "inbox" inbox, to "YUV420_planar" pixel
    format and sends them out of its "outbox".

    Supports conversion from:
    * RGB_interleaved
    * YUV420_planar (passthrough)
    """

    Inboxes = { "inbox"   : "Video frame",
                "control" : "Shutdown signalling"
              }
    Outboxes = { "outbox" : "YUV420_planar pixel format video frame",
                 "signal" : "Shutdown signalling",
               }

    class _Shutdown(Exception):
        # Internal control-flow exception replacing the Python 2 string
        # exception (raise "STOP"), which is invalid under Python 3.
        pass

    def handleControl(self):
        """Collect any shutdown messages waiting on the "control" inbox.

        A shutdownMicroprocess always wins; a producerFinished is only
        recorded while no shutdownMicroprocess has been seen.
        """
        while self.dataReady("control"):
            msg = self.recv("control")
            if isinstance(msg, producerFinished) and not isinstance(self.shutdownMsg, shutdownMicroprocess):
                self.shutdownMsg = msg
            elif isinstance(msg, shutdownMicroprocess):
                self.shutdownMsg = msg

    def canStop(self):
        """True once any shutdown message has arrived (stop after flushing)."""
        self.handleControl()
        return isinstance(self.shutdownMsg, (producerFinished, shutdownMicroprocess))

    def mustStop(self):
        """True if an immediate shutdown (shutdownMicroprocess) was requested."""
        self.handleControl()
        return isinstance(self.shutdownMsg, shutdownMicroprocess)

    def waitSend(self, data, boxname):
        """Generator: retry sending to a size-limited box until it succeeds.

        Pauses (yielding control) while the destination inbox is full.
        Raises _Shutdown if an immediate shutdown is requested while waiting.
        """
        while 1:
            try:
                self.send(data, boxname)
                return
            except noSpaceInBox:
                if self.mustStop():
                    raise self._Shutdown()
                self.pause()
                yield 1
                if self.mustStop():
                    raise self._Shutdown()

    def main(self):
        """Main loop: convert each incoming frame to YUV420_planar."""
        self.shutdownMsg = None
        try:
            while 1:
                while self.dataReady("inbox"):
                    frame = self.recv("inbox")
                    if frame['pixformat'] == "YUV420_planar":
                        # Already the right format - pass through untouched.
                        for _ in self.waitSend(frame, "outbox"):
                            yield _
                    elif frame['pixformat'] == "RGB_interleaved":
                        rgb = frame['rgb']
                        W, H = frame['size']
                        newframe = {
                            "yuv"         : rgbi_to_yuv420p(rgb, W, H),
                            "size"        : (W, H),
                            "pixformat"   : "YUV420_planar",
                            # Floor division: under Python 3 the original W/2
                            # would make the chroma plane size a float pair.
                            "chroma_size" : (W // 2, H // 2),
                        }
                        # Pass any other fields through, minus the RGB data.
                        for key in frame.keys():
                            if key not in newframe and key != "rgb":
                                newframe[key] = frame[key]
                        for _ in self.waitSend(newframe, "outbox"):
                            yield _

                if self.canStop():
                    raise self._Shutdown()

                self.pause()
                yield 1

        except self._Shutdown:
            self.send(self.shutdownMsg, "signal")
# Components this module exports to Kamaelia's component registry/doc tooling.
__kamaelia_components__ = ( ToRGB_interleaved, ToYUV420_planar )
|
sparkslabs/kamaelia_
|
Sketches/MH/pixformatConversion/PixFormatConversion.py
|
Python
|
apache-2.0
| 10,021
|
[
"DIRAC"
] |
c680b69065521ec381a60ef002ec2cc2645243d22f0b80bdc782159068781e1c
|
"""Classes for handling parameters and dictionaries of default values."""
# Fitting Hyper parameters:
# Time window for steady-state averaging
# IPI curve initial parameters
# fV initial parameters
# Kinetics initial parameters
# Optimisation routine initial parameters
# ...
from __future__ import print_function, division
from collections import OrderedDict, defaultdict
import logging
import abc
# from numpy import inf, nan, isfinite
import numpy as np
# TODO: Move this to models.py and add .setParams() method
from lmfit import Parameters, Parameter
# Units for model parameters (for Brian)
# Replace with http://pythonhosted.org/NeuroTools/parameters.html
from brian2.units.allunits import psiemens, second, mole
from brian2.units.stdunits import *
pS = psiemens
sec = second
# Units used: ms, mm, mV, Hz, # Nonstd: psiemens, second, mole
# nS, uS
__all__ = ['modelParams', 'modelFits', 'stateLabs', 'defaultOpsinType',
'protParams', 'simParams', 'Parameters', 'Parameter']
logger = logging.getLogger(__name__)
# Hyperparameters
#tFromOff = 50 # Time [ms] to start the sample window before the end of the pulse for Iss
# Optimisation initialisation values
#p0fV = (40,4,1)#25000)#,1) # v0,v1,E,G
#p0FV = (40, 4, 1, 0.025)
#p0IPI = (0.5,4000,-1) # a*exp(-t/b)+c #(-1e-8,400,-1e-7)
# Used if plotKinetics
# Initial-guess tuples for the kinetics curve fits (used if plotKinetics).
p0on = (-0.1, 2, -1) # a*exp(-t/b)+c
p0inact = (-0.5, 25, -0.5) # a*exp(-t/b)+c
p0off = (-0.1, 7.5, -0.1, 35, -0.1) # a1*exp(-t/tau1)+a2*exp(-t/tau2)+I_ss
# Add default kinetics parameters
# On phase
# Bi-exponential on-phase fit; a0 is constrained to -a2 via the lmfit expr.
pOn = Parameters()
# pOn.add('a0', value=0, expr='-a2')
pOn.add('a1', value=1, min=1e-9)
pOn.add('a2', value=0.1, min=1e-9)
pOn.add('a0', value=0, expr='-a2')
pOn.add('tau_act', value=5, min=1e-9)
pOn.add('tau_deact', value=50, min=1e-9)
# Off phase
# Iss = pOn['a0'].value + pOn['a1'].value
# Single-exponential off-phase decay: only Gd1 varies; a2/Gd2 are fixed.
pOffSing = Parameters() # copy.deepcopy(pOn)
# Single exponential
pOffSing.add('a0', value=0) # , expr='{}-a1-a2'.format(Iss))
pOffSing.add('a1', value=0, vary=True)
pOffSing.add('a2', value=-0, vary=False)
pOffSing.add('Gd1', value=0.1) # , min=1e-9)
pOffSing.add('Gd2', value=0, vary=False) # , expr='Gd1')#, min=1e-9)
# Double exponential
# Double-exponential off-phase decay with independent rates Gd1 and Gd2.
pOffDoub = Parameters()
pOffDoub.add('a0', value=0, vary=False)
pOffDoub.add('a1', value=0.1)
pOffDoub.add('a2', value=-0.1) # , expr='{}-a0-a1'.format(Iss))
pOffDoub.add('Gd1', value=0.1) # , min=1e-9)
pOffDoub.add('Gd2', value=0.01) # , vary=True) #, expr='Gd1')#, min=1e-9)
# Default model parameters
modelParams = OrderedDict([('3', Parameters()), ('4', Parameters()), ('6', Parameters())])
modelList = list(modelParams) # List of keys: list(modelParams.keys()) #This could be removed
stateLabs = {3: 'Three', '3': 'Three',
4: 'Four', '4': 'Four',
6: 'Six', '6': 'Six'}
modelFits = OrderedDict([('3', OrderedDict([('ChR2', Parameters()),
('NpHR', Parameters()),
('ArchT', Parameters())])),
('4', OrderedDict([('ChR2', Parameters())])),
('6', OrderedDict([('ChR2', Parameters())]))])
# Replace with defaultdict with default=key
modelLabels = OrderedDict([('E', 'E'), ('g0', 'g_0'), ('p', 'p'),
('k_a', 'k_a'), ('k_r', 'k_r'), ('phi_m', '\phi_m'),
('Gd', 'G_d'), ('Gr0', 'G_{r0}'),
('v0', 'v_0'), ('v1', 'v_1'),
('gam', '\gamma'), ('k1', 'k_1'), ('k2', 'k_2'),
('Gf0', 'G_{f0}'), ('Gb0', 'G_{b0}'),
('k_f', 'k_f'), ('k_b', 'k_b'), ('q', 'q'),
('Gd1', 'G_{d1}'), ('Gd2', 'G_{d2}'),
('Go1', 'G_{o1}'), ('Go2', 'G_{o2}'),
('phi', '\phi'), ('v', 'v')])
modelUnits = OrderedDict([('g0', pS), ('gam', 1),
('k_a', ms**-1), ('k_r', ms**-1),
('phi_m', mm**-2*second**-1), ('p', 1),
('Gd', ms**-1), ('Gr0', ms**-1),
('k1', ms**-1), ('k2', ms**-1),
('Gf0', ms**-1), ('Gb0', ms**-1),
('k_f', ms**-1), ('k_b', ms**-1), ('q', 1),
('Gd1', ms**-1), ('Gd2', ms**-1),
('Go1', ms**-1), ('Go2', ms**-1),
('E', mV), ('v0', mV), ('v1', mV),
('phi', mm**-2*second**-1), ('v', mV)])
#paramUnits
unitLabels = OrderedDict([('g0', 'pS'), ('gam', ''),
('k_a', 'ms^-1'), ('k_r', 'ms^-1'),
('phi_m', 'ph./mm^2/s'), ('p', ''),
('Gd', 'ms^-1'), ('Gr0', 'ms^-1'),
('k1', 'ms^-1'), ('k2', 'ms^-1'),
('Gf0', 'ms^-1'), ('Gb0', 'ms^-1'),
('k_f', 'ms^-1'), ('k_b', 'ms^-1'), ('q', ''),
('Gd1', 'ms^-1'), ('Gd2', 'ms^-1'),
('Go1', 'ms^-1'), ('Go2', 'ms^-1'),
('E', 'mV'), ('v0', 'mV'), ('v1', 'mV'),
('phi', 'ph./mm^2/s'), ('v', 'mV')])
####|###10####|###20####|###30####|###40####|###50####|###60####|###70####|###80
# (Name, Value, Vary, Min, Max, Expr=Units)
modelFits['3']['ChR2'].add_many( # Depolarising: passively transports Na+, H+, K+ and Ca2+ down their electrochemical gradients
('g0', 1.57e5, True, 0.001, 1e6, None),
('phi_m', 5e17, True, 1e15, 1e19, None),
('k_a', 5, True, 0.001, 1000, None),
('k_r', 0.1, True, 0.001, 1000, None),
('p', 0.8, True, 0.1, 5, None),
('q', 0.25, True, 0.1, 5, None),
('Gd', 0.104, True, 0.0001, 1, None),
('Gr0', 0.0002, True, 0.0001, 0.1, None),
('E', 0, True, -1000, 1000, None),
('v0', 43, True, -1e15, 1e15, None),
('v1', 17.1, True, -1e15, 1e15, None))
modelFits['3']['NpHR'].add_many( # Hyperpolarising: pumps chloride ions into the cell
('g0', 1.57e5, True, 0.001, 1e6, None),
('phi_m', 1.32e18,True, 1e15, 1e19, None),
('k_a', 0.01, True, 0.001, 1000, None),
('k_r', 0.01, True, 0.001, 1000, None),
('p', 0.793, True, 0.1, 5, None),
('q', 0.793, True, 0.1, 5, None),
('Gd', 0.1, True, 0.0001, 1, None),
('Gr0', 0.0002, True, 0.0001, 0.1, None),
('E', -400, True, -1000, 1000, None),
('v0', 43, True, -1e15, 1e15, None),
('v1', 17.1, True, -1e15, 1e15, None))
modelFits['3']['ArchT'].add_many( # Hyperpolarising: actively extrudes Hydrogen ions
('g0', 1.57e5, True, 0.001, 1e6, None),
('phi_m', 1.32e18,True, 1e15, 1e19, None),
('k_a', 0.01, True, 0.001, 1000, None),
('k_r', 0.01, True, 0.001, 1000, None),
('p', 0.793, True, 0.1, 5, None),
('q', 0.793, True, 0.1, 5, None),
('Gd', 0.1, True, 0.0001, 1, None),
('Gr0', 0.001, True, 0.0001, 0.1, None),
('E', 0, True, -1000, 1000, None),
('v0', 43, True, -1e15, 1e15, None),
('v1', 17.1, True, -1e15, 1e15, None))
modelFits['4']['ChR2'].add_many(
('g0', 1.14e5, True, 0.001,1e15, None),
('gam', 0.00742,True, 0.0, 1, None),
('phi_m', 2.33e17,True, 1e15, 1e19, None),
('k1', 4.15, True, 0.001,1e5, None), #3
('k2', 0.868, True, 0.001,1e5, None), #1.5
('p', 0.833, True, 0.1, 5, None),
('Gf0', 0.0373, True, 0, 1e3, None), #e12d
('k_f', 0.0581, True, 0.001,1e3, None), #c1
('Gb0', 0.0161, True, 0, 1e3, None), #e21d
('k_b', 0.063, True, 0.001,1e3, None), #c2
('q', 1.94, True, 0.1, 5, None),
('Gd1', 0.105, True, 0.01, 1, None),
('Gd2', 0.0138, True, 0.01, 1, None),
('Gr0', 0.00033,True, 1e-6, 1, None), #Gr #0.0004
('E', 0, True, -1000,1000, None),
('v0', 43, True, -1e15,1e15, None),
('v1', 17.1, True, -1e15,1e15, None))
modelFits['6']['ChR2'].add_many(
('g0', 2.52e4, True, 0.0, 1e15, None),
('gam', 0.0161, True, 0.0, 1, None), # Max=1 if gO1 >= gO2
('phi_m', 3.54e17,True, 1e15, 1e19, None),
('k1', 13.4, True, 0.0, 1000, None),
('k2', 2.71, True, 0.0, 1000, None),
('p', 0.985, True, 0.1, 5, None),
('Gf0', 0.0389, True, 0.0, 1000, None),
('k_f', 0.103, True, 0.0, 1000, None),
('Gb0', 0.0198, True, 0.0, 1000, None),
('k_b', 0.139, True, 0.0, 1000, None),
('q', 1.58, True, 0.1, 5, None),
('Go1', 2, True, 0.0, 1000, None),
('Go2', 0.0567, True, 0.0, 1000, None),
('Gd1', 0.112, True, 0.0, 1000, None),
('Gd2', 0.0185, True, 0.0, 1000, None),
('Gr0', 0.00033,True, 0.0, 1000, None), #0.00163
('E', 0, True, -1000,1000, None),
('v0', 43, True, -1e15, 1e15,None),
('v1', 17.1, True, -1e15, 1e15,None))
defaultOpsinType = 'ChR2'
rhoType = defaultOpsinType # Set this when selecting
modelParams['3'] = modelFits['3'][defaultOpsinType]
modelParams['4'] = modelFits['4'][defaultOpsinType]
modelParams['6'] = modelFits['6'][defaultOpsinType]
unitPrefixes = {} # Use a units library to convert between different prefixes
#Params = OrderedDict([('model', OrderedDict()), ('protocol', OrderedDict()), ('simulator', OrderedDict())])
# p, q: Hill coefficients
# phi_m: Hill constant
#TODO: This needs serious refactoring! Create a superclass which hands off attribute/method calls to a Parameter(s)() attribute by default or looks in self for other properties
#class Parameters(OrderedDict):
class PyRhOparameters(Parameters):
    """Ordered container of PyRhOparameter objects.

    Adapted from LMFIT: changes between lmfit 0.8.0 and 0.9.2 removed the
    ability to store lists as parameter values, so this subclass bypasses
    the checks that would otherwise reject them.
    """

    def __deepcopy__(self, memo):
        """Return a deep copy holding fresh PyRhOparameter objects.

        Only name/value/min/max are carried over; fit-related attributes
        (vary, stderr, correl, expr, ...) are intentionally dropped.
        """
        clone = PyRhOparameters()
        copies = [
            PyRhOparameter(name=par.name,
                           value=par.value,
                           min=par.min,
                           max=par.max)
            for par in self.values()
            if isinstance(par, PyRhOparameter)
        ]
        clone.add_many(*copies)
        return clone

    def __setitem__(self, key, par):
        """Store *par* under *key*, bypassing lmfit's symbol-table machinery.

        Uses OrderedDict.__setitem__ directly so list-valued parameters are
        accepted; the parameter's name is kept in sync with its key.
        """
        if par is not None and not isinstance(par, (Parameter, PyRhOparameter)):
            raise ValueError("'%s' is not a Parameter" % par)
        OrderedDict.__setitem__(self, key, par)
        par.name = key

    def add_many(self, *parlist):
        """Add several parameters at once.

        Each element of *parlist* is either a PyRhOparameter instance or a
        tuple of positional arguments for the PyRhOparameter constructor:
        (name, value, min, max, units, latex, descr).
        """
        for entry in parlist:
            if isinstance(entry, PyRhOparameter):
                param = entry
            else:
                param = PyRhOparameter(*entry)
            self[param.name] = param

    def valuesdict(self):
        """Return an OrderedDict of name -> bare value for each parameter.

        Distinct from the Parameters object itself: it holds plain values,
        not the full Parameter objects.
        """
        return OrderedDict((name, par.value) for name, par in self.items())
class PyRhOparameter(object):
    """A single model/protocol parameter with optional bounds, units and labels.

    Unlike lmfit's Parameter, the stored value may be a scalar, a string, or
    a (possibly nested) list; list values are clipped element-wise to the
    [min, max] bounds.  Adapted from LMFIT.
    """

    def __init__(self, name=None, value=None, min=-np.inf, max=np.inf,
                 units=None, latex=None, descr=None):
        """
        Parameters
        ----------
        name : str, optional
            Parameter name.
        value : scalar, str, list or tuple, optional
            Initial value; clipped to the bounds by _init_bounds().
        min, max : float or None, optional
            Bounds (None means unbounded).  If min > max they are swapped;
            min == max raises ValueError.
        units : optional
            Unit object (Brian units elsewhere in this module); must provide
            a _latex() method for the LaTeX representation.
        latex : str, optional
            LaTeX label.  NOTE: this attribute shadows the latex() method.
        descr : str, optional
            Human-readable description.
        """
        self.name = name
        self._val = value
        self._min = -np.inf
        self._max = np.inf
        self.min = min          # property setters validate/swap the bounds
        self.max = max
        self.units = units
        # NOTE(review): str(None) == 'None' when no units are given;
        # preserved for backward compatibility with existing labels.
        self.unitsLabel = str(self.units)
        # This instance attribute shadows the latex() method defined below.
        self.latex = latex
        self.descr = descr
        self.constant = True
        self._init_bounds()

    def set(self, value=None, vary=None, min=None, max=None, expr=None):
        """
        Set or update Parameter attributes (adapted from LMFIT).

        Parameters
        ----------
        value : float, optional
            Numerical Parameter value.
        vary : bool, optional
            Whether the Parameter is fixed during a fit.
        min : float, optional
            Lower bound.  To remove a lower bound you must use -np.inf.
        max : float, optional
            Upper bound.  To remove an upper bound you must use np.inf.
        expr : unused
            Accepted for lmfit API compatibility; ignored here.
        """
        if value is not None:
            self._val = value
        if vary is not None:
            self.vary = vary
        if min is not None:
            self.min = min
        if max is not None:
            self.max = max

    def _init_bounds(self):
        """Clip the current value into [min, max].

        Strings pass through untouched; lists/tuples are clipped element-wise.
        If no value was given, fall back to the lower (then upper) bound.
        """
        if self._val is not None:
            if isinstance(self._val, str):
                return
            elif isinstance(self._val, (list, tuple)):
                self._clipList(self._val)
            else:
                if self.max is not None and self._val > self.max:
                    self._val = self.max
                if self.min is not None and self._val < self.min:
                    self._val = self.min
        elif self.min is not None:
            self._val = self.min
        elif self.max is not None:
            self._val = self.max

    def _clipList(self, values):
        """Clip list entries into [min, max] in place, recursing into nested lists."""
        for ind, val in enumerate(values):
            if isinstance(val, str):
                return
            elif isinstance(val, (list, tuple)):  # Nested list e.g. cycles
                self._clipList(val)
            else:
                if self.max is not None and val > self.max:
                    values[ind] = self.max
                if self.min is not None and val < self.min:
                    values[ind] = self.min

    def get_max(self):
        return self._max

    def set_max(self, val):
        """Set the upper bound; swap with min if inverted; reject min == max."""
        if val is None:
            val = np.inf
        self._max = val
        if self.min > self.max:
            self._min, self._max = self.max, self.min
        if np.isclose(self.min, self.max, atol=1e-13, rtol=1e-13):
            raise ValueError("Parameter '%s' has min == max" % self.name)

    def get_min(self):
        return self._min

    def set_min(self, val):
        """Set the lower bound; swap with max if inverted; reject min == max."""
        if val is None:
            val = -np.inf
        self._min = val
        if self.min > self.max:
            self._min, self._max = self.max, self.min
        if np.isclose(self.min, self.max, atol=1e-13, rtol=1e-13):
            raise ValueError("Parameter '%s' has min == max" % self.name)

    min = property(get_min, set_min)
    max = property(get_max, set_max)

    def _getval(self):
        return self._val

    @property
    def value(self):
        """The parameter value; assignments are clipped to the bounds."""
        return self._getval()

    @value.setter
    def value(self, val):
        self._val = val
        self._init_bounds()

    def __repr__(self):
        s = []
        if self.name is not None:
            s.append("'%s'" % self.name)
        s.append(repr(self._getval()))
        s.append("bounds=[%s:%s]" % (repr(self.min), repr(self.max)))
        return "<Parameter %s>" % ', '.join(s)

    def __str__(self):
        return self.__repr__()

    # TODO: Revise latex representation to fall back gracefully if not IPython
    def _repr_latex_(self):
        """Return 'name = value [units]' wrapped in $...$ for rich display.

        Fix: the value is now shown even when no units were supplied (the
        original silently dropped it in that case).  Also uses an explicit
        "\\," escape (the bare "\," is deprecated in Python 3.12+).
        """
        s = self.latex if self.latex is not None else self.name
        if self._val is not None:
            parts = [s, '=', str(self._val)]
            if self.units is not None:
                parts.append(self.units._latex())
            s = "\\,".join(parts)
        return "$" + s + "$"

    def latex(self):
        """Rich LaTeX display of the parameter.

        NOTE(review): shadowed by the instance attribute ``self.latex`` set in
        __init__, so only reachable as ``PyRhOparameter.latex(instance)``.
        Fix: the original tested an undefined global ``IPY``; IPython is now
        probed directly.
        """
        try:
            from IPython.display import Math  # optional, notebook-only
        except ImportError:
            return self._repr_latex_()
        return Math(self._repr_latex_())
# Params['g0'] = PyRhOparameter('g0', 2.5e4, psiemens, 'pS', 'g_0',
# 'Biological scaling factor for rhodopsin conductance')
####|###10####|###20####|###30####|###40####|###50####|###60####|###70####|###80
### Protocols to be included in the next version:
### - Temperature (Q10)
### - pH (intracellular and extracellular)
### - Wavelength (lambda)
#protParams = OrderedDict([('step',Parameters()), ('delta',Parameters()), ('sinusoid',Parameters()), ('chirp',Parameters()), ('ramp',Parameters()), ('rectifier',Parameters()), ('shortPulse',Parameters()), ('recovery',Parameters()), ('custom',Parameters())])
protParams = OrderedDict([('step', PyRhOparameters()),
('delta', PyRhOparameters()),
('sinusoid', PyRhOparameters()),
('chirp', PyRhOparameters()),
('ramp', PyRhOparameters()),
('rectifier', PyRhOparameters()),
('shortPulse', PyRhOparameters()),
('recovery', PyRhOparameters()),
('custom', PyRhOparameters())])
protList = list(protParams) # List of keys #This could be removed
protParamLabels = OrderedDict([('phis', '\mathbf{\phi}'),
('Vs', '\mathbf{\mathrm{V}}'),
('Dt_delay', '\Delta t_{delay}'),
('Dt_on', '\Delta t_{on}'),
('Dt_total', 'T_{total}'),
('cycles', 'cycles'),
('phi0', '\phi_0'),
('fs', '\mathbf{f}'),
('f0', 'f_0'),
('fT', 'f_T'),
('linear', 'linear'),
('startOn', '\phi_{t=0}>0'),
#('phi_ton', '\phi_{t=0}'),
('pDs', '\mathbf{\Delta t_{on}}'),
('Dt_IPIs', '\mathbf{\Delta t_{off}}'),
('phi_ft', '\phi(t)')])
protUnitLabels = defaultdict(lambda: '')
protUnitLabels['phis'] = 'ph./mm^2/s'
protUnitLabels['phi0'] = 'ph./mm^2/s'
# protUnitLabels['phi_ton'] = 'ph./mm^2/s' ### Revise!!!
protUnitLabels['Vs'] = 'mV'
protUnitLabels['Dt_delay'] = 'ms'
protUnitLabels['Dt_on'] = 'ms'
protUnitLabels['cycles'] = 'ms'
protUnitLabels['pDs'] = 'ms'
protUnitLabels['Dt_IPIs'] = 'ms'
protUnitLabels['Dt_total'] = 'ms'
protUnitLabels['fs'] = 'Hz'
protUnitLabels['f0'] = 'Hz'
protUnitLabels['fT'] = 'Hz'
protParamNotes = OrderedDict([(prot, defaultdict(lambda: '')) for prot in protList])
for prot in protList:
protParamNotes[prot]['phis'] = 'List of flux values'
protParamNotes[prot]['Vs'] = 'List of voltage clamp values (if applied)'
protParamNotes[prot]['Dt_delay'] = 'Delay duration before the first pulse' # cycle'
protParamNotes[prot]['cycles'] = 'List of [on, off] durations for each pulse' # cycle'
#Exceptions
protParamNotes['custom']['phi_ft'] = 'Pulse generation function'
protParamNotes['sinusoid']['startOn'] = 'Start at maximum flux (else minimum)' # maximum of flux modulation
protParamNotes['sinusoid']['phi0'] = 'Constant offset for modulation'
protParamNotes['sinusoid']['fs'] = 'List of modulation frequencies'
protParamNotes['chirp']['linear'] = 'Linear frequency sweep (else exponential)'
protParamNotes['chirp']['startOn'] = 'Start at maximum flux (else minimum)'
protParamNotes['chirp']['phi0'] = 'Constant offset for modulation'
protParamNotes['chirp']['f0'] = 'Starting frequency'
protParamNotes['chirp']['fT'] = 'Ending frequency'
protParamNotes['ramp']['phis'] = 'List of ending flux values'
#protParamNotes['ramp']['phi_ton'] = 'Starting flux value'
protParamNotes['ramp']['phi0'] = 'Constant offset for flux values'
protParamNotes['delta']['cycles'] = ''
protParamNotes['delta']['Dt_on'] = 'On-phase duration'
protParamNotes['delta']['Dt_total'] = 'Total simulation duration'
protParamNotes['shortPulse']['cycles'] = ''
protParamNotes['shortPulse']['pDs'] = 'List of pulse on-phase durations' #'List of cycle on-phase durations'
protParamNotes['shortPulse']['Dt_total'] = 'Total simulation duration'
protParamNotes['recovery']['cycles'] = ''
protParamNotes['recovery']['Dt_on'] = 'Pulse on-phase duration' #'Cycle on-phase duration'
protParamNotes['recovery']['Dt_IPIs'] = 'List of pulse off-phase durations' #'List of cycle off-phase durations'
protParamNotes['recovery']['Dt_total'] = 'Total simulation duration'
#squarePulses = ['custom', 'delta', 'step', 'rectifier', 'shortPulse', 'recovery'] #{'custom': True, 'delta': True, 'step': True, 'rectifier': True, 'shortPulse': True, 'recovery': True}
#arbitraryPulses = ['custom', 'sinusoid', 'chirp', 'ramp'] #{'custom': True, 'sinusoid': True, 'chirp': True, 'ramp':True} # Move custom here
smallSignalAnalysis = ['delta', 'step', 'sinusoid']
protParams['custom'].add_many(('phis', [1e16,1e17], 0, None, mole*mm**-2*second**-1, '\mathbf{\phi}', 'List of flux values'), #'photons/s/mm^2'
('Vs', [-70,-20,10], None, None, mV, '\mathbf{\mathrm{V}}', 'List of voltage clamp values (if applied)'), #'mV'
('Dt_delay',25, 0, 1e9, ms, '\Delta t_{delay}', 'Delay duration before the first pulse'), #'ms'
('cycles', [[150.,50.]], 0, None, ms, 'cycles', 'List of [on, off] durations for each pulse'))#, #'ms'#,
protParams['step'].add_many(('phis', [1e16,1e17], 0, None, mole*mm**-2*second**-1, '\mathbf{\phi}', 'List of flux values'), #'photons/s/mm^2'
('Vs', [-70,-40,-10,10,40,70], None, None, mV, '\mathbf{\mathrm{V}}', 'List of voltage clamp values (if applied)'), #'mV'
('Dt_delay',25, 0, 1e9, ms, '\Delta t_{delay}', 'Delay duration before the first pulse'), #'ms'
('cycles', [[150.,100.]], 0, None, ms, 'cycles', 'List of [on, off] durations for each pulse')) #'ms'
protParams['sinusoid'].add_many(('phis',[1e12], 0, None, mole*mm**-2*second**-1, '\mathbf{\phi}', 'List of flux values'), #'photons/s/mm^2'
('phi0', [0], None, None, mole*mm**-2*second**-1, '\phi_0', 'Constant offset for flux'), #'photons/s/mm^2'
('startOn', True, False, True, 1, '\phi_{t=0}>0', 'Start at maximum flux (else minimum)'),
('Vs', [-70], None, None, mV, '\mathbf{\mathrm{V}}', 'List of voltage clamp values (if applied)'), #'mV'
('fs', [0.1,0.5,1,5,10], 0, None, Hz, '\mathbf{f}', 'List of modulation frequencies'), #'Hz' #50, 100, 500, 1000
('Dt_delay',25, 0, 1e9, ms, '\Delta t_{delay}', 'Delay duration before the first pulse'), #'ms'
('cycles', [[10000.,50.]], 0, None, ms, 'cycles', 'List of [on, off] durations for each pulse')) #'ms'
protParams['chirp'].add_many(('phis', [1e12], None, None, mole*mm**-2*second**-1, '\mathbf{\phi}', 'List of flux values'), # 'photons/s/mm^2'
('phi0', [0], None, None, mole*mm**-2*second**-1, '\phi_0', 'Constant offset for flux'), # 'photons/s/mm^2'
('linear', True, False, True, 1, 'linear', 'Linear frequency sweep (else exponential)'), # False := exponential
('startOn', False, False, True, 1, '\phi_{t=0}>0', 'Start at maximum flux (else minimum)'),
('Vs', [-70], None, None, mV, '\mathbf{\mathrm{V}}', 'List of voltage clamp values (if applied)'), # 'mV'
('Dt_delay',100, 0, 1e9, ms, '\Delta t_{delay}', 'Delay duration before the first pulse'), # 'ms'
('cycles', [[10000.,100.]], 0, None, ms, 'cycles', 'List of [on, off] durations for each pulse'), # 'ms'
('f0', 0.1, 0, None, Hz, 'f_0', 'Starting frequency'), # 'Hz'
('fT', 1000, 0, None, Hz, 'f_T', 'Ending frequency')) # 'Hz'
protParams['ramp'].add_many(('phis', [1e16,1e17,1e18], None, None, mole*mm**-2*second**-1, '\mathbf{\phi}', 'List of flux values'), # 'photons/s/mm^2' #1e12,1e13,1e14,1e15,
('phi0', 0, None, None, mole*mm**-2*second**-1, '\phi_0', 'Constant offset for flux'), # 'photons/s/mm^2'
('Vs', [-70], None, None, mV, '\mathbf{\mathrm{V}}', 'List of voltage clamp values (if applied)'), # 'mV'
('Dt_delay',25, 0, 1e9, ms, '\Delta t_{delay}', 'Delay duration before the first pulse'), # 'ms'
('cycles', [[250.,25.]], 0, None, ms, 'cycles', 'List of [on, off] durations for each pulse')) # 'ms'#,
protParams['delta'].add_many(('phis', [1e20], None, None, mole*mm**-2*second**-1, '\mathbf{\phi}', 'List of flux values'), # 'photons/s/mm^2'
('Vs', [-70], None, None, mV, '\mathbf{\mathrm{V}}', 'List of voltage clamp values (if applied)'), # 'mV'
('Dt_delay',5, 0, 1e9, ms, '\Delta t_{delay}', 'Delay duration before the first pulse'), # 'ms'
('Dt_on', 1e-3, 0, 1e9, ms, '\Delta t_{on}', 'On-phase duration'), # 'ms'
('Dt_total',25., 0, None, ms, 'T_{total}', 'Total simulation duration')) # 'ms'
protParams['rectifier'].add_many(('phis',[1e16], None, None, mole*mm**-2*second**-1, '\mathbf{\phi}', 'List of flux values'), # 'photons/s/mm^2' # Change to 1e17?
('Vs', [-100,-70,-40,-10,20,50,80],None,None, mV, '\mathbf{\mathrm{V}}', 'List of voltage clamp values (if applied)'), # 'mV' #[-100,-80,-60,-40,-20,0,20,40,60,80]
('Dt_delay',50, 0, 1e9, ms, '\Delta t_{delay}', 'Delay duration before the first pulse'), # 'ms'
('cycles', [[250.,100.]], None, None, ms, 'cycles', 'List of [on, off] durations for each pulse')) # 'ms' #,
protParams['shortPulse'].add_many(('phis',[1.5e15], None, None, mole*mm**-2*second**-1, '\mathbf{\phi}', 'List of flux values'), # 'photons/s/mm^2' #1e12
('Vs', [-70], None, None, mV, '\mathbf{\mathrm{V}}', 'List of voltage clamp values (if applied)'), # 'mV'
('Dt_delay',25, 0, None, ms, '\Delta t_{delay}', 'Delay duration before the first pulse'), # 'ms'
('pDs', [1,2,3,5,8,10,20], 0, None, ms, '\mathbf{\Delta t_{on}}', 'List of pulse on-phase durations'), # 'ms' # [0.1, 0.2, 0.5, 1, 2, 5, 10]
('Dt_total',100., 0, None, ms, 'T_{total}', 'Total simulation duration')) # 'ms'
protParams['recovery'].add_many(('phis',[1e17], None, None, mole*mm**-2*second**-1, '\mathbf{\phi}', 'List of flux values'), # 'photons/s/mm^2'
('Vs', [-70], None, None, mV, '\mathbf{\mathrm{V}}', 'List of voltage clamp values (if applied)'), # 'mV'
('Dt_delay',100, 0, None, ms, '\Delta t_{delay}', 'Delay duration before the first pulse'), # 'ms'
('Dt_on', 100, 0, None, ms, '\Delta t_{on}', 'On-phase duration'), # 'ms'
('Dt_IPIs',[500,1000,1500,2500,5000,7500,10000],None,None,ms, '\mathbf{\Delta t_{off}}', 'List of pulse off-phase durations'), # 'ms'
#('Dt_IPIs',[0.5,1,1.5,2.5,5,7.5,10],None,None,seconds), # 'ms'
('Dt_total',12000, 0, None, ms, 'T_{total}', 'Total simulation duration')) # 'ms'
simUnitLabels = defaultdict(lambda: '')
simUnitLabels['dt'] = 'ms'
simUnitLabels['v_init'] = 'mV'
simParamNotes = defaultdict(lambda: '')
simParamNotes['cell'] = 'List of hoc files'
simParamNotes['Vclamp'] = 'Use voltage clamp'
simParamNotes['Vcomp'] = 'Compartment to record from'
simParamNotes['expProb'] = 'Expresssion probability'
simParamNotes['v_init'] = 'Initialisation voltage'
simParamNotes['CVode'] = 'Use variable timestep integrator'
simParamNotes['dt'] = 'Numerical integration timestep'
#simParams = OrderedDict([('Python',Parameters()), ('NEURON',Parameters()), ('Brian',Parameters())])
simParams = OrderedDict([('Python', PyRhOparameters()),
('NEURON', PyRhOparameters()),
('Brian', PyRhOparameters())])
simList = list(simParams)
simParams['Python'].add_many(('dt', 0.1, 0, None)) # 'ms'
# atol
simParams['NEURON'].add_many(('cell', ['minimal.hoc'], None, None), #'morphology'
('Vclamp', False, False, True),
('Vcomp', 'soma', None, None),
('expProb',1.0, 0., 1.),
('v_init', -65, None, None), # 'mV'
('CVode', False, False, True),
('dt', 0.1, 0, None)) # 'ms' #, 0.025
simParams['Brian'].add_many(('dt', 0.1, 0, None)) # 'ms'
### Move somewhere else e.g. base.py
class PyRhOobject(object):
    """Common base class for all PyRhO objects.

    Provides generic get/set/print/log helpers that treat the instance's
    __dict__ as a flat parameter store.

    NOTE(review): ``__metaclass__`` is the Python 2 spelling and has no effect
    under Python 3, so the @abstractmethod is not enforced there; kept
    unchanged for backward compatibility.
    """
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def __init__(self):
        pass

    def __del__(self):
        pass

    def __repr__(self):
        return str(self.__class__)

    def __str__(self):
        # Fix: __str__ must *return* a string; the original printed and
        # returned None, making str(obj) raise TypeError.
        return "PyRhO object: " + self.__class__.__name__

    def __call__(self):
        return

    def setParams(self, params):
        """Set all model parameters from a Parameters() object."""
        for name, value in params.valuesdict().items():
            setattr(self, name, value)

    def updateParams(self, params):
        """Update only parameters which already exist as attributes.

        Returns the number of attributes updated (extra entries in *params*
        are ignored, allowing dummy variables in fitting parameters).
        """
        count = 0
        for name, value in params.valuesdict().items():
            if hasattr(self, name):
                setattr(self, name, value)
                count += 1
        return count

    def getParams(self, params):
        """Export every attribute into an lmfit-style dictionary (params[p].value)."""
        for p in self.__dict__.keys():
            params[p].value = self.__dict__[p]

    def exportParams(self, params):
        """Export only attributes already present in *params*; return the count."""
        count = 0
        for p, v in self.__dict__.items():
            if p in params:
                params[p].value = v
                count += 1
        return count

    def printParams(self):
        """Print every attribute as 'name = value'."""
        for p in self.__dict__.keys():
            print(p, ' = ', self.__dict__[p])

    def logParams(self):
        """Log all parameters at INFO level."""
        logger.info('Parameters for ' + self.__class__.__name__)
        for p in self.__dict__.keys():
            logger.info(' '.join([p, ' = ', str(self.__dict__[p])]))

    def printParamsWithLabels(self):
        """Print attributes, appending a unit label where one is known."""
        for p in self.__dict__.keys():
            if p in unitLabels:
                print(p, ' = ', self.__dict__[p], ' [', unitLabels[p], ']')
            else:
                print(p, ' = ', self.__dict__[p])

    def printParamsWithUnits(self):
        """Print attributes, appending a Brian unit object where one is known."""
        for p in self.__dict__.keys():
            if p in modelUnits:
                print(p, ' = ', self.__dict__[p], ' * ', modelUnits[p])
            else:
                print(p, ' = ', self.__dict__[p])

    def getExt(self, var, ext='max'):
        """Return (extreme value, index) of the sequence attribute *var*.

        NOTE(review): np.searchsorted assumes the sequence is sorted, so the
        returned index is only meaningful for sorted data - confirm callers.
        """
        if ext == 'max':
            mVal = max(self.__dict__[var])
        elif ext == 'min':
            mVal = min(self.__dict__[var])
        mInd = np.searchsorted(self.__dict__[var], mVal)
        return mVal, mInd
|
ProjectPyRhO/PyRhO
|
pyrho/parameters.py
|
Python
|
bsd-3-clause
| 36,387
|
[
"Brian",
"NEURON"
] |
d15030973c267ee2c810bcaeef3bf0eec1f8605d00e9cfda67249d51983f788c
|
"""For backward compatibility only, pulls app_factor from galaxy.webapps.main"""
from galaxy.webapps.galaxy.buildapp import app_factory
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/web/buildapp.py
|
Python
|
gpl-3.0
| 137
|
[
"Galaxy"
] |
4f2427a617dc30ad2d1e07ed36d6898031144e13455194c1a70bf43a4438ed9b
|
"""
PyArchiver Compression and Archiving Library
@author: Clivern U{hello@clivern.com}
"""
from __future__ import print_function
import os
import bz2
import gzip
import tarfile
class TarPack(object):
    """Thin wrapper around :class:`tarfile.TarFile` for reading and writing
    TAR archives.

    .. versionadded:: 1.0.0
    """

    def __init__(self, file, mode = 'r'):
        """Open *file* as a TAR archive.

        :param file: path to a new or an existing tar archive.
        :param mode: 'r' (default), 'w', 'a' or 'x' -- see
            https://docs.python.org/3.5/library/tarfile.html#tarfile.open
        """
        self._TAR = tarfile.TarFile(file, mode)

    def setFiles(self, files):
        """Remember the files to compress or decompress (absolute paths).

        :param files: a list of files.
        :return: self, for chaining.
        """
        self._Files = files
        return self

    def add(self):
        """Add every remembered ``(name, arcname)`` pair to the archive.

        ``name`` may be any kind of file (directory, fifo, symlink, ...);
        ``arcname`` gives an alternative name inside the archive.

        :return: self, for chaining.
        """
        for name, arcname in self._Files:
            self._TAR.add(name, arcname)
        return self

    def extract(self, member, path = None):
        """Extract one *member* into *path* (or the current directory).

        .. warning:: never extract archives from untrusted sources without
           prior inspection -- members with absolute names or ``..``
           components can escape *path*.

        :param member: full member name or a TarInfo object.
        :param path: optional target directory.
        """
        return self._TAR.extract(member, path)

    def extractAll(self, path = None):
        """Extract every member into *path* (or the current directory)."""
        return self._TAR.extractAll(path) if False else self._TAR.extractall(path)

    def close(self):
        """Close the archive; essential records are written on close.

        :return: self, for chaining.
        """
        self._TAR.close()
        return self

    def setInfo(self):
        """Cache the archive's member info and member name lists.

        :return: self, for chaining.
        """
        # TarInfo objects appear in the same order as the entries in the
        # actual TAR file on disk when an existing archive was opened.
        self._TAR_INFO_LIST = self._TAR.getmembers()
        # Plain member names, in the same order.
        self._TAR_NAME_LIST = self._TAR.getnames()
        return self

    def getInfo(self, name):
        """Return the TarInfo object for member *name*."""
        return self._TAR.getmember(name)

    def getInfoList(self):
        """Return the cached list of TarInfo objects (see :meth:`setInfo`)."""
        return self._TAR_INFO_LIST

    def getNamesList(self):
        """Return the cached list of member names (see :meth:`setInfo`)."""
        return self._TAR_NAME_LIST

    def isTarFileName(self, filename):
        """Return True if *filename* looks like a gzipped TAR path.

        NOTE(review): only ``.tar.gz`` names are recognised; plain ``.tar``
        or ``.tar.bz2`` paths return False (preserved from the original).
        """
        return filename.endswith('.tar.gz')

    def isTarFile(self, file_path):
        """Return True if *file_path* names an existing ``.tar.gz`` file."""
        return file_path.endswith('.tar.gz') and os.path.isfile(file_path)
|
Clivern/PyArchiver
|
pyarchiver/tarpack.py
|
Python
|
mit
| 4,216
|
[
"VisIt"
] |
510fce07a09f40d64083fb18760d7c0ccb3db971e820b8f0049f53f6dac91394
|
# ! /usr/bin/python2
# -*- coding: utf-8; -*-
#
# (c) 2013 booya (http://booya.at)
#
# This file is part of the OpenGlider project.
#
# OpenGlider is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# OpenGlider is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenGlider. If not, see <http://www.gnu.org/licenses/>.
# A simple graphics library using vtk and aiming to have a similar syntax as mathematica graphics
import sys
import vtk
from openglider.graphics.functions import depth, tofloat
from openglider.graphics.elements import *
# from openglider.graphics.qt import ApplicationWindow
# problems with this line on ubuntu 15.04
class Graphics(object):
    """Creates a Graphics Instance.

    Collects graphic objects into a single vtkPolyData/vtkActor pair and,
    optionally, opens an interactive render window.
    """
    def __init__(self, graphicobjects, coordinates=None, rotation=True, show=True):
        # rotation=True -> trackball camera interactor; False -> 2-D rubber band.
        self.allow_rotation = rotation
        # Optional shared point coordinates (2-D or 3-D tuples); see redraw().
        self.coordinates = coordinates
        self.graphicobjects = graphicobjects
        # Maps element class -> its vtkCellArray, filled lazily via get_cell().
        self.vtk_cells = {}
        self.data = vtk.vtkPolyData()
        self.points = vtk.vtkPoints()
        self.colours = vtk.vtkUnsignedCharArray()
        self.default_colour = [255, 255, 255]  # white
        self.colours.SetNumberOfComponents(3)  # RGB per cell
        self.colours.SetName("Colours")
        self.actor = vtk.vtkActor()
        self.redraw()
        if show:
            self.show()

    @staticmethod
    def make_3d(arg):
        """Pad a 2-D vector with z=0; return 3-D vectors as a plain list."""
        if len(arg) == 2:
            return [arg[0], arg[1], 0.]
        elif len(arg) == 3:
            return list(arg)
        else:
            raise ValueError("Only 2D- or 3D-Vectors allowed")

    def redraw(self):
        """Rebuild points, cells and colours from the graphic objects."""
        self.data.Reset()
        self.points.Reset()
        self.colours.Reset()
        if not self.coordinates is None:
            for coor in self.coordinates:
                self.points.InsertNextPoint(self.make_3d(coor))
        #BUGFIX, this disables colours partly for polylines, colour has to be set in Line(points, colour=...)
        #for graphicobject in self.graphicobjects:
        #    graphicobject.draw(self)
        # Two-pass draw order (line elements first, then everything else) is
        # part of the colour workaround above -- do not collapse into one loop.
        for graphicobject in self.graphicobjects:
            if hasattr(graphicobject, 'element_setter') and graphicobject.element_setter == 'SetLines':
                graphicobject.draw(self)
        for graphicobject in self.graphicobjects:
            if not hasattr(graphicobject, 'element_setter') or not graphicobject.element_setter == 'SetLines':
                graphicobject.draw(self)
        self.data.SetPoints(self.points)
        # Set element types (zb: self.data.SetPolys(poly_cell)
        for el_cls, el_cell_array in self.vtk_cells.items():
            if el_cls.element_setter is not None:
                getattr(self.data, el_cls.element_setter)(el_cell_array)
        self.data.GetCellData().SetScalars(self.colours)
        mapper = vtk.vtkPolyDataMapper()
        mapper.SetInputData(self.data)
        self.actor.SetMapper(mapper)

    def show(self):
        """Open an interactive render window; blocks until it is closed."""
        self.renderer = vtk.vtkRenderer()
        self.renderer.SetBackground(0.1, 0.2, 0.4)  # Blue
        self.renderer.ResetCamera()
        self.renderer.AddActor(self.actor)
        render_window = vtk.vtkRenderWindow()
        render_window.SetSize(700, 700)
        render_window.AddRenderer(self.renderer)
        render_interactor = vtk.vtkRenderWindowInteractor()
        if self.allow_rotation:
            render_interactor.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())
        else:
            render_interactor.SetInteractorStyle(vtk.vtkInteractorStyleRubberBand2D())
        render_interactor.SetRenderWindow(render_window)
        render_interactor.Initialize()
        render_interactor.Start()

    def get_cell(self, graphics_cls):
        """
        Get a vtkCellArray container
        (created on first use for the given element class).
        """
        if graphics_cls not in self.vtk_cells:
            self.vtk_cells[graphics_cls] = vtk.vtkCellArray()
        return self.vtk_cells[graphics_cls]

    def get_points(self, *points):
        """Return the stored 3-D coordinates for the given point indices."""
        return [self.points.GetPoint(point_no) for point_no in points]
class Graphics3D(Graphics):
    """Graphics preset for 3-D viewing: trackball rotation enabled."""
    def __init__(self, graphicsobject, coordinates=None):
        Graphics.__init__(self, graphicsobject, coordinates, rotation=True)
class Graphics2D(Graphics):
    """Graphics preset for 2-D viewing: rotation disabled (rubber-band pan/zoom)."""
    def __init__(self, graphicsobject, coordinates=None):
        Graphics.__init__(self, graphicsobject, coordinates, rotation=False)
def show(*graphics):
    """Render several Graphics instances together in a single VTK window."""
    allow_rotation = True  # NOTE(review): hard-coded, so the 2-D branch below is dead code
    render_window = vtk.vtkRenderWindow()
    render_window.SetSize(700, 700)
    render_interactor = vtk.vtkRenderWindowInteractor()
    if allow_rotation:
        render_interactor.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())
    else:
        render_interactor.SetInteractorStyle(vtk.vtkInteractorStyleRubberBand2D())
    renderer = vtk.vtkRenderer()
    renderer.SetBackground(0.1, 0.2, 0.4)  # Blue
    renderer.ResetCamera()
    for g in graphics:
        # Rebuild each scene's polydata before adding its actor.
        g.redraw()
        renderer.AddActor(g.actor)
    render_interactor.SetRenderWindow(render_window)
    render_window.AddRenderer(renderer)
    render_interactor.Initialize()
    render_interactor.Start()  # blocks until the window is closed
def listlineplot(points):
    """Plot a value list, Mathematica ``ListLinePlot`` style.

    NOTE(review): ``np``, ``Line``, ``depth`` and ``tofloat`` must come from
    the star import of ``openglider.graphics.elements``/functions -- confirm;
    ``map`` below is the Python-2 list-returning form.
    """
    if isinstance(points, np.ndarray):
        points = points.tolist()
    if depth(points) == 2:
        # Flat list of y-values: pair each with its index as x.
        Graphics2D([Line(np.transpose(np.array([map(float, range(len(points))), points])))])
    if depth(points) == 3 and len(points[1]) == 2:
        # List of (x, y) pairs.
        Graphics2D([Line(tofloat(points))])
    if depth(points) == 3 and len(points[1]) == 3:
        # List of (x, y, z) triples.
        Graphics3D([Line(tofloat(points))])
def draw_glider(glider, num=0, mirror=True, panels=True):
    """Visualise a glider: filled panel polygons or plain rib outlines.

    :param glider: glider object providing copy_complete()/return_ribs().
    :param num: subdivision parameter forwarded to return_ribs().
    :param mirror: render the mirrored (complete) glider.
    :param panels: True -> filled panel polygons; False -> rib lines only.
    :return: True
    """
    if mirror:
        temp = glider.copy_complete()
    else:
        temp = glider
    ribs = temp.return_ribs(num)
    if panels:
        points = np.concatenate(ribs)
        polygons = temp.return_polygon_indices(ribs)
        Graphics([Polygon(polygon) for polygon in polygons], points)
    else:
        Graphics([Line(rib) for rib in ribs])
    return True
|
hiaselhans/OpenGlider
|
openglider/graphics/__init__.py
|
Python
|
gpl-3.0
| 6,340
|
[
"VTK"
] |
b3689156b0c3ca3102d1efe463495b39e491035fc5a406ee4cc9da622f0e0a6f
|
import unittest, time, sys
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_util, h2o_hosts, h2o_import as h2i
class Basic(unittest.TestCase):
    """GLM2 regression tests over the UMass statdata sets (Python 2 / h2o harness)."""
    def tearDown(self):
        # Fail the test if H2O logged errors during the run.
        h2o.check_sandbox_for_errors()
    @classmethod
    def setUpClass(cls):
        # Build a one-node H2O cloud, locally or on configured remote hosts.
        global localhost
        localhost = h2o.decide_if_localhost()
        if (localhost):
            h2o.build_cloud(1)
        else:
            h2o_hosts.build_cloud_with_hosts(1)
        global SYNDATASETS_DIR
        SYNDATASETS_DIR = h2o.make_syn_dir()
    @classmethod
    def tearDownClass(cls):
        h2o.tear_down_cloud()
    def test_GLM2_umass(self):
        # Each tuple: (file, family, response column, timeout secs,
        # list of columns to exclude or None).
        h2o.beta_features = True
        csvFilenameList = [
            ('cgd.dat', 'gaussian', 12, 5, None),
            ('chdage.dat', 'binomial', 2, 5, None),
            # leave out ID and birth weight
            ('clslowbwt.dat', 'binomial', 7, 30, [1,2,3,4,5]),
            ('icu.dat', 'binomial', 1, 30, None),
            # need to exclude col 0 (ID) and col 10 (bwt)
            # but -x doesn't work..so do 2:9...range doesn't work? FIX!
            ('lowbwt.dat', 'binomial', 1, 30, [2,3,4,5,6,7,8,9]),
            ('lowbwtm11.dat', 'binomial', 1, 30, None),
            ('meexp.dat', 'gaussian', 3, 30, None),
            ('nhanes3.dat', 'binomial', 15, 30, None),
            ('pbc.dat', 'gaussian', 1, 30, None),
            ('pharynx.dat', 'gaussian', 12, 30, None),
            ('pros.dat', 'binomial', 1, 30, None),
            ('uis.dat', 'binomial', 8, 30, None),
        ]
        trial = 0
        # Run the whole dataset list three times.
        for i in range(3):
            for (csvFilename, family, y, timeoutSecs, x) in csvFilenameList:
                csvPathname = "logreg/umass_statdata/" + csvFilename
                kwargs = {'n_folds': 3, 'response': y, 'family': family, 'alpha': 1, 'lambda': 1e-4}
                parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, schema='put',
                    timeoutSecs=timeoutSecs)
                if x is not None:
                    # Translate the excluded-column list into H2O's ignored_cols.
                    ignored_cols = h2o_cmd.createIgnoredCols(key=parseResult['destination_key'],
                        cols=x, response=y)
                    kwargs['ignored_cols'] = ignored_cols
                start = time.time()
                glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
                h2o_glm.simpleCheckGLM(self, glm, None, **kwargs)
                print "glm end (w/check) on ", csvPathname, 'took', time.time() - start, 'seconds'
                trial += 1
                print "\nTrial #", trial
# Script entry point: run the H2O unittest driver.
if __name__ == '__main__':
    h2o.unit_main()
|
woobe/h2o
|
py/testdir_single_jvm/test_GLM2_umass.py
|
Python
|
apache-2.0
| 2,634
|
[
"Gaussian"
] |
d4ca89d36d4dfe3eb20e649082153fed8c8428e982ba088e62604b4431382151
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Kyle A. Beauchamp
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import numpy as np
import mdtraj as md
from mdtraj.testing import get_fn, eq, DocStringFormatTester, skipif
from mdtraj.formats import mol2
from distutils.spawn import find_executable
from mdtraj.utils import enter_temp_directory
import tarfile
import pickle
import os
import numpy as np
import scipy.sparse
import subprocess
# Run mol2's docstring-format checks when this test module is imported.
doc = DocStringFormatTester(mol2)
def test_load_mol2():
    """Imatinib loaded from mol2 must match the PDB reference (coords and bonds)."""
    trj = md.load(get_fn('imatinib.mol2'))
    ref_trj = md.load(get_fn('imatinib.pdb'))
    eq(trj.xyz, ref_trj.xyz)
    ref_top, ref_bonds = ref_trj.top.to_dataframe()
    top, bonds = trj.top.to_dataframe()
    eq(bonds, ref_bonds)
@skipif(find_executable('obabel') is None, 'You need obabel installed to run this test')
@skipif(os.environ.get("TRAVIS", None) == 'true', "Skip testing of entire FreeSolv database on Travis.")
def test_load_freesolv_gaffmol2_vs_sybylmol2_vs_obabelpdb():
    """Cross-check every FreeSolv molecule loaded from GAFF mol2, SYBYL mol2 and
    an obabel-converted PDB: atom/frame counts, coordinates and bond
    connectivity must all agree."""
    with enter_temp_directory():
        tar_filename = "freesolve_v0.3.tar.bz2"
        tar = tarfile.open(get_fn(tar_filename), mode="r:bz2")
        tar.extractall()
        tar.close()
        database = pickle.load(open("./v0.3/database.pickle"))
        for key in database:
            gaff_filename = "./v0.3/mol2files_gaff/%s.mol2" % key
            pdb_filename = "./v0.3/mol2files_gaff/%s.pdb" % key
            sybyl_filename = "./v0.3/mol2files_sybyl/%s.mol2" % key
            # Convert the SYBYL mol2 to PDB with obabel as a third reference.
            cmd = "obabel -imol2 %s -opdb > %s 2>/dev/null" % (sybyl_filename, pdb_filename)
            assert os.system(cmd) == 0
            t_pdb = md.load(pdb_filename)
            t_gaff = md.load(gaff_filename)
            t_sybyl = md.load(sybyl_filename)
            eq(t_pdb.n_atoms, t_gaff.n_atoms)
            eq(t_pdb.n_atoms, t_sybyl.n_atoms)
            eq(t_pdb.n_frames, t_gaff.n_frames)
            eq(t_pdb.n_frames, t_gaff.n_frames)
            eq(t_pdb.xyz, t_gaff.xyz, decimal=4)
            eq(t_pdb.xyz, t_sybyl.xyz, decimal=4)
            top_pdb, bonds_pdb = t_pdb.top.to_dataframe()
            top_gaff, bonds_gaff = t_gaff.top.to_dataframe()
            top_sybyl, bonds_sybyl = t_sybyl.top.to_dataframe()
            eq(top_sybyl.name.values, top_pdb.name.values)
            # eq(top_gaff.name.values, top_sybyl.name.values) # THEY CAN HAVE DIFFERENT NAMES, so this isn't TRUE!
            def make_bonds_comparable(bond_array):
                """Create a bond connectivity matrix from a numpy array of atom pairs. Avoids having to compare the order in which bonds are listed."""
                n_bonds = len(bond_array)
                data = np.ones(n_bonds)
                i = bond_array[:, 0]
                j = bond_array[:, 1]
                matrix = scipy.sparse.coo_matrix((data, (i, j)), shape=(t_pdb.n_atoms, t_pdb.n_atoms)).toarray()
                return matrix + matrix.T  # Symmetrize to account for (a ~ b) versus (b ~ a)
            bond_matrix_pdb = make_bonds_comparable(bonds_pdb)
            bond_matrix_gaff = make_bonds_comparable(bonds_gaff)
            bond_matrix_sybyl = make_bonds_comparable(bonds_sybyl)
            eq(bond_matrix_pdb, bond_matrix_gaff)
            eq(bond_matrix_pdb, bond_matrix_sybyl)
# Third row from mol2 file copied below, used in testing.
# 3 N1 8.5150 -0.1620 1.3310 n3 1 LIG -0.732600
def test_mol2_dataframe():
    """Spot-check row 3 of the parsed mol2 (see the copied line in the comment above)."""
    top, bonds = mol2.mol2_to_dataframes(get_fn("imatinib.mol2"))
    eq(top.name[2], "N1")
    eq(top.atype[2], "n3")
    eq(top.resName[2], "LIG")
    eq(float(top.charge[2]), -0.732600)
def test_mol2_warnings():
    # Smoke test: loading a TRIPOS-format mol2 ligand must not raise.
    trj = md.load_mol2(get_fn('lysozyme-ligand-tripos.mol2'))
|
kyleabeauchamp/mdtraj
|
mdtraj/tests/test_mol2.py
|
Python
|
lgpl-2.1
| 4,725
|
[
"MDTraj"
] |
1f747eeb731223377151237b55db14e7040f6c39eb72a34f28608a11e66ceee1
|
#!/usr/bin/env python3
#title : core.py
#description : Core methods for distributions.
#author : Enys Mones
#date : 2015.06.19
#version : 0.1
#usage : python core.py
#========================================================
import numpy as np
from scipy import stats
from mpmath import ln, sqrt
#############
# CONSTANTS #
#############
# Tolerance for substitution of a distribution.
# For performance and robustness reasons, some distribution are approximated
# by delta or uniform distribution, when their parameters approach some critical
# values. EPSILON is used for detecting these cases.
EPSILON = 0.001
# Default domain size for generating probability mass functions.
DEFAULT_PDF_MAX = 10000
# Default domain size for generating random samples.
DEFAULT_SAMPLE_MAX = DEFAULT_PDF_MAX
# Default number of random samples to generate.
# Used mostly for testing.
DEFAULT_SAMPLE_SIZE = 10000
###########
# CLASSES #
###########
class RealDistribution():
    """
    The abstract base class for the distributions.
    All distributions have to implement the following:
    1) Probability mass function for testing and K-S optimization.
    2) Sampling method for testing and p-values of K-S statistics.
    3) Log-likelihood for MLE and information criteria.
    """
    @staticmethod
    def pmf(params, domain=DEFAULT_PDF_MAX):
        """
        Returns the probability mass function.
        :param params: a list containing the parameters.
        :param domain: domain size.
        :return: probability mass function as a numpy array.
        """
        raise NotImplementedError("Subclass must implement pmf(params, domain).")
    @staticmethod
    def samples(params, size=DEFAULT_SAMPLE_SIZE, domain=DEFAULT_SAMPLE_MAX):
        """
        Returns a given number of samples.
        :param params: a list containing the parameters.
        :param size: number of samples to return.
        :param domain: domain size.
        :return: samples in a numpy array.
        """
        raise NotImplementedError("Subclass must implement samples(params, size, domain).")
    @staticmethod
    def log_likelihood(params, data, nonzero_only=False):
        """
        Returns the log-likelihood of the distribution for a given sample.
        :param params: a list containing the parameters.
        :param data: the data over which the log-likelihood should be calculated.
        :param nonzero_only: whether nonzero elements should be considered only. In some
        cases, this parameter is unused.
        :return: the log-likelihood.
        """
        raise NotImplementedError("Subclass must implement log_likelihood(params, data).")
    @staticmethod
    def get_params(params):
        """
        Returns a printable string of the distribution parameters.
        :param params: list of parameters.
        :return: printable string in the format of '(<name1>, <name2>, ...) = (<value1>,
        <value2>, ...)', where <nameX> and <valueX> corresponds to the name and value of
        parameter X.
        """
        # BUG FIX: the original *returned* the NotImplementedError instance
        # instead of raising it, so calling get_params() on the base class
        # silently produced an exception object rather than failing loudly.
        raise NotImplementedError("Subclass must implement get_params(params).")
class Delta(RealDistribution):
    """
    Dirac delta distribution.

    Used as a stand-in for other distributions when some of their parameters
    approach critical values.
    """
    @staticmethod
    def pmf(params, domain=DEFAULT_PDF_MAX):
        """
        Probability mass function of a delta distribution: all mass at the
        (integer) location index.

        :param params: single element list with the location parameter.
        :param domain: domain size.
        :return: probability mass function as a numpy array.
        """
        loc = int(params[0])
        size = max(loc, domain) + 1
        _pmf = np.zeros(size)
        _pmf[loc] = 1.0
        return _pmf
    @staticmethod
    def samples(params, size=DEFAULT_SAMPLE_SIZE, domain=DEFAULT_SAMPLE_MAX):
        """
        Generate samples from a delta distribution: every draw equals the
        (integer-truncated) location parameter.

        :param params: single element list with the location parameter.
        :param size: number of samples.
        :param domain: unused.
        :return: numpy array of samples.
        """
        return np.full(size, int(params[0]), dtype=float)
    @staticmethod
    def log_likelihood(params, data):
        """
        Log-likelihood of a delta distribution, approximated by a narrow
        Gaussian of width EPSILON.

        :param params: single element list with the location parameter.
        :param data: the data over which the log-likelihood should be calculated.
        :return: log-likelihood.
        """
        return -len(data)*ln(EPSILON*sqrt(2*np.pi)) - 0.5*np.sum(0.5*np.power(data-params[0], 2))/EPSILON**2
# Shared module-level instance (all Delta methods are static).
delta = Delta()
class Uniform(RealDistribution):
    """
    Uniform distribution.

    Mostly used as a safe replacement when another distribution's parameters
    approach critical values.
    """
    @staticmethod
    def pmf(params, domain=DEFAULT_PDF_MAX):
        """
        Probability mass function of a uniform distribution: equal mass on
        each of the domain+1 points.

        :param params: unused.
        :param domain: domain size.
        :return: probability mass function as a numpy array.
        """
        n_points = domain + 1
        return np.ones(n_points)/float(n_points)
    @staticmethod
    def samples(params, size=DEFAULT_SAMPLE_SIZE, domain=DEFAULT_SAMPLE_MAX):
        """
        Generate samples from a uniform distribution over [0, domain).

        :param params: unused.
        :param size: number of samples.
        :param domain: domain size.
        :return: numpy array of samples.
        """
        return np.random.uniform(0, domain, size)
    @staticmethod
    def log_likelihood(params, data):
        """
        Log-likelihood of a uniform distribution over [0, max(data)].

        :param params: unused.
        :param data: the data over which the log-likelihood should be calculated.
        :return: log-likelihood.
        """
        return -len(data) * ln(float(np.max(data)))
# Shared module-level instance (all Uniform methods are static).
uniform = Uniform()
def generate_discrete_samples(values, probabilities, size=DEFAULT_SAMPLE_SIZE):
    """
    Draw samples of a discrete random variable given point probabilities.

    :param values: domain of values.
    :param probabilities: weights, one per value (normalised internally);
        must have the same length as ``values``.
    :param size: number of samples to return.
    :return: numpy array of samples.
    """
    assert len(values) == len(probabilities)
    weights = probabilities/np.sum(probabilities)
    sampler = stats.rv_discrete(values=(values, weights))
    return sampler.rvs(size=size)
|
synesenom/model.py
|
core/core.py
|
Python
|
mit
| 6,649
|
[
"DIRAC",
"Gaussian"
] |
730354c449b6ed4aacda3e746564ddc03d6cd9d0324ac4a7c260ccee482c4457
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
""" pytest test configures """
import pytest
import os.path as osp
import numpy as np
import sisl
# Mark every test in this module as a Siesta I/O test.
pytestmark = [pytest.mark.io, pytest.mark.siesta]
# Location of the Siesta fixtures inside the shared test-file tree.
_dir = osp.join('sisl', 'io', 'siesta')
def test_dm_si_pdos_kgrid(sisl_files):
    """Reading the DM directly vs. via the fdf sile must give the same matrix."""
    fdf = sisl.get_sile(sisl_files(_dir, 'si_pdos_kgrid.fdf'), base=sisl_files(_dir))
    si = sisl.get_sile(sisl_files(_dir, 'si_pdos_kgrid.DM'))
    DM1 = si.read_density_matrix(geometry=fdf.read_geometry())
    DM2 = fdf.read_density_matrix(order=['DM'])
    assert DM1._csr.spsame(DM2._csr)
    # Compare all data columns except the last -- presumably an auxiliary
    # sparse column; confirm against sisl's SparseCSR layout.
    assert np.allclose(DM1._csr._D[:, :-1], DM2._csr._D[:, :-1])
def test_dm_si_pdos_kgrid_rw(sisl_files, sisl_tmp):
    """Write/read round-trip of a DM file preserves the density matrix."""
    fdf = sisl.get_sile(sisl_files(_dir, 'si_pdos_kgrid.fdf'), base=sisl_files(_dir))
    geom = fdf.read_geometry()
    f1 = sisl.get_sile(sisl_files(_dir, 'si_pdos_kgrid.DM'))
    f2 = sisl.get_sile(sisl_tmp('test.DM', _dir))
    DM1 = f1.read_density_matrix(geometry=geom)
    # Unsorted write: elements come back in the original order.
    f2.write_density_matrix(DM1, sort=False)
    DM2 = f2.read_density_matrix(geometry=geom)
    assert DM1._csr.spsame(DM2._csr)
    assert np.allclose(DM1._csr._D[:, :-1], DM2._csr._D[:, :-1])
    # Sorted write + unsorted read: same sparsity pattern, but the element
    # order differs until finalize() normalises the CSR storage.
    f2.write_density_matrix(DM1)
    DM2 = f2.read_density_matrix(sort=False)
    assert DM1._csr.spsame(DM2._csr)
    assert not np.allclose(DM1._csr._D[:, :-1], DM2._csr._D[:, :-1])
    DM2.finalize()
    assert np.allclose(DM1._csr._D[:, :-1], DM2._csr._D[:, :-1])
def test_dm_si_pdos_kgrid_mulliken(sisl_files):
    """Orbital Mulliken charges, accumulated per atom, must equal atomic charges."""
    fdf = sisl.get_sile(sisl_files(_dir, 'si_pdos_kgrid.fdf'), base=sisl_files(_dir))
    DM = fdf.read_density_matrix(order=['DM'])
    Mo = DM.mulliken('orbital')
    Ma = DM.mulliken('atom')
    # Map every orbital index to its parent atom, then sum orbital charges.
    o2a = DM.geometry.o2a(np.arange(DM.no))
    ma = np.zeros_like(Ma)
    np.add.at(ma, o2a, Mo)
    assert np.allclose(ma, Ma)
def test_dm_soc_pt2_xx_mulliken(sisl_files):
    """Same per-atom Mulliken consistency check for a spin-orbit (SOC) DM."""
    fdf = sisl.get_sile(sisl_files(_dir, 'SOC_Pt2_xx.fdf'), base=sisl_files(_dir))
    # Force reading a geometry with correct atomic and orbital configuration
    DM = fdf.read_density_matrix(order=['DM'])
    Mo = DM.mulliken('orbital')
    Ma = DM.mulliken('atom')
    # Accumulate orbital-resolved charges onto their parent atoms.
    o2a = DM.geometry.o2a(np.arange(DM.no))
    ma = np.zeros_like(Ma)
    np.add.at(ma, o2a, Mo)
    assert np.allclose(ma, Ma)
def test_dm_soc_pt2_xx_rw(sisl_files, sisl_tmp):
    """Round-trip a spin-orbit DM file; finalize() must not change the data."""
    f1 = sisl.get_sile(sisl_files(_dir, 'SOC_Pt2_xx.DM'))
    f2 = sisl.get_sile(sisl_tmp('test.DM', _dir))
    DM1 = f1.read_density_matrix()
    f2.write_density_matrix(DM1)
    DM2 = f2.read_density_matrix()
    assert DM1._csr.spsame(DM2._csr)
    DM1.finalize()
    # Finalisation only normalises storage; sparsity and values are intact.
    assert DM1._csr.spsame(DM2._csr)
    assert np.allclose(DM1._csr._D[:, :-1], DM2._csr._D[:, :-1])
@pytest.mark.xfail(reason="Currently reading a geometry from TSHS does not retain l, m, zeta quantum numbers")
def test_dm_soc_pt2_xx_orbital_momentum(sisl_files):
    """Orbital momentum per orbital, accumulated per atom, equals the atomic value."""
    fdf = sisl.get_sile(sisl_files(_dir, 'SOC_Pt2_xx.fdf'), base=sisl_files(_dir))
    # Force reading a geometry with correct atomic and orbital configuration
    DM = fdf.read_density_matrix(order=['DM'])
    o2a = DM.geometry.o2a(np.arange(DM.no))
    # Calculate angular momentum
    Lo = DM.orbital_momentum('orbital')
    La = DM.orbital_momentum('atom')
    la = np.zeros_like(La)
    np.add.at(la, o2a, Lo.T)
    assert np.allclose(la, La)
|
zerothi/sisl
|
sisl/io/siesta/tests/test_dm.py
|
Python
|
mpl-2.0
| 3,454
|
[
"SIESTA"
] |
c212d2df9bc30e8930556530738ed6005fa121d3f4dd1477c4ae81ad2979c49e
|
import requests
import bs4
import json
from pymongo import MongoClient
from pymongo.errors import DuplicateKeyError, CollectionInvalid
import datetime as dt
# Define the MongoDB database and table (module-level connection shared by
# all functions below).
db_cilent = MongoClient()  # NOTE(review): name is a typo for "client"; kept for compatibility
db = db_cilent['zoeshrm']
table = db['Restaurant']
# Query the Yelp API once
# Query the API once.
# NOTE(review): the original comment said "Yelp", but callers pass the NYT
# article-search endpoint -- confirm. Returns parsed JSON on HTTP 200;
# otherwise prints a warning and implicitly returns None (Python-2 print).
def single_query(link, payload):
    response = requests.get(link, params=payload)
    if response.status_code != 200:
        print 'WARNING', response.status_code
    else:
        return response.json()
# Split a result-page count at the API's 100-page cap.
def more_than_100_pages(total_page):
    """Return (pages_first_pass, pages_second_pass, exceeds_cap)."""
    if total_page <= 100:
        return total_page, 0, False
    overflow = min(total_page - 100, 100)
    return 100, overflow, True
# Fetch each result page and insert the article-metadata documents into Mongo,
# skipping documents whose _id already exists. (Python-2 print syntax.)
def loop_through_pages(total_pages, link, payload, table):
    for i in range(total_pages):
        if i % 50 == 0:
            print ' || Page ', i
        payload['page'] = str(i)
        content = single_query(link, payload)
        meta_lst = content['response']['docs']
        for meta in meta_lst:
            try:
                table.insert(meta)
            except DuplicateKeyError:
                print 'DUPS!'
# Scrape the meta data (link to article) and put it into Mongo.
# NOTE(review): `yesterday` is never assigned (only `half_day` is computed),
# so this function raises NameError as written -- probably intended
# `yesterday = today - dt.timedelta(days=1)`; confirm.
# NOTE(review): an identical second definition of scrape_meta appears later
# in this file and shadows this one at import time.
# NOTE(review): when the result set fits in <= 100 pages (grt_100 False),
# nothing is downloaded at all -- a missing else branch, most likely.
def scrape_meta(days=1):
    # The basic parameters for the NYT API
    link = 'http://api.nytimes.com/svc/search/v2/articlesearch.json'
    payload = {'api-key': '74c73309c1052e6aa1785df7cd5cef8c:9:69947183'}
    today = dt.datetime(2015, 1, 28)
    for day in range(days):
        payload['end_date'] = str(today).replace('-','')
        half_day = today - dt.timedelta(hours=12)
        payload['begin_date'] = str(yesterday).replace('-','')
        print 'Scraping period: %s - %s ' % (str(yesterday), str(today))
        today -= dt.timedelta(days=2)
        content = single_query(link, payload)
        hits = content['response']['meta']['hits']
        total_pages = (hits / 10) + 1
        print 'HITS', hits
        newest_sort_pages, oldest_sort_pages, grt_100 = more_than_100_pages(total_pages)
        if grt_100:
            # Work around the 100-page API cap by fetching from both ends.
            new_payload = payload.copy()
            old_payload = payload.copy()
            new_payload['sort']= 'newest'
            old_payload['sort'] = 'oldest'
            loop_through_pages(newest_sort_pages, link, new_payload, table)
            loop_through_pages(oldest_sort_pages, link, old_payload, table)
# Scrape the meta data (link to article) and put it into Mongo.
# NOTE(review): this is a byte-for-byte DUPLICATE of the scrape_meta defined
# above; being second, it is the one that survives at import time. The same
# defects apply: `yesterday` is never assigned (NameError), and nothing is
# downloaded when the result set fits in <= 100 pages.
def scrape_meta(days=1):
    # The basic parameters for the NYT API
    link = 'http://api.nytimes.com/svc/search/v2/articlesearch.json'
    payload = {'api-key': '74c73309c1052e6aa1785df7cd5cef8c:9:69947183'}
    today = dt.datetime(2015, 1, 28)
    for day in range(days):
        payload['end_date'] = str(today).replace('-','')
        half_day = today - dt.timedelta(hours=12)
        payload['begin_date'] = str(yesterday).replace('-','')
        print 'Scraping period: %s - %s ' % (str(yesterday), str(today))
        today -= dt.timedelta(days=2)
        content = single_query(link, payload)
        hits = content['response']['meta']['hits']
        total_pages = (hits / 10) + 1
        print 'HITS', hits
        newest_sort_pages, oldest_sort_pages, grt_100 = more_than_100_pages(total_pages)
        if grt_100:
            # Work around the 100-page API cap by fetching from both ends.
            new_payload = payload.copy()
            old_payload = payload.copy()
            new_payload['sort']= 'newest'
            old_payload['sort'] = 'oldest'
            loop_through_pages(newest_sort_pages, link, new_payload, table)
            loop_through_pages(oldest_sort_pages, link, old_payload, table)
# Get all the links, visit the page and scrape the content
def get_articles(table):
links = table.find({},{'web_url': 1})
counter = 0
for uid_link in links:
counter += 1
if counter % 100 == 0:
print 'Count: ', counter, ' '
print uid
uid = uid_link['_id']
link = uid_link['web_url']
html = requests.get(link).content
soup = bs4.BeautifulSoup(html, 'html.parser')
article_content = '\n'.join([i.text for i in soup.select('p.story-body-text')])
if not article_content:
article_content = '\n'.join([i.text for i in soup.select('.caption-text')])
if not article_content:
article_content = '\n'.join([i.text for i in soup.select('[itemprop="description"]')])
if not article_content:
article_content = '\n'.join([i.text for i in soup.select('#nytDesignBody')])
else:
article_content = ''
table.update({'_id': uid}, {'$set': {'raw_html': html}})
table.update({'_id': uid}, {'$set': {'content_txt': article_content}})
# Script entry point: refresh the article metadata, then scrape the bodies.
if __name__ == '__main__':
    scrape_meta()
    get_articles(table)
|
kennethcc2005/travel_with_friends
|
restaurant_mongo.py
|
Python
|
mit
| 4,820
|
[
"VisIt"
] |
62d760a192ccc2d6b848e3cdd8d409186efce2297e7ad22745c5d183f117f367
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""Tools/Database Processing/Extract Event Descriptions from Event Data"""
#-------------------------------------------------------------------------
#
# python modules
#
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
#
# gnome/gtk
#
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
#
# gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
ngettext = glocale.translation.ngettext # else "nearby" comments are ignored
from gramps.gen.lib import EventRoleType
from gramps.gen.db import DbTxn
from gramps.gen.utils.db import family_name
from gramps.gui.plug import tool
from gramps.gen.display.name import displayer as name_displayer
#-------------------------------------------------------------------------
#
# EventNames
#
#-------------------------------------------------------------------------
class EventNames(tool.BatchTool):
    """
    Look for events that do not have a description, and build the description
    from the item that contains it.
    Looks for a PRIMARY role type for events attached to a persons, and a
    FAMILY role for an event that is attached to a family.
    """
    def __init__(self, dbstate, user, options_class, name, callback=None):
        # `callback` is accepted for the tool API but unused here.
        self.user = user
        tool.BatchTool.__init__(self, dbstate, user, options_class, name)
        if not self.fail:
            self.run()
    def run(self):
        """
        Perform the actual extraction of information.
        """
        with DbTxn(_("Event name changes"), self.db, batch=True) as trans:
            # Batch mode: suppress change signals; views are rebuilt afterwards.
            self.db.disable_signals()
            self.change = False
            counter = 0
            # Progress bar has 2 steps: one per pass (people, then families).
            with self.user.progress(
                    _("Extract Event Description"), '',
                    2) as step:
                # Pass 1: personal events where the person has the PRIMARY role.
                for person in self.db.iter_people():
                    for event_ref in person.get_event_ref_list():
                        if event_ref.get_role() == EventRoleType.PRIMARY:
                            event_handle = event_ref.ref
                            event = self.db.get_event_from_handle(event_handle)
                            if event.get_description() == "":
                                person_event_name(event, person)
                                self.db.commit_event(event, trans)
                                self.change = True
                                counter += 1
                step()
                # Pass 2: family events with the FAMILY role.
                for family in self.db.iter_families():
                    for event_ref in family.get_event_ref_list():
                        if event_ref.get_role() == EventRoleType.FAMILY:
                            event_handle = event_ref.ref
                            event = self.db.get_event_from_handle(event_handle)
                            if event.get_description() == "":
                                family_event_name(event, family, self.db)
                                self.db.commit_event(event, trans)
                                self.change = True
                                counter += 1
                step()
            self.db.enable_signals()
            self.db.request_rebuild()
        if hasattr(self.user.uistate, 'window'):
            parent_window = self.user.uistate.window
        else:
            parent_window = None
        if self.change == True:
            # translators: leave all/any {...} untranslated
            message = ngettext("{quantity} event description has been added",
                               "{quantity} event descriptions have been added",
                               counter).format(quantity=counter)
            self.user.info(_('Modifications made'), message,
                           parent=parent_window)
        else:
            self.user.info(_('No modifications made'),
                           _("No event description has been added."),
                           parent=parent_window)
#-------------------------------------------------------------------------
#
# Support functions
#
#-------------------------------------------------------------------------
# Translatable templates used to synthesize an event description from the
# event type plus the owning family or person.
# feature requests 2356, 1658: avoid genitive form
EVENT_FAMILY_STR = _("%(event_name)s of %(family)s")
# feature requests 2356, 1658: avoid genitive form
EVENT_PERSON_STR = _("%(event_name)s of %(person)s")
def person_event_name(event, person):
    """
    Build a name for an event based on the primary person's information.

    The description is generated from the EVENT_PERSON_STR template and is
    only written when the event has no description yet.
    """
    if event.get_description():
        return
    event.set_description(EVENT_PERSON_STR % {
        'event_name' : str(event.get_type()),
        'person' : name_displayer.display(person),
        })
def family_event_name(event, family, dbase):
    """
    Build a name for an event based on the family's information.

    The description is generated from the EVENT_FAMILY_STR template and is
    only written when the event has no description yet.
    """
    if event.get_description():
        return
    event.set_description(EVENT_FAMILY_STR % {
        'event_name' : str(event.get_type()),
        'family' : family_name(family, dbase),
        })
#------------------------------------------------------------------------
#
#
#
#------------------------------------------------------------------------
class EventNamesOptions(tool.ToolOptions):
    """
    Define options and provides handling interface.

    This batch tool exposes no options of its own; the class only delegates
    to the standard ToolOptions constructor.
    """
    def __init__(self, name, person_id=None):
        tool.ToolOptions.__init__(self, name, person_id)
|
sam-m888/gramps
|
gramps/plugins/tool/eventnames.py
|
Python
|
gpl-2.0
| 6,530
|
[
"Brian"
] |
e2aa9a86b8939eb16e419904a606b09772de2d7edd6f3025fb6cdf54bec2bdd7
|
import k3d
import pathlib
import vtk
path = pathlib.Path(__file__).parent.resolve()
def generate():
    """Build a k3d plot of the VTK cow model and return its inline snapshot."""
    # Permutation matrix exchanging the Y and Z axes of the model.
    swap_yz_matrix = (
        1.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 1.0, 0.0,
        0.0, 1.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 1.0
    )

    reader = vtk.vtkXMLPolyDataReader()
    reader.SetFileName(str(path) + '/assets/cow.vtp')
    reader.Update()

    plot = k3d.plot(screenshot_scale=1.0)
    plot += k3d.vtk_poly_data(reader.GetOutput(), color=0xff0000,
                              model_matrix=swap_yz_matrix)

    plot.snapshot_type = 'inline'
    return plot.get_snapshot()
|
K3D-tools/K3D-jupyter
|
docs/source/showcase/VTK_polydata_plot.py
|
Python
|
mit
| 608
|
[
"VTK"
] |
bd128237b0edc773bfdcdd5a1aca6ad9827329b489dae8952d11ee6aaf61ca70
|
"""
ConfigParser subclass for config files
with no sections.
"""
from distutils.core import setup
import setuptools # this import is needed so that some options and commands work
setup(
name='simple-configparser',
version='0.1.3.0',
author='Brian E. Peterson',
author_email='bepetersondev@gmail.com',
url='https://github.com/bepetersn/simple-configparser',
zip_safe=False,
description=__doc__,
packages=[
'simple_configparser'
],
install_requires=[
],
)
|
bepetersn/simple-configparser
|
setup.py
|
Python
|
mit
| 510
|
[
"Brian"
] |
a026f4753a9b0c602950c87f3149b5c5e67a89d32078b2aff14ad8daf2244a8a
|
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Testmodule for the Widom Insertion.
"""
import unittest as ut
import unittest_decorators as utx
import numpy as np
import espressomd
import espressomd.reaction_ensemble
import tests_common
@utx.skipIfMissingFeatures(["LENNARD_JONES"])
class WidomInsertionTest(ut.TestCase):
    """Test the implementation of the widom insertion.

    The excess chemical potential is calculated for identical particles in
    a 20 cubed box with a single particle, interacting via a LJ-potential
    (cut-off at 5 sigma)."""

    # System and interaction parameters for the reference calculation.
    N0 = 1
    TEMPERATURE = 0.5
    TYPE_HA = 0
    CHARGE_HA = 0
    LJ_EPS = 1.0
    LJ_SIG = 1.0
    LJ_CUT = 5
    BOX_L = 2 * LJ_CUT
    # Shift evaluated at the cut-off so the truncated LJ potential is zero there.
    LJ_SHIFT = tests_common.lj_potential(
        LJ_CUT, LJ_EPS, LJ_SIG, LJ_CUT + 1.0, 0.0)

    radius = np.linspace(1e-10, LJ_CUT, 1000)
    # numerical integration for radii smaller than the cut-off in spherical
    # coordinates
    integrateUpToCutOff = 4 * np.pi * np.trapz(
        radius**2 * np.exp(-tests_common.lj_potential(radius,
                                                      LJ_EPS,
                                                      LJ_SIG,
                                                      LJ_CUT,
                                                      LJ_SHIFT) / TEMPERATURE),
        x=radius)
    # numerical solution for V_lj=0 => corresponds to the volume (as exp(0)=1)
    integreateRest = (BOX_L**3 - 4.0 / 3.0 * np.pi * LJ_CUT**3)

    # calculate excess chemical potential of the system, see Frenkel Smith,
    # p 174. Note: He uses scaled coordinates, which is why we need to divide
    # by the box volume
    target_mu_ex = -TEMPERATURE * \
        np.log((integrateUpToCutOff + integreateRest) / BOX_L**3)

    # Note: the simulation system is built once at class-definition time and
    # shared by all tests in this class.
    system = espressomd.System(box_l=np.ones(3) * BOX_L)
    system.cell_system.set_n_square()
    np.random.seed(69)  # make reaction code fully deterministic
    system.cell_system.skin = 0.4
    Widom = espressomd.reaction_ensemble.WidomInsertion(
        kT=TEMPERATURE, seed=1)

    # Set the hidden particle type to the lowest possible number to speed
    # up the simulation
    Widom.set_non_interacting_type(type=1)

    def setUp(self):
        # Place one LJ particle at the box center and register the insertion
        # reaction (no reactants -> one product particle of TYPE_HA).
        self.system.part.add(pos=0.5 * self.system.box_l, type=self.TYPE_HA)
        self.system.non_bonded_inter[self.TYPE_HA, self.TYPE_HA].lennard_jones.set_params(
            epsilon=self.LJ_EPS, sigma=self.LJ_SIG, cutoff=self.LJ_CUT,
            shift="auto")
        self.Widom.add_reaction(
            reactant_types=[],
            reactant_coefficients=[],
            product_types=[self.TYPE_HA],
            product_coefficients=[1],
            default_charges={self.TYPE_HA: self.CHARGE_HA})

    def test_widom_insertion(self):
        # Sample trial-insertion energies and compare the resulting excess
        # chemical potential against the numerically integrated reference.
        num_samples = 10000
        particle_insertion_potential_energy_samples = []
        for _ in range(num_samples):
            # 0 for insertion reaction
            particle_insertion_potential_energy = self.Widom.calculate_particle_insertion_potential_energy(
                reaction_id=0)
            particle_insertion_potential_energy_samples.append(
                particle_insertion_potential_energy)
        mu_ex_mean, mu_ex_Delta = self.Widom.calculate_excess_chemical_potential(
            particle_insertion_potential_energy_samples=particle_insertion_potential_energy_samples)
        deviation_mu_ex = abs(np.mean(mu_ex_mean) - self.target_mu_ex)
        self.assertLess(
            deviation_mu_ex,
            1e-3,
            msg="\nExcess chemical potential for single LJ-particle computed via Widom insertion is wrong.\n"
            + f" average mu_ex: {np.mean(mu_ex_mean):.4f}"
            + f" mu_ex_std_err: {np.std(mu_ex_Delta):.5f}"
            + f" target_mu_ex: {self.target_mu_ex:.4f}"
        )
if __name__ == "__main__":
ut.main()
|
pkreissl/espresso
|
testsuite/python/widom_insertion.py
|
Python
|
gpl-3.0
| 4,515
|
[
"ESPResSo"
] |
d997013653722dc5dd92bdd66e4d5488cffb80a6f511ab1b2cd3210991186ebc
|
"""
=====================
CollectionViewer demo
=====================
Demo of CollectionViewer for viewing collections of images. This demo uses
the different layers of the gaussian pyramid as image collection.
You can scroll through images with the slider, or you can interact with the
viewer using your keyboard:
left/right arrows
Previous/next image in collection.
number keys, 0--9
0% to 90% of collection. For example, "5" goes to the image in the
middle (i.e. 50%) of the collection.
home/end keys
First/last image in collection.
"""
from skimage import data
from skimage.viewer import CollectionViewer
from skimage.transform import pyramid_gaussian
img = data.lena()
img_collection = tuple(pyramid_gaussian(img))
view = CollectionViewer(img_collection)
view.show()
|
chintak/scikit-image
|
viewer_examples/viewers/collection_viewer.py
|
Python
|
bsd-3-clause
| 795
|
[
"Gaussian"
] |
76be2394d70196dbc544d58d652989dd10e0f547337cf101e60d64b49885d552
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: James D. McClain
# Mario Motta
# Yang Gao
# Qiming Sun <osirpt.sun@gmail.com>
# Jason Yu
# Alec White
#
import time
from functools import reduce
import numpy as np
import h5py
from pyscf import lib
from pyscf.lib import logger
from pyscf.pbc import scf
from pyscf.cc import uccsd
from pyscf.pbc.lib import kpts_helper
from pyscf.pbc.lib.kpts_helper import member, gamma_point
from pyscf.pbc.mp.kump2 import (get_frozen_mask, get_nocc, get_nmo,
padded_mo_coeff, padding_k_idx)
from pyscf.pbc.cc import kintermediates_uhf
from pyscf import __config__
einsum = lib.einsum
# --- list2array
def mo_c_list_to_array(mo_coeff):
    """Stack per-k-point MO coefficient matrices into one complex array
    of shape (nkpts, nao, nao) for each of the two spin channels."""
    stacked_spins = []
    for spin in range(2):
        kpt_mats = mo_coeff[spin]
        nkpts = len(kpt_mats)
        nao = kpt_mats[0].shape[0]
        stacked = np.zeros((nkpts, nao, nao), dtype=complex)
        for kp in range(nkpts):
            stacked[kp, :, :] = kpt_mats[kp][:, :]
        stacked_spins.append(stacked)
    return stacked_spins
def convert_mo_coeff(mo_coeff):
    """Normalize MO coefficients: list-of-list input is stacked into
    per-spin arrays, any other input is returned unchanged."""
    if not isinstance(mo_coeff[0], list):
        return mo_coeff
    return mo_c_list_to_array(mo_coeff)
def update_amps(cc, t1, t2, eris):
    """One KUCCSD amplitude-update step: build the right-hand sides of the
    T1 and T2 equations from the current amplitudes and divide by the
    orbital-energy denominators.

    Args:
        cc: KUCCSD object (supplies nkpts, khelper, level_shift, logging).
        t1: tuple (t1a, t1b) of alpha/beta singles amplitudes.
        t2: tuple (t2aa, t2ab, t2bb) of doubles amplitudes.
        eris: integral object with Fock matrices, MO energies and the
            antisymmetrized two-electron integral blocks used below.

    Returns:
        ((Ht1a, Ht1b), (Ht2aa, Ht2ab, Ht2bb)) — the updated amplitudes.
    """
    from pyscf.lib.parameters import LARGE_DENOM
    # time.clock() was removed in Python 3.8; process_time() provides the
    # CPU-time component expected by log.timer_debug1.
    time0 = time.process_time(), time.time()
    log = logger.Logger(cc.stdout, cc.verbose)

    t1a, t1b = t1
    t2aa, t2ab, t2bb = t2
    Ht1a = np.zeros_like(t1a)
    Ht1b = np.zeros_like(t1b)
    Ht2aa = np.zeros_like(t2aa)
    Ht2ab = np.zeros_like(t2ab)
    Ht2bb = np.zeros_like(t2bb)

    nkpts, nocca, nvira = t1a.shape
    noccb, nvirb = t1b.shape[1:]

    fvv_ = eris.fock[0][:,nocca:,nocca:]
    fVV_ = eris.fock[1][:,noccb:,noccb:]
    foo_ = eris.fock[0][:,:nocca,:nocca]
    fOO_ = eris.fock[1][:,:noccb,:noccb]
    fov_ = eris.fock[0][:,:nocca,nocca:]
    fOV_ = eris.fock[1][:,:noccb,noccb:]

    # Get location of padded elements in occupied and virtual space
    nonzero_padding_alpha, nonzero_padding_beta = padding_k_idx(cc, kind="split")
    nonzero_opadding_alpha, nonzero_vpadding_alpha = nonzero_padding_alpha
    nonzero_opadding_beta, nonzero_vpadding_beta = nonzero_padding_beta

    mo_ea_o = [e[:nocca] for e in eris.mo_energy[0]]
    mo_eb_o = [e[:noccb] for e in eris.mo_energy[1]]
    mo_ea_v = [e[nocca:] + cc.level_shift for e in eris.mo_energy[0]]
    mo_eb_v = [e[noccb:] + cc.level_shift for e in eris.mo_energy[1]]

    Fvv_, FVV_ = kintermediates_uhf.cc_Fvv(cc, t1, t2, eris)
    Foo_, FOO_ = kintermediates_uhf.cc_Foo(cc, t1, t2, eris)
    Fov_, FOV_ = kintermediates_uhf.cc_Fov(cc, t1, t2, eris)

    # Move energy terms to the other side
    for k in range(nkpts):
        Fvv_[k][np.diag_indices(nvira)] -= mo_ea_v[k]
        FVV_[k][np.diag_indices(nvirb)] -= mo_eb_v[k]
        Foo_[k][np.diag_indices(nocca)] -= mo_ea_o[k]
        FOO_[k][np.diag_indices(noccb)] -= mo_eb_o[k]

    # Get the momentum conservation array
    kconserv = cc.khelper.kconserv

    # T1 equation
    P = kintermediates_uhf.kconserv_mat(cc.nkpts, cc.khelper.kconserv)
    Ht1a += fov_.conj()
    Ht1b += fOV_.conj()
    Ht1a += einsum('xyximae,yme->xia', t2aa, Fov_)
    Ht1a += einsum('xyximae,yme->xia', t2ab, FOV_)
    Ht1b += einsum('xyximae,yme->xia', t2bb, FOV_)
    Ht1b += einsum('yxymiea,yme->xia', t2ab, Fov_)
    Ht1a -= einsum('xyzmnae, xzymine->zia', t2aa, eris.ooov)
    Ht1a -= einsum('xyzmNaE, xzymiNE->zia', t2ab, eris.ooOV)
    #Ht1a -= einsum('xyzmnae,xzymine,xyzw->zia', t2aa, eris.ooov, P)
    #Ht1a -= einsum('xyzmNaE,xzymiNE,xyzw->zia', t2ab, eris.ooOV, P)
    Ht1b -= einsum('xyzmnae, xzymine->zia', t2bb, eris.OOOV)
    #Ht1b -= einsum('xyzmnae,xzymine,xyzw->zia', t2bb, eris.OOOV, P)
    Ht1b -= einsum('yxwnmea,xzymine,xyzw->zia', t2ab, eris.OOov, P)

    for ka in range(nkpts):
        Ht1a[ka] += einsum('ie,ae->ia', t1a[ka], Fvv_[ka])
        Ht1b[ka] += einsum('ie,ae->ia', t1b[ka], FVV_[ka])
        Ht1a[ka] -= einsum('ma,mi->ia', t1a[ka], Foo_[ka])
        Ht1b[ka] -= einsum('ma,mi->ia', t1b[ka], FOO_[ka])

        for km in range(nkpts):
            # ka == ki; km == kf == km
            # <ma||if> = [mi|af] - [mf|ai]
            #         => [mi|af] - [fm|ia]
            Ht1a[ka] += einsum('mf,aimf->ia', t1a[km], eris.voov[ka, ka, km])
            Ht1a[ka] -= einsum('mf,miaf->ia', t1a[km], eris.oovv[km, ka, ka])
            Ht1a[ka] += einsum('MF,aiMF->ia', t1b[km], eris.voOV[ka, ka, km])

            # miaf - mfai => miaf - fmia
            Ht1b[ka] += einsum('MF,AIMF->IA', t1b[km], eris.VOOV[ka, ka, km])
            Ht1b[ka] -= einsum('MF,MIAF->IA', t1b[km], eris.OOVV[km, ka, ka])
            Ht1b[ka] += einsum('mf,fmIA->IA', t1a[km], eris.voOV[km, km, ka].conj())

            for kf in range(nkpts):
                ki = ka
                ke = kconserv[ki, kf, km]
                Ht1a[ka] += einsum('imef,fmea->ia', t2aa[ki,km,ke], eris.vovv[kf,km,ke].conj())
                Ht1a[ka] += einsum('iMeF,FMea->ia', t2ab[ki,km,ke], eris.VOvv[kf,km,ke].conj())
                Ht1b[ka] += einsum('IMEF,FMEA->IA', t2bb[ki,km,ke], eris.VOVV[kf,km,ke].conj())
                Ht1b[ka] += einsum('mIfE,fmEA->IA', t2ab[km,ki,kf], eris.voVV[kf,km,ke].conj())

    for ki, kj, ka in kpts_helper.loop_kkk(nkpts):
        kb = kconserv[ki, ka, kj]
        # Fvv equation
        Ftmpa_kb = Fvv_[kb] - 0.5 * einsum('mb,me->be', t1a[kb], Fov_[kb])
        Ftmpb_kb = FVV_[kb] - 0.5 * einsum('MB,ME->BE', t1b[kb], FOV_[kb])

        Ftmpa_ka = Fvv_[ka] - 0.5 * einsum('mb,me->be', t1a[ka], Fov_[ka])
        Ftmpb_ka = FVV_[ka] - 0.5 * einsum('MB,ME->BE', t1b[ka], FOV_[ka])

        tmp = einsum('ijae,be->ijab', t2aa[ki, kj, ka], Ftmpa_kb)
        Ht2aa[ki, kj, ka] += tmp

        tmp = einsum('IJAE,BE->IJAB', t2bb[ki, kj, ka], Ftmpb_kb)
        Ht2bb[ki, kj, ka] += tmp

        tmp = einsum('iJaE,BE->iJaB', t2ab[ki, kj, ka], Ftmpb_kb)
        Ht2ab[ki, kj, ka] += tmp

        tmp = einsum('iJeB,ae->iJaB', t2ab[ki, kj, ka], Ftmpa_ka)
        Ht2ab[ki, kj, ka] += tmp

        #P(ab)
        tmp = einsum('ijbe,ae->ijab', t2aa[ki, kj, kb], Ftmpa_ka)
        Ht2aa[ki, kj, ka] -= tmp

        tmp = einsum('IJBE,AE->IJAB', t2bb[ki, kj, kb], Ftmpb_ka)
        Ht2bb[ki, kj, ka] -= tmp

        # Foo equation
        Ftmpa_kj = Foo_[kj] + 0.5 * einsum('je,me->mj', t1a[kj], Fov_[kj])
        Ftmpb_kj = FOO_[kj] + 0.5 * einsum('JE,ME->MJ', t1b[kj], FOV_[kj])

        Ftmpa_ki = Foo_[ki] + 0.5 * einsum('je,me->mj', t1a[ki], Fov_[ki])
        Ftmpb_ki = FOO_[ki] + 0.5 * einsum('JE,ME->MJ', t1b[ki], FOV_[ki])

        tmp = einsum('imab,mj->ijab', t2aa[ki, kj, ka], Ftmpa_kj)
        Ht2aa[ki, kj, ka] -= tmp

        tmp = einsum('IMAB,MJ->IJAB', t2bb[ki, kj, ka], Ftmpb_kj)
        Ht2bb[ki, kj, ka] -= tmp

        tmp = einsum('iMaB,MJ->iJaB', t2ab[ki, kj, ka], Ftmpb_kj)
        Ht2ab[ki, kj, ka] -= tmp

        tmp = einsum('mJaB,mi->iJaB', t2ab[ki, kj, ka], Ftmpa_ki)
        Ht2ab[ki, kj, ka] -= tmp

        #P(ij)
        tmp = einsum('jmab,mi->ijab', t2aa[kj, ki, ka], Ftmpa_ki)
        Ht2aa[ki, kj, ka] += tmp

        tmp = einsum('JMAB,MI->IJAB', t2bb[kj, ki, ka], Ftmpb_ki)
        Ht2bb[ki, kj, ka] += tmp

    # T2 equation
    eris_ovov = np.asarray(eris.ovov)
    eris_OVOV = np.asarray(eris.OVOV)
    eris_ovOV = np.asarray(eris.ovOV)

    Ht2aa += (eris_ovov.transpose(0,2,1,3,5,4,6) - eris_ovov.transpose(2,0,1,5,3,4,6)).conj()
    Ht2bb += (eris_OVOV.transpose(0,2,1,3,5,4,6) - eris_OVOV.transpose(2,0,1,5,3,4,6)).conj()
    Ht2ab += eris_ovOV.transpose(0,2,1,3,5,4,6).conj()

    tauaa, tauab, taubb = kintermediates_uhf.make_tau(cc, t2, t1, t1)
    Woooo, WooOO, WOOOO = kintermediates_uhf.cc_Woooo(cc, t1, t2, eris)

    # Add the contributions from Wvvvv
    for km, ki, kn in kpts_helper.loop_kkk(nkpts):
        kj = kconserv[km,ki,kn]
        Woooo[km,ki,kn] += .5 * einsum('xmenf, xijef->minj', eris_ovov[km,:,kn], tauaa[ki,kj])
        WOOOO[km,ki,kn] += .5 * einsum('xMENF, xIJEF->MINJ', eris_OVOV[km,:,kn], taubb[ki,kj])
        WooOO[km,ki,kn] += .5 * einsum('xmeNF, xiJeF->miNJ', eris_ovOV[km,:,kn], tauab[ki,kj])

    for km, ki, kn in kpts_helper.loop_kkk(nkpts):
        kj = kconserv[km,ki,kn]
        Ht2aa[ki,kj,:] += einsum('minj,wmnab->wijab', Woooo[km,ki,kn], tauaa[km,kn]) * .5
        Ht2bb[ki,kj,:] += einsum('MINJ,wMNAB->wIJAB', WOOOO[km,ki,kn], taubb[km,kn]) * .5
        Ht2ab[ki,kj,:] += einsum('miNJ,wmNaB->wiJaB', WooOO[km,ki,kn], tauab[km,kn])

    add_vvvv_(cc, (Ht2aa, Ht2ab, Ht2bb), t1, t2, eris)

    Wovvo, WovVO, WOVvo, WOVVO, WoVVo, WOvvO = \
        kintermediates_uhf.cc_Wovvo(cc, t1, t2, eris)

    #:Ht2ab += einsum('xwzimae,wvumeBJ,xwzv,wuvy->xyziJaB', t2aa, WovVO, P, P)
    #:Ht2ab += einsum('xwziMaE,wvuMEBJ,xwzv,wuvy->xyziJaB', t2ab, WOVVO, P, P)
    #:Ht2ab -= einsum('xie,zma,uwzBJme,zuwx,xyzu->xyziJaB', t1a, t1a, eris.VOov, P, P)
    for kx, kw, kz in kpts_helper.loop_kkk(nkpts):
        kv = kconserv[kx, kz, kw]
        for ku in range(nkpts):
            ky = kconserv[kw, kv, ku]
            Ht2ab[kx, ky, kz] += lib.einsum('imae,mebj->ijab', t2aa[kx,kw,kz], WovVO[kw,kv,ku])
            Ht2ab[kx, ky, kz] += lib.einsum('imae,mebj->ijab', t2ab[kx,kw,kz], WOVVO[kw,kv,ku])

    #for kz, ku, kw in kpts_helper.loop_kkk(nkpts):
    #    kx = kconserv[kz,kw,ku]
    #    ky = kconserv[kz,kx,ku]
    #    continue
    #    Ht2ab[kx, ky, kz] -= lib.einsum('ie, ma, emjb->ijab', t1a[kx], t1a[kz], eris.voOV[kx,kz,kw].conj())
    Ht2ab -= einsum('xie, yma, xyzemjb->xzyijab', t1a, t1a, eris.voOV[:].conj())

    #:Ht2ab += einsum('wxvmIeA,wvumebj,xwzv,wuvy->yxujIbA', t2ab, Wovvo, P, P)
    #:Ht2ab += einsum('wxvMIEA,wvuMEbj,xwzv,wuvy->yxujIbA', t2bb, WOVvo, P, P)
    #:Ht2ab -= einsum('xIE,zMA,uwzbjME,zuwx,xyzu->yxujIbA', t1b, t1b, eris.voOV, P, P)
    #for kx, kw, kz in kpts_helper.loop_kkk(nkpts):
    #    kv = kconserv[kx, kz, kw]
    #    for ku in range(nkpts):
    #        ky = kconserv[kw, kv, ku]
    #        #Ht2ab[ky,kx,ku] += lib.einsum('miea, mebj-> jiba', t2ab[kw,kx,kv], Wovvo[kw,kv,ku])
    #        #Ht2ab[ky,kx,ku] += lib.einsum('miea, mebj-> jiba', t2bb[kw,kx,kv], WOVvo[kw,kv,ku])
    for km, ke, kb in kpts_helper.loop_kkk(nkpts):
        kj = kconserv[km, ke, kb]
        Ht2ab[kj,:,kb] += einsum('xmiea, mebj->xjiba', t2ab[km,:,ke], Wovvo[km,ke,kb])
        Ht2ab[kj,:,kb] += einsum('xmiea, mebj->xjiba', t2bb[km,:,ke], WOVvo[km,ke,kb])

    for kz, ku, kw in kpts_helper.loop_kkk(nkpts):
        kx = kconserv[kz, kw, ku]
        ky = kconserv[kz, kx, ku]
        Ht2ab[ky,kx,ku] -= lib.einsum('ie, ma, bjme->jiba', t1b[kx], t1b[kz], eris.voOV[ku,kw,kz])

    #:Ht2ab += einsum('xwviMeA,wvuMebJ,xwzv,wuvy->xyuiJbA', t2ab, WOvvO, P, P)
    #:Ht2ab -= einsum('xie,zMA,zwuMJbe,zuwx,xyzu->xyuiJbA', t1a, t1b, eris.OOvv, P, P)
    #for kx, kw, kz in kpts_helper.loop_kkk(nkpts):
    #    kv = kconserv[kx, kz, kw]
    #    for ku in range(nkpts):
    #        ky = kconserv[kw, kv, ku]
    #        Ht2ab[kx,ky,ku] += lib.einsum('imea,mebj->ijba', t2ab[kx,kw,kv],WOvvO[kw,kv,ku])
    for km, ke, kb in kpts_helper.loop_kkk(nkpts):
        kj = kconserv[km, ke, kb]
        Ht2ab[:,kj,kb] += einsum('ximea, mebj->xijba', t2ab[:,km,ke], WOvvO[km,ke,kb])

    for kz,ku,kw in kpts_helper.loop_kkk(nkpts):
        kx = kconserv[kz, kw, ku]
        ky = kconserv[kz, kx, ku]
        Ht2ab[kx,ky,ku] -= lib.einsum('ie, ma, mjbe->ijba', t1a[kx], t1b[kz], eris.OOvv[kz, kw, ku])

    #:Ht2ab += einsum('wxzmIaE,wvumEBj,xwzv,wuvy->yxzjIaB', t2ab, WoVVo, P, P)
    #:Ht2ab -= einsum('xIE,zma,zwumjBE,zuwx,xyzu->yxzjIaB', t1b, t1a, eris.ooVV, P, P)
    for kx, kw, kz in kpts_helper.loop_kkk(nkpts):
        kv = kconserv[kx, kz, kw]
        for ku in range(nkpts):
            ky = kconserv[kw, kv, ku]
            Ht2ab[ky, kx, kz] += lib.einsum('miae,mebj->jiab', t2ab[kw,kx,kz], WoVVo[kw,kv,ku])

    for kz, ku, kw in kpts_helper.loop_kkk(nkpts):
        kx = kconserv[kz,kw,ku]
        ky = kconserv[kz,kx,ku]
        Ht2ab[ky,kx,kz] -= lib.einsum('ie, ma, mjbe->jiab', t1b[kx], t1a[kz], eris.ooVV[kz,kw,ku])

    #:u2aa = einsum('xwzimae,wvumebj,xwzv,wuvy->xyzijab', t2aa, Wovvo, P, P)
    #:u2aa += einsum('xwziMaE,wvuMEbj,xwzv,wuvy->xyzijab', t2ab, WOVvo, P, P)
    #Left this in to keep proper shape, need to replace later
    u2aa = np.zeros_like(t2aa)
    for kx, kw, kz in kpts_helper.loop_kkk(nkpts):
        kv = kconserv[kx, kz, kw]
        for ku in range(nkpts):
            ky = kconserv[kw, kv, ku]
            u2aa[kx,ky,kz] += lib.einsum('imae, mebj->ijab', t2aa[kx,kw,kz], Wovvo[kw,kv,ku])
            u2aa[kx,ky,kz] += lib.einsum('imae, mebj->ijab', t2ab[kx,kw,kz], WOVvo[kw,kv,ku])

    #:u2aa += einsum('xie,zma,zwumjbe,zuwx,xyzu->xyzijab', t1a, t1a, eris.oovv, P, P)
    #:u2aa -= einsum('xie,zma,uwzbjme,zuwx,xyzu->xyzijab', t1a, t1a, eris.voov, P, P)
    for kz, ku, kw in kpts_helper.loop_kkk(nkpts):
        kx = kconserv[kz,kw,ku]
        ky = kconserv[kz,kx,ku]
        u2aa[kx,ky,kz] += lib.einsum('ie,ma,mjbe->ijab',t1a[kx],t1a[kz],eris.oovv[kz,kw,ku])
        u2aa[kx,ky,kz] -= lib.einsum('ie,ma,bjme->ijab',t1a[kx],t1a[kz],eris.voov[ku,kw,kz])

    #:u2aa += np.einsum('xie,uyzbjae,uzyx->xyzijab', t1a, eris.vovv, P)
    #:u2aa -= np.einsum('zma,xzyimjb->xyzijab', t1a, eris.ooov.conj())
    for ky, kx, ku in kpts_helper.loop_kkk(nkpts):
        kz = kconserv[ky, ku, kx]
        u2aa[kx, ky, kz] += lib.einsum('ie, bjae->ijab', t1a[kx], eris.vovv[ku,ky,kz])
        u2aa[kx, ky, kz] -= lib.einsum('ma, imjb->ijab', t1a[kz], eris.ooov[kx,kz,ky].conj())

    u2aa = u2aa - u2aa.transpose(1,0,2,4,3,5,6)
    u2aa = u2aa - einsum('xyzijab,xyzu->xyuijba', u2aa, P)
    Ht2aa += u2aa

    #:u2bb = einsum('xwzimae,wvumebj,xwzv,wuvy->xyzijab', t2bb, WOVVO, P, P)
    #:u2bb += einsum('wxvMiEa,wvuMEbj,xwzv,wuvy->xyzijab', t2ab, WovVO, P, P)
    #:u2bb += einsum('xie,zma,zwumjbe,zuwx,xyzu->xyzijab', t1b, t1b, eris.OOVV, P, P)
    #:u2bb -= einsum('xie,zma,uwzbjme,zuwx,xyzu->xyzijab', t1b, t1b, eris.VOOV, P, P)
    u2bb = np.zeros_like(t2bb)
    for kx, kw, kz in kpts_helper.loop_kkk(nkpts):
        kv = kconserv[kx, kz, kw]
        for ku in range(nkpts):
            ky = kconserv[kw,kv, ku]
            u2bb[kx, ky, kz] += lib.einsum('imae,mebj->ijab', t2bb[kx,kw,kz], WOVVO[kw,kv,ku])
            u2bb[kx, ky, kz] += lib.einsum('miea, mebj-> ijab', t2ab[kw,kx,kv],WovVO[kw,kv,ku])

    for kz, ku, kw in kpts_helper.loop_kkk(nkpts):
        kx = kconserv[kz, kw, ku]
        ky = kconserv[kz, kx, ku]
        u2bb[kx, ky, kz] += lib.einsum('ie, ma, mjbe->ijab',t1b[kx],t1b[kz],eris.OOVV[kz,kw,ku])
        u2bb[kx, ky, kz] -= lib.einsum('ie, ma, bjme->ijab', t1b[kx], t1b[kz],eris.VOOV[ku,kw,kz])

    #:u2bb += np.einsum('xie,uzybjae,uzyx->xyzijab', t1b, eris.VOVV, P)
    #:u2bb -= np.einsum('zma,xzyimjb->xyzijab', t1b, eris.OOOV.conj())
    for ky, kx, ku in kpts_helper.loop_kkk(nkpts):
        kz = kconserv[ky, ku, kx]
        u2bb[kx,ky,kz] += lib.einsum('ie,bjae->ijab', t1b[kx], eris.VOVV[ku,ky,kz])

    #for kx, kz, ky in kpts_helper.loop_kkk(nkpts):
    #    u2bb[kx,ky,kz] -= lib.einsum('ma, imjb-> ijab', t1b[kz], eris.OOOV[kx,kz,ky].conj())
    u2bb -= einsum('zma, xzyimjb->xyzijab', t1b, eris.OOOV[:].conj())

    u2bb = u2bb - u2bb.transpose(1,0,2,4,3,5,6)
    u2bb = u2bb - einsum('xyzijab,xyzu->xyuijba', u2bb, P)
    Ht2bb += u2bb

    #:Ht2ab += np.einsum('xie,uyzBJae,uzyx->xyziJaB', t1a, eris.VOvv, P)
    #:Ht2ab += np.einsum('yJE,zxuaiBE,zuxy->xyziJaB', t1b, eris.voVV, P)
    #:Ht2ab -= np.einsum('zma,xzyimjb->xyzijab', t1a, eris.ooOV.conj())
    #:Ht2ab -= np.einsum('umb,yuxjmia,xyuz->xyzijab', t1b, eris.OOov.conj(), P)
    for ky, kx, ku in kpts_helper.loop_kkk(nkpts):
        kz = kconserv[ky,ku,kx]
        Ht2ab[kx,ky,kz] += lib.einsum('ie, bjae-> ijab', t1a[kx], eris.VOvv[ku,ky,kz])
        Ht2ab[kx,ky,kz] += lib.einsum('je, aibe-> ijab', t1b[ky], eris.voVV[kz,kx,ku])

    #for kx, kz, ky in kpts_helper.loop_kkk(nkpts):
    #    Ht2ab[kx,ky,kz] -= lib.einsum('ma, imjb->ijab', t1a[kz], eris.ooOV[kx,kz,ky].conj())
    Ht2ab -= einsum('zma, xzyimjb->xyzijab', t1a, eris.ooOV[:].conj())

    for kx, ky, ku in kpts_helper.loop_kkk(nkpts):
        kz = kconserv[kx, ku, ky]
        Ht2ab[kx,ky,kz] -= lib.einsum('mb,jmia->ijab',t1b[ku],eris.OOov[ky,ku,kx].conj())

    # Build orbital-energy denominators; padded (non-physical) entries are
    # set to LARGE_DENOM so their amplitude contributions vanish.
    eia = []
    eIA = []
    for ki in range(nkpts):
        tmp_alpha = []
        tmp_beta = []
        for ka in range(nkpts):
            tmp_eia = LARGE_DENOM * np.ones((nocca, nvira), dtype=eris.mo_energy[0][0].dtype)
            tmp_eIA = LARGE_DENOM * np.ones((noccb, nvirb), dtype=eris.mo_energy[0][0].dtype)
            n0_ovp_ia = np.ix_(nonzero_opadding_alpha[ki], nonzero_vpadding_alpha[ka])
            n0_ovp_IA = np.ix_(nonzero_opadding_beta[ki], nonzero_vpadding_beta[ka])

            tmp_eia[n0_ovp_ia] = (mo_ea_o[ki][:,None] - mo_ea_v[ka])[n0_ovp_ia]
            tmp_eIA[n0_ovp_IA] = (mo_eb_o[ki][:,None] - mo_eb_v[ka])[n0_ovp_IA]
            tmp_alpha.append(tmp_eia)
            tmp_beta.append(tmp_eIA)
        eia.append(tmp_alpha)
        eIA.append(tmp_beta)

    for ki in range(nkpts):
        ka = ki
        # Remove zero/padded elements from denominator
        Ht1a[ki] /= eia[ki][ka]
        Ht1b[ki] /= eIA[ki][ka]

    for ki, kj, ka in kpts_helper.loop_kkk(nkpts):
        kb = kconserv[ki, ka, kj]
        eijab = eia[ki][ka][:,None,:,None] + eia[kj][kb][:,None,:]
        Ht2aa[ki,kj,ka] /= eijab

        eijab = eia[ki][ka][:,None,:,None] + eIA[kj][kb][:,None,:]
        Ht2ab[ki,kj,ka] /= eijab

        eijab = eIA[ki][ka][:,None,:,None] + eIA[kj][kb][:,None,:]
        Ht2bb[ki,kj,ka] /= eijab

    time0 = log.timer_debug1('update t1 t2', *time0)
    return (Ht1a, Ht1b), (Ht2aa, Ht2ab, Ht2bb)
def get_normt_diff(cc, t1, t2, t1new, t2new):
    '''Calculates norm(t1 - t1new) + norm(t2 - t2new).'''
    norms = [np.linalg.norm(new - old) for new, old in zip(t1new, t1)]
    norms += [np.linalg.norm(new - old) for new, old in zip(t2new, t2)]
    return sum(d**2 for d in norms) ** .5
def energy(cc, t1, t2, eris):
    """Return the real part of the KUCCSD correlation energy per unit cell.

    Warns via the pyscf logger when the imaginary component is larger
    than 1e-4.
    """
    t1a, t1b = t1
    t2aa, t2ab, t2bb = t2
    kka, noa, nva = t1a.shape
    kkb, nob, nvb = t1b.shape
    assert kka == kkb
    nkpts = kka

    fa, fb = eris.fock

    # Singles (Fock) contribution, accumulated k-point by k-point.
    e_singles = 0.0 + 0j
    for kp in range(nkpts):
        e_singles += einsum('ia,ia', fa[kp, :noa, noa:], t1a[kp, :, :])
        e_singles += einsum('ia,ia', fb[kp, :nob, nob:], t1b[kp, :, :])

    # Disconnected t1*t1 products entering the tau intermediates; only the
    # momentum-conserving block ka == ki is nonzero.
    t1t1aa = np.zeros(shape=t2aa.shape, dtype=t2aa.dtype)
    t1t1ab = np.zeros(shape=t2ab.shape, dtype=t2ab.dtype)
    t1t1bb = np.zeros(shape=t2bb.shape, dtype=t2bb.dtype)
    for ki in range(nkpts):
        for kj in range(nkpts):
            t1t1aa[ki, kj, ki, :, :, :, :] = einsum('ia,jb->ijab', t1a[ki, :, :], t1a[kj, :, :])
            t1t1ab[ki, kj, ki, :, :, :, :] = einsum('ia,jb->ijab', t1a[ki, :, :], t1b[kj, :, :])
            t1t1bb[ki, kj, ki, :, :, :, :] = einsum('ia,jb->ijab', t1b[ki, :, :], t1b[kj, :, :])

    tauaa = t2aa + 2 * t1t1aa
    tauab = t2ab + t1t1ab
    taubb = t2bb + 2 * t1t1bb

    # Doubles contribution.
    e_doubles = 0.0 + 0.j
    e_doubles += 0.25 * (einsum('xzyiajb,xyzijab->', eris.ovov, tauaa)
                         - einsum('yzxjaib,xyzijab->', eris.ovov, tauaa))
    e_doubles += einsum('xzyiajb,xyzijab->', eris.ovOV, tauab)
    e_doubles += 0.25 * (einsum('xzyiajb,xyzijab->', eris.OVOV, taubb)
                         - einsum('yzxjaib,xyzijab->', eris.OVOV, taubb))

    e_corr = (e_singles + e_doubles) / nkpts
    if abs(e_corr.imag) > 1e-4:
        logger.warn(cc, 'Non-zero imaginary part found in KCCSD energy %s', e_corr)
    return e_corr.real
#def get_nocc(cc, per_kpoint=False):
# '''See also function get_nocc in pyscf/pbc/mp2/kmp2.py'''
# if cc._nocc is not None:
# return cc._nocc
#
# assert(cc.frozen == 0)
#
# if isinstance(cc.frozen, (int, np.integer)):
# nocca = [(np.count_nonzero(cc.mo_occ[0][k] > 0) - cc.frozen) for k in range(cc.nkpts)]
# noccb = [(np.count_nonzero(cc.mo_occ[1][k] > 0) - cc.frozen) for k in range(cc.nkpts)]
#
# else:
# raise NotImplementedError
#
# if not per_kpoint:
# nocca = np.amax(nocca)
# noccb = np.amax(noccb)
# return nocca, noccb
#
#def get_nmo(cc, per_kpoint=False):
# '''See also function get_nmo in pyscf/pbc/mp2/kmp2.py'''
# if cc._nmo is not None:
# return cc._nmo
#
# assert(cc.frozen == 0)
#
# if isinstance(cc.frozen, (int, np.integer)):
# nmoa = [(cc.mo_occ[0][k].size - cc.frozen) for k in range(cc.nkpts)]
# nmob = [(cc.mo_occ[1][k].size - cc.frozen) for k in range(cc.nkpts)]
#
# else:
# raise NotImplementedError
#
# if not per_kpoint:
# nmoa = np.amax(nmoa)
# nmob = np.amax(nmob)
# return nmoa, nmob
#
#def get_frozen_mask(cc):
# '''See also get_frozen_mask function in pyscf/pbc/mp2/kmp2.py'''
#
# moidxa = [np.ones(x.size, dtype=np.bool) for x in cc.mo_occ[0]]
# moidxb = [np.ones(x.size, dtype=np.bool) for x in cc.mo_occ[1]]
# assert(cc.frozen == 0)
#
# if isinstance(cc.frozen, (int, np.integer)):
# for idx in moidxa:
# idx[:cc.frozen] = False
# for idx in moidxb:
# idx[:cc.frozen] = False
# else:
# raise NotImplementedError
#
# return moidxa, moisxb
def amplitudes_to_vector(t1, t2):
    """Flatten (t1a, t1b) and (t2aa, t2ab, t2bb) into a single 1-D vector,
    in that fixed order."""
    pieces = [amp.ravel() for amp in t1]
    pieces += [amp.ravel() for amp in t2]
    return np.hstack(pieces)
def vector_to_amplitudes(vec, nmo, nocc, nkpts=1):
    """Inverse of amplitudes_to_vector: reshape a flat amplitude vector
    back into k-point resolved t1 and t2 tensors."""
    nocca, noccb = nocc
    nmoa, nmob = nmo
    nvira = nmoa - nocca
    nvirb = nmob - noccb

    shapes = [
        (nkpts, nocca, nvira),
        (nkpts, noccb, nvirb),
        (nkpts, nkpts, nkpts, nocca, nocca, nvira, nvira),
        (nkpts, nkpts, nkpts, nocca, noccb, nvira, nvirb),
        (nkpts, nkpts, nkpts, noccb, noccb, nvirb, nvirb),
    ]
    sizes = [int(np.prod(shape)) for shape in shapes]
    chunks = np.split(vec, np.cumsum(sizes[:-1]))
    t1a, t1b, t2aa, t2ab, t2bb = [chunk.reshape(shape)
                                  for chunk, shape in zip(chunks, shapes)]
    return (t1a, t1b), (t2aa, t2ab, t2bb)
def add_vvvv_(cc, Ht2, t1, t2, eris):
    """Add the particle-particle ladder (vvvv) contributions to Ht2 in place.

    When ``cc.direct`` is set and density-fitted three-center integrals
    (eris.Lpv/LPV) are available, the Wvvvv intermediates are assembled
    on the fly from the DF tensors; otherwise precomputed half-transformed
    intermediates from kintermediates_uhf are used.

    Args:
        cc: KUCCSD object (supplies nocc, nmo, nkpts, khelper, direct flag).
        Ht2: tuple (Ht2aa, Ht2ab, Ht2bb) — modified in place.
        t1, t2: current singles/doubles amplitudes.
        eris: integral object.

    Returns:
        The same (Ht2aa, Ht2ab, Ht2bb) tuple, for convenience.
    """
    nocca, noccb = cc.nocc
    nmoa, nmob = cc.nmo
    nvira, nvirb = nmoa - nocca, nmob - noccb
    nkpts = cc.nkpts
    kconserv = cc.khelper.kconserv
    t1a, t1b = t1
    t2aa, t2ab, t2bb = t2
    Ht2aa, Ht2ab, Ht2bb = Ht2
    if cc.direct and getattr(eris, 'Lpv', None) is not None:
        def get_Wvvvv(ka, kc, kb):
            # Build Wvvvv/WvvVV/WVVVV for one k-point triple from the DF
            # tensors, including the t1-dressed terms.
            kd = kconserv[ka,kc,kb]
            Lpv = eris.Lpv
            LPV = eris.LPV

            Lbd = (Lpv[kb,kd][:,nocca:] -
                   lib.einsum('Lkd,kb->Lbd', Lpv[kb,kd][:,:nocca], t1a[kb]))
            Wvvvv = lib.einsum('Lac,Lbd->acbd', Lpv[ka,kc][:,nocca:], Lbd)
            kcbd = lib.einsum('Lkc,Lbd->kcbd', Lpv[ka,kc][:,:nocca],
                              Lpv[kb,kd][:,nocca:])
            Wvvvv -= lib.einsum('kcbd,ka->acbd', kcbd, t1a[ka])

            LBD = (LPV[kb,kd][:,noccb:] -
                   lib.einsum('Lkd,kb->Lbd', LPV[kb,kd][:,:noccb], t1b[kb]))

            WvvVV = lib.einsum('Lac,Lbd->acbd', Lpv[ka,kc][:,nocca:], LBD)
            kcbd = lib.einsum('Lkc,Lbd->kcbd', Lpv[ka,kc][:,:nocca],
                              LPV[kb,kd][:,noccb:])
            WvvVV -= lib.einsum('kcbd,ka->acbd', kcbd, t1a[ka])

            WVVVV = lib.einsum('Lac,Lbd->acbd', LPV[ka,kc][:,noccb:], LBD)
            kcbd = lib.einsum('Lkc,Lbd->kcbd', LPV[ka,kc][:,:noccb],
                              LPV[kb,kd][:,noccb:])
            WVVVV -= lib.einsum('kcbd,ka->acbd', kcbd, t1b[ka])

            Wvvvv *= (1./nkpts)
            WvvVV *= (1./nkpts)
            WVVVV *= (1./nkpts)
            return Wvvvv, WvvVV, WVVVV
    else:
        _Wvvvv, _WvvVV, _WVVVV = kintermediates_uhf.cc_Wvvvv_half(cc, t1, t2, eris)
        def get_Wvvvv(ka, kc, kb):
            # Simply index the precomputed half-transformed intermediates.
            return _Wvvvv[ka,kc,kb], _WvvVV[ka,kc,kb], _WVVVV[ka,kc,kb]

    #:Ht2aa += np.einsum('xyuijef,zuwaebf,xyuv,zwuv->xyzijab', tauaa, _Wvvvv-_Wvvvv.transpose(2,1,0,5,4,3,6), P, P) * .5
    #:Ht2bb += np.einsum('xyuijef,zuwaebf,xyuv,zwuv->xyzijab', taubb, _WVVVV-_WVVVV.transpose(2,1,0,5,4,3,6), P, P) * .5
    #:Ht2ab += np.einsum('xyuiJeF,zuwaeBF,xyuv,zwuv->xyziJaB', tauab, _WvvVV, P, P)
    for ka, kb, kc in kpts_helper.loop_kkk(nkpts):
        kd = kconserv[ka,kc,kb]
        Wvvvv, WvvVV, WVVVV = get_Wvvvv(ka, kc, kb)
        for ki in range(nkpts):
            kj = kconserv[ka,ki,kb]
            # tau = t2 + t1*t1 contributions, added only on the k-point
            # blocks where momentum conservation allows them.
            tauaa = t2aa[ki,kj,kc].copy()
            tauab = t2ab[ki,kj,kc].copy()
            taubb = t2bb[ki,kj,kc].copy()
            if ki == kc and kj == kd:
                tauaa += einsum('ic,jd->ijcd', t1a[ki], t1a[kj])
                tauab += einsum('ic,jd->ijcd', t1a[ki], t1b[kj])
                taubb += einsum('ic,jd->ijcd', t1b[ki], t1b[kj])
            if ki == kd and kj == kc:
                tauaa -= einsum('id,jc->ijcd', t1a[ki], t1a[kj])
                taubb -= einsum('id,jc->ijcd', t1b[ki], t1b[kj])

            tmp = lib.einsum('acbd,ijcd->ijab', Wvvvv, tauaa) * .5
            Ht2aa[ki,kj,ka] += tmp
            Ht2aa[ki,kj,kb] -= tmp.transpose(0,1,3,2)

            tmp = lib.einsum('acbd,ijcd->ijab', WVVVV, taubb) * .5
            Ht2bb[ki,kj,ka] += tmp
            Ht2bb[ki,kj,kb] -= tmp.transpose(0,1,3,2)

            Ht2ab[ki,kj,ka] += lib.einsum('acbd,ijcd->ijab', WvvVV, tauab)
        Wvvvv = WvvVV = WVVVV = None
    _Wvvvv = _WvvVV = _WVVVV = None

    # Contractions below are merged to Woooo intermediates
    # tauaa, tauab, taubb = kintermediates_uhf.make_tau(cc, t2, t1, t1)
    # P = kintermediates_uhf.kconserv_mat(cc.nkpts, cc.khelper.kconserv)
    # minj = np.einsum('xwymenf,uvwijef,xywz,uvwz->xuyminj', eris.ovov, tauaa, P, P)
    # MINJ = np.einsum('xwymenf,uvwijef,xywz,uvwz->xuyminj', eris.OVOV, taubb, P, P)
    # miNJ = np.einsum('xwymeNF,uvwiJeF,xywz,uvwz->xuymiNJ', eris.ovOV, tauab, P, P)
    # Ht2aa += np.einsum('xuyminj,xywmnab,xyuv->uvwijab', minj, tauaa, P) * .25
    # Ht2bb += np.einsum('xuyminj,xywmnab,xyuv->uvwijab', MINJ, taubb, P) * .25
    # Ht2ab += np.einsum('xuymiNJ,xywmNaB,xyuv->uvwiJaB', miNJ, tauab, P) * .5
    return (Ht2aa, Ht2ab, Ht2bb)
class KUCCSD(uccsd.UCCSD):
    """K-point (periodic boundary condition) unrestricted CCSD.

    Amplitudes carry explicit k-point indices: t1 blocks are
    (nkpts, nocc, nvir) and t2 blocks (nkpts, nkpts, nkpts, nocc, nocc,
    nvir, nvir) for each spin case, with the remaining k index fixed by
    momentum conservation.
    """

    max_space = getattr(__config__, 'pbc_cc_kccsd_uhf_KUCCSD_max_space', 20)

    def __init__(self, mf, frozen=None, mo_coeff=None, mo_occ=None):
        assert(isinstance(mf, scf.khf.KSCF))
        uccsd.UCCSD.__init__(self, mf, frozen, mo_coeff, mo_occ)
        self.kpts = mf.kpts
        self.mo_energy = mf.mo_energy
        self.khelper = kpts_helper.KptsHelper(mf.cell, self.kpts)
        self.direct = True  # If possible, use GDF to compute Wvvvv on-the-fly
        keys = set(['kpts', 'mo_energy', 'khelper', 'max_space', 'direct'])
        self._keys = self._keys.union(keys)

    @property
    def nkpts(self):
        # Number of sampled k points.
        return len(self.kpts)

    get_normt_diff = get_normt_diff
    get_nocc = get_nocc
    get_nmo = get_nmo
    get_frozen_mask = get_frozen_mask
    update_amps = update_amps
    energy = energy

    def dump_flags(self, verbose=None):
        return uccsd.UCCSD.dump_flags(self, verbose)

    def ao2mo(self, mo_coeff=None):
        """Pick an ERI backend (incore / density-fitted / outcore) by memory."""
        from pyscf.pbc.df.df import GDF
        cell = self._scf.cell
        nkpts = self.nkpts
        nmoa, nmob = self.nmo
        # Rough in-core size (MB) of all k-point MO integral blocks.
        mem_incore = nkpts**3 * (nmoa**4 + nmob**4) * 8 / 1e6
        mem_now = lib.current_memory()[0]
        if (mem_incore + mem_now < self.max_memory) or self.mol.incore_anyway:
            return _make_eris_incore(self, mo_coeff)
        elif (self.direct and type(self._scf.with_df) is GDF
              and cell.dimension != 2):
            # DFKCCSD does not support MDF
            return _make_df_eris(self, mo_coeff)
        else:
            return _make_eris_outcore(self, mo_coeff)

    def init_amps(self, eris):
        """MP2 initial guess: t1 = 0, t2 from first-order perturbation.

        Padded (non-physical) orbitals are excluded from the energy
        denominators by pre-filling them with LARGE_DENOM.
        Returns (emp2, t1, t2).
        """
        from pyscf.lib.parameters import LOOSE_ZERO_TOL, LARGE_DENOM
        # BUGFIX: time.clock() was removed in Python 3.8; use the modern
        # (cpu, wall) timer pair expected by logger.timer.
        time0 = time.process_time(), time.perf_counter()

        nocca, noccb = self.nocc
        nmoa, nmob = self.nmo
        nvira, nvirb = nmoa - nocca, nmob - noccb

        nkpts = self.nkpts
        t1a = np.zeros((nkpts, nocca, nvira), dtype=np.complex128)
        t1b = np.zeros((nkpts, noccb, nvirb), dtype=np.complex128)
        t1 = (t1a, t1b)
        t2aa = np.zeros((nkpts, nkpts, nkpts, nocca, nocca, nvira, nvira), dtype=np.complex128)
        t2ab = np.zeros((nkpts, nkpts, nkpts, nocca, noccb, nvira, nvirb), dtype=np.complex128)
        t2bb = np.zeros((nkpts, nkpts, nkpts, noccb, noccb, nvirb, nvirb), dtype=np.complex128)

        mo_ea_o = [e[:nocca] for e in eris.mo_energy[0]]
        mo_eb_o = [e[:noccb] for e in eris.mo_energy[1]]
        mo_ea_v = [e[nocca:] for e in eris.mo_energy[0]]
        mo_eb_v = [e[noccb:] for e in eris.mo_energy[1]]

        # Get location of padded elements in occupied and virtual space
        nonzero_padding_alpha, nonzero_padding_beta = padding_k_idx(self, kind="split")
        nonzero_opadding_alpha, nonzero_vpadding_alpha = nonzero_padding_alpha
        nonzero_opadding_beta, nonzero_vpadding_beta = nonzero_padding_beta

        eia = []
        eIA = []
        # Create denominators, ignoring padded elements
        for ki in range(nkpts):
            tmp_alpha = []
            tmp_beta = []
            for ka in range(nkpts):
                tmp_eia = LARGE_DENOM * np.ones((nocca, nvira), dtype=eris.mo_energy[0][0].dtype)
                tmp_eIA = LARGE_DENOM * np.ones((noccb, nvirb), dtype=eris.mo_energy[0][0].dtype)
                n0_ovp_ia = np.ix_(nonzero_opadding_alpha[ki], nonzero_vpadding_alpha[ka])
                n0_ovp_IA = np.ix_(nonzero_opadding_beta[ki], nonzero_vpadding_beta[ka])

                tmp_eia[n0_ovp_ia] = (mo_ea_o[ki][:,None] - mo_ea_v[ka])[n0_ovp_ia]
                tmp_eIA[n0_ovp_IA] = (mo_eb_o[ki][:,None] - mo_eb_v[ka])[n0_ovp_IA]
                tmp_alpha.append(tmp_eia)
                tmp_beta.append(tmp_eIA)
            eia.append(tmp_alpha)
            eIA.append(tmp_beta)

        kconserv = kpts_helper.get_kconserv(self._scf.cell, self.kpts)
        for ki, kj, ka in kpts_helper.loop_kkk(nkpts):
            kb = kconserv[ki, ka, kj]
            Daa = eia[ki][ka][:,None,:,None] + eia[kj][kb][:,None,:]
            Dab = eia[ki][ka][:,None,:,None] + eIA[kj][kb][:,None,:]
            Dbb = eIA[ki][ka][:,None,:,None] + eIA[kj][kb][:,None,:]

            t2aa[ki,kj,ka] = eris.ovov[ki,ka,kj].conj().transpose((0,2,1,3)) / Daa
            t2aa[ki,kj,ka]-= eris.ovov[kj,ka,ki].conj().transpose((2,0,1,3)) / Daa
            t2ab[ki,kj,ka] = eris.ovOV[ki,ka,kj].conj().transpose((0,2,1,3)) / Dab
            t2bb[ki,kj,ka] = eris.OVOV[ki,ka,kj].conj().transpose((0,2,1,3)) / Dbb
            t2bb[ki,kj,ka]-= eris.OVOV[kj,ka,ki].conj().transpose((2,0,1,3)) / Dbb

        t2 = (t2aa,t2ab,t2bb)

        # MP2 correlation energy from the guess amplitudes.
        d = 0.0 + 0.j
        d += 0.25*(einsum('xzyiajb,xyzijab->',eris.ovov,t2aa)
                   - einsum('yzxjaib,xyzijab->',eris.ovov,t2aa))
        d += einsum('xzyiajb,xyzijab->',eris.ovOV,t2ab)
        d += 0.25*(einsum('xzyiajb,xyzijab->',eris.OVOV,t2bb)
                   - einsum('yzxjaib,xyzijab->',eris.OVOV,t2bb))
        self.emp2 = d/nkpts
        logger.info(self, 'Init t2, MP2 energy = %.15g', self.emp2.real)
        logger.timer(self, 'init mp2', *time0)
        return self.emp2, t1, t2

    def amplitudes_to_vector(self, t1, t2):
        return amplitudes_to_vector(t1, t2)

    def vector_to_amplitudes(self, vec, nmo=None, nocc=None, nkpts=None):
        if nocc is None: nocc = self.nocc
        if nmo is None: nmo = self.nmo
        if nkpts is None: nkpts = self.nkpts
        return vector_to_amplitudes(vec, nmo, nocc, nkpts)
UCCSD = KUCCSD
#######################################
#
# _ERIS.
#
# Note the two electron integrals are stored in different orders from
# kccsd_rhf._ERIS. Integrals (ab|cd) are stored as [ka,kb,kc,a,b,c,d] here
# while the order is [ka,kc,kb,a,c,b,d] in kccsd_rhf._ERIS
#
# TODO: use the same convention as kccsd_rhf
#
def _make_eris_incore(cc, mo_coeff=None):
    """Build all k-point UCCSD MO integral blocks in memory.

    Returns a _ChemistsERIs container whose attributes hold the (ab|cd)
    tensors indexed [ka,kb,kc,a,b,c,d].
    """
    eris = uccsd._ChemistsERIs()
    if mo_coeff is None:
        mo_coeff = cc.mo_coeff
    mo_coeff = convert_mo_coeff(mo_coeff)  # FIXME: Remove me!
    mo_coeff = padded_mo_coeff(cc, mo_coeff)
    eris.mo_coeff = mo_coeff
    eris.nocc = cc.nocc

    nkpts = cc.nkpts
    nocca, noccb = cc.nocc
    nmoa, nmob = cc.nmo
    nvira, nvirb = nmoa - nocca, nmob - noccb

    dtype = np.double if gamma_point(cc.kpts) else np.complex128
    dtype = np.result_type(dtype, *mo_coeff[0])

    # Allocate every integral tensor from a (name -> orbital dimensions)
    # table instead of one np.empty assignment per attribute.
    oa, va, ob, vb = nocca, nvira, noccb, nvirb
    block_dims = [
        ('oooo', (oa, oa, oa, oa)), ('ooov', (oa, oa, oa, va)),
        ('oovv', (oa, oa, va, va)), ('ovov', (oa, va, oa, va)),
        ('voov', (va, oa, oa, va)), ('vovv', (va, oa, va, va)),
        ('OOOO', (ob, ob, ob, ob)), ('OOOV', (ob, ob, ob, vb)),
        ('OOVV', (ob, ob, vb, vb)), ('OVOV', (ob, vb, ob, vb)),
        ('VOOV', (vb, ob, ob, vb)), ('VOVV', (vb, ob, vb, vb)),
        ('ooOO', (oa, oa, ob, ob)), ('ooOV', (oa, oa, ob, vb)),
        ('ooVV', (oa, oa, vb, vb)), ('ovOV', (oa, va, ob, vb)),
        ('voOV', (va, oa, ob, vb)), ('voVV', (va, oa, vb, vb)),
        ('OOov', (ob, ob, oa, va)), ('OOvv', (ob, ob, va, va)),
        ('OVov', (ob, vb, oa, va)), ('VOov', (vb, ob, oa, va)),
        ('VOvv', (vb, ob, va, va)),
    ]
    for name, dims in block_dims:
        setattr(eris, name, np.empty((nkpts, nkpts, nkpts) + dims, dtype=dtype))
    eris.OOoo = None  # derivable from ooOO; never materialized

    # Occupied-index blocks (and fock / mo_energy) via the shared helper.
    _kuccsd_eris_common_(cc, eris)

    # Four-virtual blocks straight from the 7d AO->MO transform.
    thisdf = cc._scf.with_df
    orbva = np.asarray(mo_coeff[0][:, :, nocca:], order='C')
    orbvb = np.asarray(mo_coeff[1][:, :, noccb:], order='C')
    eris.vvvv = thisdf.ao2mo_7d(orbva, factor=1./nkpts)
    eris.VVVV = thisdf.ao2mo_7d(orbvb, factor=1./nkpts)
    eris.vvVV = thisdf.ao2mo_7d([orbva, orbva, orbvb, orbvb], factor=1./nkpts)
    return eris
def _kuccsd_eris_common_(cc, eris, buf=None):
    """Fill the occupied-index integral blocks of `eris` in place.

    Transforms the (o p|p p)-type integrals for the aa, bb, ab and ba spin
    blocks, and rebuilds the Fock matrices, HF energy and orbital energies
    from the current density (with exxdiv=None plus a madelung shift on the
    occupied levels).  `buf` may be an h5py Group used as scratch for the
    half-transformed integrals; otherwise they are held in memory.
    Returns `eris`.
    """
    from pyscf.pbc import tools
    from pyscf.pbc.cc.ccsd import _adjust_occ
    #if not (cc.frozen is None or cc.frozen == 0):
    #    raise NotImplementedError('cc.frozen = %s' % str(cc.frozen))

    # BUGFIX: time.clock() was removed in Python 3.8; use the modern
    # (cpu, wall) timer pair expected by log.timer.
    cput0 = (time.process_time(), time.perf_counter())
    log = logger.new_logger(cc)
    cell = cc._scf.cell
    thisdf = cc._scf.with_df

    kpts = cc.kpts
    nkpts = cc.nkpts
    mo_coeff = eris.mo_coeff
    nocca, noccb = eris.nocc
    nmoa, nmob = cc.nmo
    nvira, nvirb = nmoa - nocca, nmob - noccb
    mo_a, mo_b = mo_coeff

    # Re-make our fock MO matrix elements from density and fock AO
    dm = cc._scf.make_rdm1(cc.mo_coeff, cc.mo_occ)
    hcore = cc._scf.get_hcore()
    with lib.temporary_env(cc._scf, exxdiv=None):
        vhf = cc._scf.get_veff(cell, dm)
    focka = [reduce(np.dot, (mo.conj().T, hcore[k]+vhf[0][k], mo))
             for k, mo in enumerate(mo_a)]
    fockb = [reduce(np.dot, (mo.conj().T, hcore[k]+vhf[1][k], mo))
             for k, mo in enumerate(mo_b)]
    eris.fock = (np.asarray(focka), np.asarray(fockb))
    eris.e_hf = cc._scf.energy_tot(dm=dm, vhf=vhf)

    # Madelung-shift the occupied orbital energies (finite-size correction).
    madelung = tools.madelung(cell, kpts)
    mo_ea = [focka[k].diagonal().real for k in range(nkpts)]
    mo_eb = [fockb[k].diagonal().real for k in range(nkpts)]
    mo_ea = [_adjust_occ(e, nocca, -madelung) for e in mo_ea]
    mo_eb = [_adjust_occ(e, noccb, -madelung) for e in mo_eb]
    eris.mo_energy = (mo_ea, mo_eb)

    orboa = np.asarray(mo_coeff[0][:,:,:nocca], order='C')
    orbva = np.asarray(mo_coeff[0][:,:,nocca:], order='C')
    orbob = np.asarray(mo_coeff[1][:,:,:noccb], order='C')
    orbvb = np.asarray(mo_coeff[1][:,:,noccb:], order='C')

    dtype = np.result_type(*focka).char

    # The momentum conservation array
    kconserv = cc.khelper.kconserv

    # alpha-alpha (o p|p p) block; vo-leading tensors use conjugation
    # symmetry (kq,kp,ks) of the chemists' integrals.
    out = None
    if isinstance(buf, h5py.Group):
        out = buf.create_dataset('tmp', (nkpts,nkpts,nkpts,nocca,nmoa,nmoa,nmoa), dtype)
    oppp = thisdf.ao2mo_7d([orboa,mo_coeff[0],mo_coeff[0],mo_coeff[0]], kpts,
                           factor=1./nkpts, out=out)
    for kp, kq, kr in kpts_helper.loop_kkk(nkpts):
        ks = kconserv[kp,kq,kr]
        tmp = np.asarray(oppp[kp,kq,kr])
        eris.oooo[kp,kq,kr] = tmp[:nocca,:nocca,:nocca,:nocca]
        eris.ooov[kp,kq,kr] = tmp[:nocca,:nocca,:nocca,nocca:]
        eris.oovv[kp,kq,kr] = tmp[:nocca,:nocca,nocca:,nocca:]
        eris.ovov[kp,kq,kr] = tmp[:nocca,nocca:,:nocca,nocca:]
        eris.voov[kq,kp,ks] = tmp[:nocca,nocca:,nocca:,:nocca].conj().transpose(1,0,3,2)
        eris.vovv[kq,kp,ks] = tmp[:nocca,nocca:,nocca:,nocca:].conj().transpose(1,0,3,2)
    oppp = None

    # beta-beta (O P|P P) block.
    if isinstance(buf, h5py.Group):
        del(buf['tmp'])
        out = buf.create_dataset('tmp', (nkpts,nkpts,nkpts,noccb,nmob,nmob,nmob), dtype)
    oppp = thisdf.ao2mo_7d([orbob,mo_coeff[1],mo_coeff[1],mo_coeff[1]], kpts,
                           factor=1./nkpts, out=out)
    for kp, kq, kr in kpts_helper.loop_kkk(nkpts):
        ks = kconserv[kp,kq,kr]
        tmp = np.asarray(oppp[kp,kq,kr])
        eris.OOOO[kp,kq,kr] = tmp[:noccb,:noccb,:noccb,:noccb]
        eris.OOOV[kp,kq,kr] = tmp[:noccb,:noccb,:noccb,noccb:]
        eris.OOVV[kp,kq,kr] = tmp[:noccb,:noccb,noccb:,noccb:]
        eris.OVOV[kp,kq,kr] = tmp[:noccb,noccb:,:noccb,noccb:]
        eris.VOOV[kq,kp,ks] = tmp[:noccb,noccb:,noccb:,:noccb].conj().transpose(1,0,3,2)
        eris.VOVV[kq,kp,ks] = tmp[:noccb,noccb:,noccb:,noccb:].conj().transpose(1,0,3,2)
    oppp = None

    # alpha-beta (o p|P P) block.
    if isinstance(buf, h5py.Group):
        del(buf['tmp'])
        out = buf.create_dataset('tmp', (nkpts,nkpts,nkpts,nocca,nmoa,nmob,nmob), dtype)
    oppp = thisdf.ao2mo_7d([orboa,mo_coeff[0],mo_coeff[1],mo_coeff[1]], kpts,
                           factor=1./nkpts, out=out)
    for kp, kq, kr in kpts_helper.loop_kkk(nkpts):
        ks = kconserv[kp,kq,kr]
        tmp = np.asarray(oppp[kp,kq,kr])
        eris.ooOO[kp,kq,kr] = tmp[:nocca,:nocca,:noccb,:noccb]
        eris.ooOV[kp,kq,kr] = tmp[:nocca,:nocca,:noccb,noccb:]
        eris.ooVV[kp,kq,kr] = tmp[:nocca,:nocca,noccb:,noccb:]
        eris.ovOV[kp,kq,kr] = tmp[:nocca,nocca:,:noccb,noccb:]
        eris.voOV[kq,kp,ks] = tmp[:nocca,nocca:,noccb:,:noccb].conj().transpose(1,0,3,2)
        eris.voVV[kq,kp,ks] = tmp[:nocca,nocca:,noccb:,noccb:].conj().transpose(1,0,3,2)
    oppp = None

    # beta-alpha (O P|p p) block; OOoo itself is never stored.
    if isinstance(buf, h5py.Group):
        del(buf['tmp'])
        out = buf.create_dataset('tmp', (nkpts,nkpts,nkpts,noccb,nmob,nmoa,nmoa), dtype)
    oppp = thisdf.ao2mo_7d([orbob,mo_coeff[1],mo_coeff[0],mo_coeff[0]], kpts,
                           factor=1./nkpts, out=out)
    for kp, kq, kr in kpts_helper.loop_kkk(nkpts):
        ks = kconserv[kp,kq,kr]
        tmp = np.asarray(oppp[kp,kq,kr])
        #eris.OOoo[kp,kq,kr] = tmp[:noccb,:noccb,:nocca,:nocca]
        eris.OOov[kp,kq,kr] = tmp[:noccb,:noccb,:nocca,nocca:]
        eris.OOvv[kp,kq,kr] = tmp[:noccb,:noccb,nocca:,nocca:]
        eris.OVov[kp,kq,kr] = tmp[:noccb,noccb:,:nocca,nocca:]
        eris.VOov[kq,kp,ks] = tmp[:noccb,noccb:,nocca:,:nocca].conj().transpose(1,0,3,2)
        eris.VOvv[kq,kp,ks] = tmp[:noccb,noccb:,nocca:,nocca:].conj().transpose(1,0,3,2)
    oppp = None

    log.timer('CCSD integral transformation', *cput0)
    return eris
def _make_eris_outcore(cc, mo_coeff=None):
    """Build k-point UCCSD MO integral blocks on disk (HDF5 datasets)."""
    eris = uccsd._ChemistsERIs()
    if mo_coeff is None:
        mo_coeff = cc.mo_coeff
    mo_coeff = convert_mo_coeff(mo_coeff)  # FIXME: Remove me!
    mo_coeff = padded_mo_coeff(cc, mo_coeff)
    eris.mo_coeff = mo_coeff
    eris.nocc = cc.nocc

    nkpts = cc.nkpts
    nocca, noccb = cc.nocc
    nmoa, nmob = cc.nmo
    nvira, nvirb = nmoa - nocca, nmob - noccb

    dtype = np.double if gamma_point(cc.kpts) else np.complex128
    dtype = np.result_type(dtype, *mo_coeff[0]).char

    # One HDF5 dataset per integral block, created from a
    # (name -> orbital dimensions) table instead of a call per attribute.
    eris.feri = feri = lib.H5TmpFile()
    oa, va, ob, vb = nocca, nvira, noccb, nvirb
    block_dims = [
        ('oooo', (oa, oa, oa, oa)), ('ooov', (oa, oa, oa, va)),
        ('oovv', (oa, oa, va, va)), ('ovov', (oa, va, oa, va)),
        ('voov', (va, oa, oa, va)), ('vovv', (va, oa, va, va)),
        ('vvvv', (va, va, va, va)),
        ('OOOO', (ob, ob, ob, ob)), ('OOOV', (ob, ob, ob, vb)),
        ('OOVV', (ob, ob, vb, vb)), ('OVOV', (ob, vb, ob, vb)),
        ('VOOV', (vb, ob, ob, vb)), ('VOVV', (vb, ob, vb, vb)),
        ('VVVV', (vb, vb, vb, vb)),
        ('ooOO', (oa, oa, ob, ob)), ('ooOV', (oa, oa, ob, vb)),
        ('ooVV', (oa, oa, vb, vb)), ('ovOV', (oa, va, ob, vb)),
        ('voOV', (va, oa, ob, vb)), ('voVV', (va, oa, vb, vb)),
        ('vvVV', (va, va, vb, vb)),
        ('OOov', (ob, ob, oa, va)), ('OOvv', (ob, ob, va, va)),
        ('OVov', (ob, vb, oa, va)), ('VOov', (vb, ob, oa, va)),
        ('VOvv', (vb, ob, va, va)),
    ]
    for name, dims in block_dims:
        setattr(eris, name,
                feri.create_dataset(name, (nkpts, nkpts, nkpts) + dims, dtype))
    eris.OOoo = None  # derivable from ooOO; never materialized
    eris.VVvv = None  # derivable from vvVV; never materialized

    # Occupied-index blocks, using a scratch file for half-transformed ints.
    fswap = lib.H5TmpFile()
    _kuccsd_eris_common_(cc, eris, fswap)
    fswap = None

    # All-virtual blocks written directly into their datasets.
    thisdf = cc._scf.with_df
    orbva = np.asarray(mo_coeff[0][:, :, nocca:], order='C')
    orbvb = np.asarray(mo_coeff[1][:, :, noccb:], order='C')
    thisdf.ao2mo_7d(orbva, cc.kpts, factor=1./nkpts, out=eris.vvvv)
    thisdf.ao2mo_7d(orbvb, cc.kpts, factor=1./nkpts, out=eris.VVVV)
    thisdf.ao2mo_7d([orbva, orbva, orbvb, orbvb], cc.kpts, factor=1./nkpts, out=eris.vvVV)
    return eris
def _make_df_eris(cc, mo_coeff=None):
    """Build UCCSD integrals with Gaussian density fitting (GDF).

    Four-virtual blocks (vvvv/vvVV/VVVV) are not stored; instead the
    three-center factors Lpv/LPV are kept so W_vvvv can be assembled on the
    fly.  2D cells are unsupported.  Returns the filled _ChemistsERIs.
    """
    from pyscf.pbc.df import df
    from pyscf.ao2mo import _ao2mo
    cell = cc._scf.cell
    if cell.dimension == 2:
        raise NotImplementedError

    eris = uccsd._ChemistsERIs()
    if mo_coeff is None:
        mo_coeff = cc.mo_coeff
    mo_coeff = padded_mo_coeff(cc, mo_coeff)
    eris.mo_coeff = mo_coeff
    eris.nocc = cc.nocc
    thisdf = cc._scf.with_df

    kpts = cc.kpts
    nkpts = cc.nkpts
    nocca, noccb = cc.nocc
    nmoa, nmob = cc.nmo
    nvira, nvirb = nmoa - nocca, nmob - noccb
    if getattr(thisdf, 'auxcell', None):
        naux = thisdf.auxcell.nao_nr()
    else:
        naux = thisdf.get_naoaux()
    nao = cell.nao_nr()
    mo_kpts_a, mo_kpts_b = eris.mo_coeff

    if gamma_point(kpts):
        dtype = np.double
    else:
        dtype = np.complex128
    dtype = np.result_type(dtype, *mo_kpts_a)

    eris.feri = feri = lib.H5TmpFile()
    eris.oooo = feri.create_dataset('oooo', (nkpts,nkpts,nkpts,nocca,nocca,nocca,nocca), dtype)
    eris.ooov = feri.create_dataset('ooov', (nkpts,nkpts,nkpts,nocca,nocca,nocca,nvira), dtype)
    eris.oovv = feri.create_dataset('oovv', (nkpts,nkpts,nkpts,nocca,nocca,nvira,nvira), dtype)
    eris.ovov = feri.create_dataset('ovov', (nkpts,nkpts,nkpts,nocca,nvira,nocca,nvira), dtype)
    eris.voov = feri.create_dataset('voov', (nkpts,nkpts,nkpts,nvira,nocca,nocca,nvira), dtype)
    eris.vovv = feri.create_dataset('vovv', (nkpts,nkpts,nkpts,nvira,nocca,nvira,nvira), dtype)
    eris.vvvv = None
    eris.OOOO = feri.create_dataset('OOOO', (nkpts,nkpts,nkpts,noccb,noccb,noccb,noccb), dtype)
    eris.OOOV = feri.create_dataset('OOOV', (nkpts,nkpts,nkpts,noccb,noccb,noccb,nvirb), dtype)
    eris.OOVV = feri.create_dataset('OOVV', (nkpts,nkpts,nkpts,noccb,noccb,nvirb,nvirb), dtype)
    eris.OVOV = feri.create_dataset('OVOV', (nkpts,nkpts,nkpts,noccb,nvirb,noccb,nvirb), dtype)
    eris.VOOV = feri.create_dataset('VOOV', (nkpts,nkpts,nkpts,nvirb,noccb,noccb,nvirb), dtype)
    eris.VOVV = feri.create_dataset('VOVV', (nkpts,nkpts,nkpts,nvirb,noccb,nvirb,nvirb), dtype)
    eris.VVVV = None
    eris.ooOO = feri.create_dataset('ooOO', (nkpts,nkpts,nkpts,nocca,nocca,noccb,noccb), dtype)
    eris.ooOV = feri.create_dataset('ooOV', (nkpts,nkpts,nkpts,nocca,nocca,noccb,nvirb), dtype)
    eris.ooVV = feri.create_dataset('ooVV', (nkpts,nkpts,nkpts,nocca,nocca,nvirb,nvirb), dtype)
    eris.ovOV = feri.create_dataset('ovOV', (nkpts,nkpts,nkpts,nocca,nvira,noccb,nvirb), dtype)
    eris.voOV = feri.create_dataset('voOV', (nkpts,nkpts,nkpts,nvira,nocca,noccb,nvirb), dtype)
    eris.voVV = feri.create_dataset('voVV', (nkpts,nkpts,nkpts,nvira,nocca,nvirb,nvirb), dtype)
    eris.vvVV = None
    eris.OOoo = None
    eris.OOov = feri.create_dataset('OOov', (nkpts,nkpts,nkpts,noccb,noccb,nocca,nvira), dtype)
    eris.OOvv = feri.create_dataset('OOvv', (nkpts,nkpts,nkpts,noccb,noccb,nvira,nvira), dtype)
    eris.OVov = feri.create_dataset('OVov', (nkpts,nkpts,nkpts,noccb,nvirb,nocca,nvira), dtype)
    eris.VOov = feri.create_dataset('VOov', (nkpts,nkpts,nkpts,nvirb,noccb,nocca,nvira), dtype)
    eris.VOvv = feri.create_dataset('VOvv', (nkpts,nkpts,nkpts,nvirb,noccb,nvira,nvira), dtype)
    eris.VVvv = None

    # Occupied-index blocks, using a scratch file for half-transformed ints.
    fswap = lib.H5TmpFile()
    _kuccsd_eris_common_(cc, eris, fswap)
    fswap = None

    # Three-center MO factors L_{p,v} for each (ki,kj) k-point pair.
    eris.Lpv = Lpv = np.empty((nkpts,nkpts), dtype=object)
    eris.LPV = LPV = np.empty((nkpts,nkpts), dtype=object)
    with h5py.File(thisdf._cderi, 'r') as f:
        # BUGFIX: Dataset.value was deprecated and removed in h5py 3.0;
        # read the whole dataset with the [()] indexing form instead.
        kptij_lst = f['j3c-kptij'][()]
        tao = []
        ao_loc = None
        for ki, kpti in enumerate(kpts):
            for kj, kptj in enumerate(kpts):
                kpti_kptj = np.array((kpti,kptj))
                Lpq = np.asarray(df._getitem(f, 'j3c', kpti_kptj, kptij_lst))

                mo_a = np.hstack((mo_kpts_a[ki], mo_kpts_a[kj][:,nocca:]))
                mo_b = np.hstack((mo_kpts_b[ki], mo_kpts_b[kj][:,noccb:]))
                mo_a = np.asarray(mo_a, dtype=dtype, order='F')
                mo_b = np.asarray(mo_b, dtype=dtype, order='F')
                if dtype == np.double:
                    outa = _ao2mo.nr_e2(Lpq, mo_a, (0, nmoa, nmoa, nmoa+nvira), aosym='s2')
                    outb = _ao2mo.nr_e2(Lpq, mo_b, (0, nmob, nmob, nmob+nvirb), aosym='s2')
                else:
                    #Note: Lpq.shape[0] != naux if linear dependency is found in auxbasis
                    if Lpq[0].size != nao**2:  # aosym = 's2'
                        Lpq = lib.unpack_tril(Lpq).astype(np.complex128)
                    outa = _ao2mo.r_e2(Lpq, mo_a, (0, nmoa, nmoa, nmoa+nvira), tao, ao_loc)
                    outb = _ao2mo.r_e2(Lpq, mo_b, (0, nmob, nmob, nmob+nvirb), tao, ao_loc)
                Lpv[ki,kj] = outa.reshape(-1,nmoa,nvira)
                LPV[ki,kj] = outb.reshape(-1,nmob,nvirb)
    return eris
from pyscf.pbc import scf
# Attach KUCCSD as the .CCSD() factory method of KUHF mean-field objects.
scf.kuhf.KUHF.CCSD = lib.class_as_method(KUCCSD)
if __name__ == '__main__':
    # Self-test: compare the spin-unrestricted k-point amplitude equations
    # against the spin-orbital (GHF-based) KCCSD implementation, and check
    # regression finger-prints of the three ERI backends.
    from pyscf.pbc import gto, scf, cc
    from pyscf import lo

    # Tiny two-atom He cell with a minimal two-function s basis.
    cell = gto.Cell()
    cell.atom = '''
He 0.000000000000 0.000000000000 0.000000000000
He 1.685068664391 1.685068664391 1.685068664391
'''
    #cell.basis = [[0, (1., 1.)], [1, (.5, 1.)]]
    cell.basis = [[0, (1., 1.)], [0, (.5, 1.)]]
    cell.a = '''
0.000000000, 3.370137329, 3.370137329
3.370137329, 0.000000000, 3.370137329
3.370137329, 3.370137329, 0.000000000'''
    cell.unit = 'B'
    cell.mesh = [13]*3
    cell.build()

    np.random.seed(2)
    # Fabricated (randomized) KUHF reference on a 1x1x3 k-point mesh;
    # orbitals are Lowdin-orthogonalized random vectors, not converged SCF
    # solutions, so only deterministic finger-prints are checked.
    kmf = scf.KUHF(cell, kpts=cell.make_kpts([1,1,3]), exxdiv=None)
    nmo = cell.nao_nr()
    kmf.mo_occ = np.zeros((2,3,nmo))
    kmf.mo_occ[0,:,:3] = 1
    kmf.mo_occ[1,:,:1] = 1
    kmf.mo_energy = np.arange(nmo) + np.random.random((2,3,nmo)) * .3
    kmf.mo_energy[kmf.mo_occ == 0] += 2

    mo = (np.random.random((2,3,nmo,nmo)) +
          np.random.random((2,3,nmo,nmo))*1j - .5-.5j)
    s = kmf.get_ovlp()
    kmf.mo_coeff = np.empty_like(mo)
    nkpts = len(kmf.kpts)
    for k in range(nkpts):
        kmf.mo_coeff[0,k] = lo.orth.vec_lowdin(mo[0,k], s[k])
        kmf.mo_coeff[1,k] = lo.orth.vec_lowdin(mo[1,k], s[k])

    def rand_t1_t2(mycc):
        # Random t1/t2 with correct permutational antisymmetry (aa/bb) and
        # momentum conservation.  NOTE: closes over kmf for the kconserv
        # table rather than using mycc.
        nkpts = mycc.nkpts
        nocca, noccb = mycc.nocc
        nmoa, nmob = mycc.nmo
        nvira, nvirb = nmoa - nocca, nmob - noccb
        np.random.seed(1)
        t1a = (np.random.random((nkpts,nocca,nvira)) +
               np.random.random((nkpts,nocca,nvira))*1j - .5-.5j)
        t1b = (np.random.random((nkpts,noccb,nvirb)) +
               np.random.random((nkpts,noccb,nvirb))*1j - .5-.5j)
        t2aa = (np.random.random((nkpts,nkpts,nkpts,nocca,nocca,nvira,nvira)) +
                np.random.random((nkpts,nkpts,nkpts,nocca,nocca,nvira,nvira))*1j - .5-.5j)
        kconserv = kpts_helper.get_kconserv(kmf.cell, kmf.kpts)
        # Antisymmetrize over (i<->j) and (a<->b) with the matching k swaps.
        t2aa = t2aa - t2aa.transpose(1,0,2,4,3,5,6)
        tmp = t2aa.copy()
        for ki, kj, kk in kpts_helper.loop_kkk(nkpts):
            kl = kconserv[ki, kk, kj]
            t2aa[ki,kj,kk] = t2aa[ki,kj,kk] - tmp[ki,kj,kl].transpose(0,1,3,2)
        t2ab = (np.random.random((nkpts,nkpts,nkpts,nocca,noccb,nvira,nvirb)) +
                np.random.random((nkpts,nkpts,nkpts,nocca,noccb,nvira,nvirb))*1j - .5-.5j)
        t2bb = (np.random.random((nkpts,nkpts,nkpts,noccb,noccb,nvirb,nvirb)) +
                np.random.random((nkpts,nkpts,nkpts,noccb,noccb,nvirb,nvirb))*1j - .5-.5j)
        t2bb = t2bb - t2bb.transpose(1,0,2,4,3,5,6)
        tmp = t2bb.copy()
        for ki, kj, kk in kpts_helper.loop_kkk(nkpts):
            kl = kconserv[ki, kk, kj]
            t2bb[ki,kj,kk] = t2bb[ki,kj,kk] - tmp[ki,kj,kl].transpose(0,1,3,2)
        t1 = (t1a, t1b)
        t2 = (t2aa, t2ab, t2bb)
        return t1, t2

    # Finger-prints of update_amps with unequal alpha/beta occupations.
    mycc = KUCCSD(kmf)
    eris = mycc.ao2mo()
    t1, t2 = rand_t1_t2(mycc)
    Ht1, Ht2 = mycc.update_amps(t1, t2, eris)
    print(lib.finger(Ht1[0]) - (2.2677885702176339-2.5150764056992041j))
    print(lib.finger(Ht1[1]) - (-51.643438947846086+526.58026126100458j))
    print(lib.finger(Ht2[0]) - (-29.490813482748258-8.7509143690136018j))
    print(lib.finger(Ht2[1]) - (2256.0440056839416-193.16480896707569j))
    print(lib.finger(Ht2[2]) - (-250.59447681063182-397.57189085666982j))

    # Same check with equal occupations.
    kmf.mo_occ[:] = 0
    kmf.mo_occ[:,:,:2] = 1
    mycc = KUCCSD(kmf)
    eris = mycc.ao2mo()
    t1, t2 = rand_t1_t2(mycc)
    Ht1, Ht2 = mycc.update_amps(t1, t2, eris)
    print(lib.finger(Ht1[0]) - (5.4622516572705662+1.990046725028729j))
    print(lib.finger(Ht1[1]) - (4.8801120611799043-5.9940463787453488j))
    print(lib.finger(Ht2[0]) - (-192.38864512375193+305.14191018543983j))
    print(lib.finger(Ht2[1]) - (23085.044505825954-11527.802302550244j))
    print(lib.finger(Ht2[2]) - (115.57932548288559-40.888597453928604j))

    # Cross-check energy and amplitudes against the spin-orbital KCCSD.
    from pyscf.pbc.cc import kccsd
    kgcc = kccsd.GCCSD(scf.addons.convert_to_ghf(kmf))
    kccsd_eris = kccsd._make_eris_incore(kgcc, kgcc._scf.mo_coeff)
    r1 = kgcc.spatial2spin(t1)
    r2 = kgcc.spatial2spin(t2)
    ge = kccsd.energy(kgcc, r1, r2, kccsd_eris)
    r1, r2 = kgcc.update_amps(r1, r2, kccsd_eris)
    ue = energy(mycc, t1, t2, eris)
    print(abs(ge - ue))
    print(abs(r1 - kgcc.spatial2spin(Ht1)).max())
    print(abs(r2 - kgcc.spatial2spin(Ht2)).max())

    # Repeat with density fitting; also finger-print each ERI block.
    kmf = kmf.density_fit(auxbasis=[[0, (1., 1.)]])
    mycc = KUCCSD(kmf)
    eris = _make_df_eris(mycc, mycc.mo_coeff)
    t1, t2 = rand_t1_t2(mycc)
    Ht1, Ht2 = mycc.update_amps(t1, t2, eris)
    print(lib.finger(Ht1[0]) - (6.9341372555790013+0.87313546297025901j))
    print(lib.finger(Ht1[1]) - (6.7538005829391992-0.95702422534126796j))
    print(lib.finger(Ht2[0]) - (-509.24544842179876+448.00925776269855j))
    print(lib.finger(Ht2[1]) - (107.5960392010511+40.869216223808067j) )
    print(lib.finger(Ht2[2]) - (-196.75910296082139+218.53005038057515j))

    kgcc = kccsd.GCCSD(scf.addons.convert_to_ghf(kmf))
    kccsd_eris = kccsd._make_eris_incore(kgcc, kgcc._scf.mo_coeff)
    r1 = kgcc.spatial2spin(t1)
    r2 = kgcc.spatial2spin(t2)
    ge = kccsd.energy(kgcc, r1, r2, kccsd_eris)
    r1, r2 = kgcc.update_amps(r1, r2, kccsd_eris)
    print(abs(r1 - kgcc.spatial2spin(Ht1)).max())
    print(abs(r2 - kgcc.spatial2spin(Ht2)).max())

    print(all([abs(lib.finger(eris.oooo) - (-0.18290712163391809-0.13839081039521306j) )<1e-8,
               abs(lib.finger(eris.ooOO) - (-0.084752145202964035-0.28496525042110676j) )<1e-8,
               #abs(lib.finger(eris.OOoo) - (0.43054922768629345-0.27990237216969871j)  )<1e-8,
               abs(lib.finger(eris.OOOO) - (-0.2941475969103261-0.047247498899840978j) )<1e-8,
               abs(lib.finger(eris.ooov) - (0.23381463349517045-0.11703340936984277j)  )<1e-8,
               abs(lib.finger(eris.ooOV) - (-0.052655392703214066+0.69533309442418556j))<1e-8,
               abs(lib.finger(eris.OOov) - (-0.2111361247200903+0.85087916975274647j)  )<1e-8,
               abs(lib.finger(eris.OOOV) - (-0.36995992208047412-0.18887278030885621j) )<1e-8,
               abs(lib.finger(eris.oovv) - (0.21107397525051516+0.0048714991438174871j))<1e-8,
               abs(lib.finger(eris.ooVV) - (-0.076411225687065987+0.11080438166425896j))<1e-8,
               abs(lib.finger(eris.OOvv) - (-0.17880337626095003-0.24174716216954206j) )<1e-8,
               abs(lib.finger(eris.OOVV) - (0.059186286356424908+0.68433866387500164j) )<1e-8,
               abs(lib.finger(eris.ovov) - (0.15402983765151051+0.064359681685222214j) )<1e-8,
               abs(lib.finger(eris.ovOV) - (-0.10697649196044598+0.30351249676253234j) )<1e-8,
               #abs(lib.finger(eris.OVov) - (-0.17619329728836752-0.56585020976035816j) )<1e-8,
               abs(lib.finger(eris.OVOV) - (-0.63963235318492118+0.69863219317718828j) )<1e-8,
               abs(lib.finger(eris.voov) - (-0.24137641647339092+0.18676684336011531j) )<1e-8,
               abs(lib.finger(eris.voOV) - (0.19257709151227204+0.38929027819406414j)  )<1e-8,
               #abs(lib.finger(eris.VOov) - (0.07632606729926053-0.70350947950650355j)  )<1e-8,
               abs(lib.finger(eris.VOOV) - (-0.47970203195500816+0.46735207193861927j) )<1e-8,
               abs(lib.finger(eris.vovv) - (-0.1342049915673903-0.23391327821719513j)  )<1e-8,
               abs(lib.finger(eris.voVV) - (-0.28989635223866056+0.9644368822688475j)  )<1e-8,
               abs(lib.finger(eris.VOvv) - (-0.32428269235420271+0.0029847254383674748j))<1e-8,
               abs(lib.finger(eris.VOVV) - (0.45031779746222456-0.36858577475752041j)  )<1e-8]))

    # The outcore backend must reproduce the same integrals (plus vvvv blocks).
    eris = _make_eris_outcore(mycc, mycc.mo_coeff)
    print(all([abs(lib.finger(eris.oooo) - (-0.18290712163391809-0.13839081039521306j) )<1e-8,
               abs(lib.finger(eris.ooOO) - (-0.084752145202964035-0.28496525042110676j) )<1e-8,
               #abs(lib.finger(eris.OOoo) - (0.43054922768629345-0.27990237216969871j)  )<1e-8,
               abs(lib.finger(eris.OOOO) - (-0.2941475969103261-0.047247498899840978j) )<1e-8,
               abs(lib.finger(eris.ooov) - (0.23381463349517045-0.11703340936984277j)  )<1e-8,
               abs(lib.finger(eris.ooOV) - (-0.052655392703214066+0.69533309442418556j))<1e-8,
               abs(lib.finger(eris.OOov) - (-0.2111361247200903+0.85087916975274647j)  )<1e-8,
               abs(lib.finger(eris.OOOV) - (-0.36995992208047412-0.18887278030885621j) )<1e-8,
               abs(lib.finger(eris.oovv) - (0.21107397525051516+0.0048714991438174871j))<1e-8,
               abs(lib.finger(eris.ooVV) - (-0.076411225687065987+0.11080438166425896j))<1e-8,
               abs(lib.finger(eris.OOvv) - (-0.17880337626095003-0.24174716216954206j) )<1e-8,
               abs(lib.finger(eris.OOVV) - (0.059186286356424908+0.68433866387500164j) )<1e-8,
               abs(lib.finger(eris.ovov) - (0.15402983765151051+0.064359681685222214j) )<1e-8,
               abs(lib.finger(eris.ovOV) - (-0.10697649196044598+0.30351249676253234j) )<1e-8,
               #abs(lib.finger(eris.OVov) - (-0.17619329728836752-0.56585020976035816j) )<1e-8,
               abs(lib.finger(eris.OVOV) - (-0.63963235318492118+0.69863219317718828j) )<1e-8,
               abs(lib.finger(eris.voov) - (-0.24137641647339092+0.18676684336011531j) )<1e-8,
               abs(lib.finger(eris.voOV) - (0.19257709151227204+0.38929027819406414j)  )<1e-8,
               #abs(lib.finger(eris.VOov) - (0.07632606729926053-0.70350947950650355j)  )<1e-8,
               abs(lib.finger(eris.VOOV) - (-0.47970203195500816+0.46735207193861927j) )<1e-8,
               abs(lib.finger(eris.vovv) - (-0.1342049915673903-0.23391327821719513j)  )<1e-8,
               abs(lib.finger(eris.voVV) - (-0.28989635223866056+0.9644368822688475j)  )<1e-8,
               abs(lib.finger(eris.VOvv) - (-0.32428269235420271+0.0029847254383674748j))<1e-8,
               abs(lib.finger(eris.VOVV) - (0.45031779746222456-0.36858577475752041j)  )<1e-8,
               abs(lib.finger(eris.vvvv) - (-0.080512851258903173-0.2868384266725581j) )<1e-8,
               abs(lib.finger(eris.vvVV) - (-0.5137063762484736+1.1036785801263898j)   )<1e-8,
               #abs(lib.finger(eris.VVvv) - (0.16468487082491939+0.25730725586992997j)  )<1e-8,
               abs(lib.finger(eris.VVVV) - (-0.56714875196802295+0.058636785679170501j))<1e-8]))
|
gkc1000/pyscf
|
pyscf/pbc/cc/kccsd_uhf.py
|
Python
|
apache-2.0
| 59,676
|
[
"PySCF"
] |
b9495332fd71d2836bfb16e63e5afcaf94b5208c6d9f54f2bb4f934d3631afee
|
#!/usr/bin/env python3
########################################################################
# Solves problem 28 from projectEuler.net.
# Finds the sum of the diagonal in an spiral.
# Copyright (C) 2010 Santiago Alessandri
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# You can contact me at san.lt.ss@gmail.com
# Visit my wiki at http://san-ss.wikidot.com
########################################################################
if __name__ == '__main__':
    # Ring n of the 1001x1001 spiral has side length 2n+1; its four corner
    # values are (2n+1)^2, (2n+1)^2 - 2n, - 4n and - 6n, which sum to
    # 16n^2 + 4n + 4.  Adding the centre 1 gives the diagonal total.
    result = 1 + sum(16 * n * n + 4 * n + 4 for n in range(1, 501))
    print("The result is:", result)
|
sanSS/programming-contests
|
project-euler/problem028.py
|
Python
|
gpl-3.0
| 1,254
|
[
"VisIt"
] |
74465ad4c826741ce3d447237db57fc10ab6f94f9a2cd3d601ee4b04eb3c9991
|
#!/usr/bin/env python -i
# preceding line should have path for Python on your machine
# vizplotgui_gl.py
# Purpose: viz running LAMMPS simulation via GL tool with plot and GUI
# Syntax: vizplotgui_gl.py in.lammps Nfreq compute-ID
# in.lammps = LAMMPS input script
# Nfreq = plot data point and viz shapshot every this many steps
# compute-ID = ID of compute that calculates temperature
# (or any other scalar quantity)
# IMPORTANT: this script cannot yet be run in parallel via Pypar,
# because I can't seem to do a MPI-style broadcast in Pypar
from __future__ import print_function
import sys,time
sys.path.append("./pizza")
# methods called by GUI
def run():
    """GUI callback: let the timestep loop advance the simulation."""
    global runflag
    runflag = 1
def stop():
    """GUI callback: pause the timestep loop."""
    global runflag
    runflag = 0
def settemp(value):
    """Slider callback: record a new target temperature.

    `value` is the Tk event object delivered by the binding; the actual
    temperature is read from the slider widget itself.
    """
    global temptarget
    temptarget = slider.get()
def quit():
    """GUI callback: request loop exit at the next Nfreq check.

    NOTE(review): shadows the builtin quit(); referenced by name in the GUI
    setup below, so renaming would require changing both places.
    """
    global breakflag
    breakflag = 1
# method called by timestep loop every Nfreq steps
# read dump snapshot and viz it, update plot with compute value
def update(ntimestep):
    """Advance the viz/plot to the latest snapshot.

    Reads the next frame from the dump file, refreshes the GL view, then
    appends the compute's scalar value to the plot data and re-plots.
    """
    d.next()
    d.unscale()
    g.show(ntimestep)
    value = lmp.extract_compute(compute,0,0)  # scalar from compute-ID
    xaxis.append(ntimestep)
    yaxis.append(value)
    gn.plot(xaxis,yaxis)
# parse command line

argv = sys.argv
if len(argv) != 4:
    print("Syntax: vizplotgui_gl.py in.lammps Nfreq compute-ID")
    sys.exit()

infile = sys.argv[1]          # LAMMPS input script
nfreq = int(sys.argv[2])      # plot/viz update interval in timesteps
compute = sys.argv[3]         # ID of the scalar compute to plot

me = 0
# uncomment if running in parallel via Pypar
#import pypar
#me = pypar.rank()
#nprocs = pypar.size()

from lammps import lammps
lmp = lammps()

# run infile all at once
# assumed to have no run command in it
# dump a file in native LAMMPS dump format for Pizza.py dump tool

lmp.file(infile)
lmp.command("thermo %d" % nfreq)
lmp.command("dump python all atom %d tmp.dump" % nfreq)

# initial 0-step run to generate initial 1-point plot, dump file, and image

lmp.command("run 0 pre yes post no")
value = lmp.extract_compute(compute,0,0)
ntimestep = 0
xaxis = [ntimestep]   # plot abscissa: timesteps
yaxis = [value]       # plot ordinate: compute scalar
breakflag = 0         # set by quit() callback
runflag = 0           # toggled by run()/stop() callbacks
temptarget = 1.0      # updated by the temperature slider
# wrapper on GL window via Pizza.py gl tool
# just proc 0 handles reading of dump file and viz

if me == 0:
    try:
        from Tkinter import *   # Python 2
    except:
        from tkinter import *   # Python 3
    tkroot = Tk()
    tkroot.withdraw()

    from dump import dump
    from gl import gl

    d = dump("tmp.dump",0)
    g = gl(d)
    d.next()
    d.unscale()
    g.zoom(1)
    g.shift(0,0)
    g.rotate(0,270)
    g.q(10)
    g.box(1)
    g.show(ntimestep)

# display GUI with run/stop buttons and slider for temperature

if me == 0:
    try:
        from Tkinter import *   # Python 2
    except:
        from tkinter import *   # Python 3
    tkroot = Tk()
    tkroot.withdraw()
    root = Toplevel(tkroot)
    root.title("LAMMPS GUI")

    frame = Frame(root)
    Button(frame,text="Run",command=run).pack(side=LEFT)
    Button(frame,text="Stop",command=stop).pack(side=LEFT)
    slider = Scale(frame,from_=0.0,to=5.0,resolution=0.1,
                   orient=HORIZONTAL,label="Temperature")
    slider.bind('<ButtonRelease-1>',settemp)
    slider.set(temptarget)
    slider.pack(side=LEFT)
    Button(frame,text="Quit",command=quit).pack(side=RIGHT)
    frame.pack()
    tkroot.update()

# wrapper on GnuPlot via Pizza.py gnu tool

if me == 0:
    from gnu import gnu
    gn = gnu()
    gn.plot(xaxis,yaxis)
    gn.title(compute,"Timestep","Temperature")
# endless loop, checking status of GUI settings every Nfreq steps
# run with pre yes/no and post yes/no depending on go/stop status
# re-invoke fix langevin with new seed when temperature slider changes
# after re-invoke of fix langevin, run with pre yes
running = 0
temp = temptarget
seed = 12345
lmp.command("fix 2 all langevin %g %g 0.1 %d" % (temp,temp,seed))
while 1:
    if me == 0: tkroot.update()
    if temp != temptarget:
        temp = temptarget
        seed += me+1
        # BUGFIX: the new seed was previously hard-coded as 12345, so the
        # increment above was never used and every re-invocation reused the
        # same random stream; pass the updated seed to the fix command.
        lmp.command("fix 2 all langevin %g %g 0.1 %d" % (temp,temp,seed))
        running = 0
    # pre/post flags avoid redundant setup/cleanup between consecutive runs
    if runflag and running:
        lmp.command("run %d pre no post no" % nfreq)
        ntimestep += nfreq
        if me == 0: update(ntimestep)
    elif runflag and not running:
        lmp.command("run %d pre yes post no" % nfreq)
        ntimestep += nfreq
        if me == 0: update(ntimestep)
    elif not runflag and running:
        # one final segment with post yes to finish cleanly when stopping
        lmp.command("run %d pre no post yes" % nfreq)
        ntimestep += nfreq
        if me == 0: update(ntimestep)
    if breakflag: break
    if runflag: running = 1
    else: running = 0
    time.sleep(0.01)
lmp.command("run 0 pre no post yes")
# uncomment if running in parallel via Pypar
#print("Proc %d out of %d procs has" % (me,nprocs), lmp)
#pypar.finalize()
|
jag1g13/lammps
|
python/examples/vizplotgui_gl.py
|
Python
|
gpl-2.0
| 4,502
|
[
"LAMMPS"
] |
5076709dcd118aadf6ad0688925da154fc68723b0d789e8e22c3cefd44e739c3
|
# Copyright 2005-2008 by Frank Kauff & Cymon J. Cox. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
# Nodes.py
#
# Provides functionality of a linked list.
# Each node has one (or none) predecessor, and an arbitrary number of successors.
# Nodes can store arbitrary data in a NodeData class.
#
# Subclassed by Nexus.Trees to store phylogenetic trees.
#
# Bug reports to Frank Kauff (fkauff@biologie.uni-kl.de)
#
class ChainException(Exception):
    """Raised for errors in Chain operations (e.g. unknown node ids)."""
    pass
class NodeException(Exception):
    """Raised for errors on individual nodes (e.g. changing a fixed id)."""
    pass
class Chain(object):
    """Stores a list of nodes that are linked together.

    Nodes live in ``self.chain``, a dict mapping integer ids to Node objects.
    Ids are handed out sequentially by _get_id(); ``self.id`` holds the last
    id issued.
    """
    def __init__(self):
        """Initiates a node chain: (self)."""
        self.chain={}
        self.id=-1  # so the first _get_id() call returns 0
    def _get_id(self):
        """Gets a new id for a node in the chain."""
        self.id+=1
        return self.id
    def all_ids(self):
        """Return all node ids.

        NOTE(review): on Python 3 this is a dict *view*, not a list — confirm
        callers do not rely on list semantics.
        """
        return self.chain.keys()
    def add(self,node,prev=None):
        """Attaches node to another: (self, node, prev).

        Assigns a fresh id to *node*, links it under *prev* (if given) and
        returns the new id. Raises ChainException for an unknown *prev*.
        """
        if prev is not None and prev not in self.chain:
            raise ChainException('Unknown predecessor: '+str(prev))
        else:
            id=self._get_id()
            node.set_id(id)
            node.set_prev(prev)
            if prev is not None:
                self.chain[prev].add_succ(id)
            self.chain[id]=node
        return id
    def collapse(self,id):
        """Deletes node from chain and relinks successors to predecessor: collapse(self, id).

        Returns the removed node so callers can still inspect its data.
        """
        if id not in self.chain:
            raise ChainException('Unknown ID: '+str(id))
        # Unhook the node from its predecessor, re-parent every successor
        # onto that predecessor, then delete the node itself.
        prev_id=self.chain[id].get_prev()
        self.chain[prev_id].remove_succ(id)
        succ_ids=self.chain[id].get_succ()
        for i in succ_ids:
            self.chain[i].set_prev(prev_id)
        self.chain[prev_id].add_succ(succ_ids)
        node=self.chain[id]
        self.kill(id)
        return node
    def kill(self,id):
        """Kills a node from chain without caring to what it is connected: kill(self,id)."""
        if id not in self.chain:
            raise ChainException('Unknown ID: '+str(id))
        else:
            del self.chain[id]
    def unlink(self,id):
        """Disconnects node from his predecessor: unlink(self,id).

        Returns the former predecessor's id (or None if the node was a root).
        """
        if id not in self.chain:
            raise ChainException('Unknown ID: '+str(id))
        else:
            prev_id=self.chain[id].prev
            if prev_id is not None:
                # remove id from the predecessor's successor list
                self.chain[prev_id].succ.pop(self.chain[prev_id].succ.index(id))
            self.chain[id].prev=None
            return prev_id
    def link(self, parent,child):
        """Connects *child* to *parent*: link(self,parent,child).

        The child is first unlinked from any current predecessor.
        """
        if child not in self.chain:
            raise ChainException('Unknown ID: '+str(child))
        elif parent not in self.chain:
            raise ChainException('Unknown ID: '+str(parent))
        else:
            self.unlink(child)
            self.chain[parent].succ.append(child)
            self.chain[child].set_prev(parent)
    def is_parent_of(self,parent,grandchild):
        """Check if grandchild is a subnode of parent: is_parent_of(self,parent,grandchild).

        A node counts as its own ancestor (grandchild == parent is True).
        """
        if grandchild==parent or grandchild in self.chain[parent].get_succ():
            return True
        else:
            # recurse into each subtree; the for/else returns False only when
            # no successor subtree contains grandchild
            for sn in self.chain[parent].get_succ():
                if self.is_parent_of(sn,grandchild):
                    return True
            else:
                return False
    def trace(self,start,finish):
        """Returns a list of all node_ids between two nodes (excluding start, including end): trace(start,end)."""
        if start not in self.chain or finish not in self.chain:
            raise NodeException('Unknown node.')
        if not self.is_parent_of(start,finish) or start==finish:
            return []
        # exactly one child subtree contains finish (is_parent_of was True
        # above), so the loop always returns before falling through
        for sn in self.chain[start].get_succ():
            if self.is_parent_of(sn,finish):
                return [sn]+self.trace(sn,finish)
class Node(object):
    """A single node: one (optional) predecessor and any number of successors."""

    def __init__(self, data=None):
        """Create an unlinked node carrying *data*."""
        self.id = None      # assigned exactly once via set_id()
        self.data = data    # arbitrary payload
        self.prev = None    # id of the predecessor node, or None for a root
        self.succ = []      # ids of successor nodes

    def set_id(self, id):
        """Assign the node id; it is write-once."""
        if self.id is not None:
            raise NodeException('Node id cannot be changed.')
        self.id = id

    def get_id(self):
        """Return the node's id (None until set_id() has been called)."""
        return self.id

    def get_succ(self):
        """Return the list of successor ids."""
        return self.succ

    def get_prev(self):
        """Return the predecessor id, or None."""
        return self.prev

    def add_succ(self, id):
        """Append one successor id, or extend with a list of ids."""
        if isinstance(id, list):
            self.succ.extend(id)
        else:
            self.succ.append(id)

    def remove_succ(self, id):
        """Drop *id* from the successor list (ValueError if absent)."""
        self.succ.remove(id)

    def set_succ(self, new_succ):
        """Replace the successor list wholesale; must be a list."""
        if not isinstance(new_succ, list):
            raise NodeException('Node successor must be of list type.')
        self.succ = new_succ

    def set_prev(self, id):
        """Set the predecessor id."""
        self.prev = id

    def get_data(self):
        """Return the node's payload."""
        return self.data

    def set_data(self, data):
        """Replace the node's payload."""
        self.data = data
|
bryback/quickseq
|
genescript/Bio/Nexus/Nodes.py
|
Python
|
mit
| 5,775
|
[
"Biopython"
] |
545911f36af7b0da9704c7f66ffdee9f59bfc0de899128ecf3dd1035ef6d1774
|
"""
Define common steps for instructor dashboard acceptance tests.
"""
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from __future__ import absolute_import
from lettuce import world, step
from mock import patch
from nose.tools import assert_in
from courseware.tests.factories import StaffFactory, InstructorFactory
@step(u'Given I am "([^"]*)" for a very large course')
def make_staff_or_instructor_for_large_course(step, role):
    """Lettuce step: create a course that counts as 'very large' and log in as *role*."""
    make_large_course(step, role)
@patch.dict('courseware.access.settings.FEATURES', {"MAX_ENROLLMENT_INSTR_BUTTONS": 0})
def make_large_course(step, role):
    """With the enrollment-button cap patched to 0, any course behaves as 'large'."""
    i_am_staff_or_instructor(step, role)
@step(u'Given I am "([^"]*)" for a course')
def i_am_staff_or_instructor(step, role):  # pylint: disable=unused-argument
    """Create a fresh test course, make a staff or instructor user for it
    (depending on *role*), enroll them, and log them in.
    """
    # Only these two roles are supported.
    assert_in(role, ['instructor', 'staff'])
    # Clear existing courses to avoid conflicts, then create a new one.
    world.clear_courses()
    course = world.CourseFactory.create(
        org='edx',
        number='999',
        display_name='Test Course'
    )
    world.course_key = course.id
    world.role = role
    # Build and register the user with the matching factory.
    if role == 'instructor':
        world.instructor = InstructorFactory(course_key=world.course_key)
        user = world.instructor
    else:
        world.staff = StaffFactory(course_key=world.course_key)
        user = world.staff
    world.enroll_user(user, world.course_key)
    world.log_in(
        username=user.username,
        password='test',
        email=user.email,
        name=user.profile.name
    )
def go_to_section(section_name):
    """Open the instructor dashboard for the current course and switch to one
    of its sections (course_info, membership, student_admin, data_download,
    analytics, send_email).
    """
    dashboard_selector = u'a[href="/courses/{}/instructor"]'.format(world.course_key)
    world.visit(u'/courses/{}'.format(world.course_key))
    world.css_click(dashboard_selector)
    world.css_click('a[data-section="{0}"]'.format(section_name))
@step(u'I click "([^"]*)"')
def click_a_button(step, button):  # pylint: disable=unused-argument
    """Lettuce step: press one of the data-download buttons on the dashboard."""
    # Buttons that only require a single click, keyed by their label.
    simple_buttons = {
        "Grading Configuration": 'input[name="dump-gradeconf"]',
        "List enrolled students' profile information": 'input[name="list-profiles"]',
        "Download profile information as a CSV": 'input[name="list-profiles-csv"]',
    }
    if button == "Generate Grade Report":
        # Go to the data download section of the instructor dash.
        go_to_section("data_download")
        # Click generate grade report button.
        world.css_click('input[name="calculate-grades-csv"]')
        # Expect to see a message that grade report is being generated.
        expected_msg = ("The grade report is being created."
                        " To view the status of the report, see"
                        " Pending Tasks below.")
        world.wait_for_visible('#report-request-response')
        assert_in(
            expected_msg, world.css_text('#report-request-response'),
            msg="Could not find grade report generation success message."
        )
    elif button in simple_buttons:
        go_to_section("data_download")
        world.css_click(simple_buttons[button])
    else:
        raise ValueError("Unrecognized button option " + button)
@step(u'I visit the "([^"]*)" tab')
def visit_the_tab(step, tab_name):  # pylint: disable=unused-argument
    """Lettuce step: switch the instructor dashboard to the named tab.

    BUGFIX: this function was previously also named ``click_a_button``,
    silently shadowing the earlier step of that name at module level. Lettuce
    dispatches on the regex, so renaming restores both module attributes
    without changing step matching.
    """
    # Maps human-readable tab labels to dashboard section ids:
    # course_info, membership, student_admin, data_download, analytics, send_email
    tab_name_dict = {
        'Course Info': 'course_info',
        'Membership': 'membership',
        'Student Admin': 'student_admin',
        'Data Download': 'data_download',
        'Analytics': 'analytics',
        'Email': 'send_email',
    }
    go_to_section(tab_name_dict[tab_name])
|
solashirai/edx-platform
|
lms/djangoapps/instructor/features/common.py
|
Python
|
agpl-3.0
| 4,561
|
[
"VisIt"
] |
a826ae49e75603d8e85c7a306e1af6b6df01f9953490042027db5995b1d98538
|
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to submit
# large numbers of jobs on supercomputers. It provides a python interface to physical input, such as
# crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs. It
# is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
""" multifit NLEP fitting input script. """
from os.path import join, exists
from shutil import rmtree
import numpy as np
import os
from .system_params import SystemParams
class System(object):
    """Describes one compound (e.g. 'GaAs') in a multi-system NLEP fit.

    Attributes:
        name: compound name (cation + anion symbols).
        input: the SystemParams (or user-supplied params) for this compound.
        objective: fit objective, filled in later by the driver.
        special_pc: optional override for target partial charges.
        outdir: output directory for this system's runs.
        floating_vbm: whether the valence-band maximum is allowed to float.
    """

    # BUGFIX: the class previously defined __init__ twice; the first (no-arg)
    # definition was dead code, silently replaced by the second. Merged into a
    # single constructor with backward-compatible defaults so both call forms
    # (System() and System(name, input, outdir)) work.
    def __init__(self, name="", input=None, outdir=None):
        self.name = name
        self.input = input
        self.objective = None
        self.special_pc = None
        self.outdir = outdir
        self.floating_vbm = False
def setup_one_system(cat, an, run_input, nlep_params, sys_params=None):
    """Build the `System` object for the compound *cat*+*an*.

    Uses *sys_params* when supplied, otherwise constructs a SystemParams from
    the run_input paths; then applies any per-compound overrides (partial
    charges, eigenvalue weights, floating VBM) from run_input.

    Changes from the original: identity comparisons (`is None`/`is not None`)
    replace `== None`/`!= None`, and the unused function-level imports
    (pylada.vasp.*, boost.mpi, os) were removed.
    """
    compound = "%s%s" % (cat, an)
    print("creating system for ", cat, an, " with nlep_params= ", nlep_params)
    potcar_dir = run_input.potcar_dir
    outcar_data_dir = run_input.outcar_data_dir
    outdir = run_input.outdir
    if sys_params is None:
        sys_params = SystemParams(cat, an, potcar_dir, outcar_data_dir,
                                  run_input.floating_vbm, nlep_params)
    else:
        print("using user system params for %s" % (compound))
    theSys = System(compound, sys_params, outdir)
    if run_input.special_pc is not None and compound in run_input.special_pc:
        print("overriding target partial charges for systems %s from run_input" % compound)
        theSys.special_pc = run_input.special_pc[compound]
    if run_input.floating_vbm:
        theSys.floating_vbm = True
    if run_input.eigenvalue_weights is not None and compound in run_input.eigenvalue_weights:
        print("overriding eigenvalue weights for systems %s from run_input" % compound)
        theSys.eigenvalue_weights = run_input.eigenvalue_weights[compound]
    return theSys
class MultiSystem():
    """Aggregates one `System` per cation/anion pair and maps the combined
    NLEP parameter vector x onto the individual systems' species.
    """
    def __init__(self, run_input, system_params=None):
        """Build all systems from *run_input*; optionally seed each from a
        previous 'analog' fit when run_input.load_from_analogs is set.
        """
        from pylada.vasp.nlep.postprocessing import load_run_input, find_best, load_test, prepare_analog_fit, get_analog_name
        from boost.mpi import world
        cations = run_input.cations
        anions = run_input.anions
        dont_fit = run_input.dont_fit
        self.cations = cations
        self.anions = anions
        self.objectives = None
        self.systems = []
        self.descriptor = str(cations) + str(anions)
        if (run_input.load_from_analogs):
            # Collect the analog compound names, then locate the best prior
            # fit for each of them.
            alog_system_names = []
            for cat in cations:
                for an in anions:
                    cmpd = get_analog_name(cat, an)
                    alog_system_names.append(cmpd)
            # NOTE(review): this rebinds run_input from disk, discarding the
            # caller's argument for the remainder of __init__ — confirm intended.
            run_input = load_run_input()
            withranks = (run_input.optimizer == "lmmin")
            if (withranks):
                # rank = world.rank
                rank = 0
            else:
                rank = None
            job, runs = find_best(alog_system_names, run_input.nbjobs, withranks)
        for cat in cations:
            for an in anions:
                cmpd = "%s%s" % (cat, an)
                print("check: is ", cmpd, " in ", dont_fit)
                if (dont_fit == None or cmpd not in dont_fit):
                    if (run_input.load_from_analogs):
                        analog_cmpd = get_analog_name(cat, an)
                        test, nlep = load_test(runs[analog_cmpd], job, rank, analog_cmpd, None)
                        nlep_params = prepare_analog_fit(test, nlep)
                        print("nlep_params from analog fit: ", nlep_params)
                    else:
                        nlep_params = None
                    # append these, don't require all
                    if (run_input.nlep_params != None):
                        if (nlep_params == None):
                            nlep_params = run_input.nlep_params
                        else:
                            for key in run_input.nlep_params:
                                nlep_params[key] = run_input.nlep_params[key]
                    if (system_params != None and cmpd in system_params):
                        this_sys_params = system_params[cmpd]
                    else:
                        this_sys_params = None
                    newsystem = setup_one_system(cat, an, run_input, nlep_params, this_sys_params)
                    self.systems.append(newsystem)
        self.result_size = self.get_result_size(run_input)
    def get_nlep_params_x(self):
        """Concatenate every specie's NLEP parameters into one flat list."""
        from .nlep import getx_from_specie, set_nlep_fromx
        x = []
        for symbol, specie in self.species.items():
            x += getx_from_specie(specie)
        return x
    def getx(self):
        """Return the full optimization vector (NLEP params, plus a band-shift
        slot per floating-VBM system — currently disabled via `False and`).
        """
        x = self.get_nlep_params_x()
        for s in self.systems:
            if (False and s.floating_vbm):  # the variable that stores the band_shift is at the end of the big multi-system x
                print("floating vbm")
                x.append(0)  # initial shift is zero
        print(x)
        return np.array(x)
    def get_ranges(self):
        """Return the allowed range for each entry of the optimization vector."""
        from .nlep import get_range_from_specie
        x = []
        for symbol, specie in self.species.items():
            x += get_range_from_specie(specie)
        print(x)
        return np.array(x)
    def setx(self, x):
        """Scatter the flat vector *x* back into the species' NLEP params."""
        from .nlep import getx_from_specie, set_nlep_fromx
        i = 0
        for symbol, specie in self.species.items():
            i = set_nlep_fromx(x, i, specie)
    def setup_species(self):
        """extract species from vasp objects to make one list for whole set of systems"""
        from .nlep import getx_from_specie, set_nlep_fromx
        self.species = {}
        for s in self.systems:
            for symbol, specie in s.objective.vasp.species.items():
                # print "system symbol is", symbol
                self.species[symbol] = specie
        # Record each specie's slice [xstart, xend) of the flat vector.
        idx = 0
        self.species_dict = {}
        for symbol, specie in self.species.items():
            # print symbol, specie
            xspan = len(getx_from_specie(specie))
            spec = SpecRec(symbol, specie, idx, idx + xspan)
            idx += xspan
            self.species_dict[symbol] = spec
    def get_result_size(self, run_input):
        """Total length of the fit residual vector across all systems."""
        from .mpopt import get_result_size
        size = 0
        for s in self.systems:
            size += get_result_size(s.input, run_input)
        return size
    def mapx_to_system(self, x, sys):
        """Extract from the global vector *x* the sub-vector for system *sys*,
        in the order of that system's species.
        """
        import numpy as np
        xsys = []
        for symbol, specie in sys.objective.vasp.species.items():
            spec = self.species_dict[symbol]
            for e in x[spec.xstart:spec.xend]:
                xsys.append(e)
        if (False and sys.floating_vbm):  # the variable that stores the band_shift is at the end of the big multi-system x
            total_num_nlep_params = len(self.get_nlep_params_x())
            if (len(x) > total_num_nlep_params):
                for i in range(0, len(self.systems)):
                    if (self.systems[i] == sys):
                        print("found sys ", i, total_num_nlep_params, x)
                        break
                xsys.append(x[total_num_nlep_params + i])
        return np.array(xsys)
class SpecRec(object):
    """Record tying one atomic specie to its slice [xstart, xend) of the
    flat multi-system parameter vector x.
    """

    def __init__(self, symbol, specie, xstart, xend):
        """Store the specie handle and its half-open slice bounds."""
        self.symbol = symbol
        self.specie = specie
        self.xstart = xstart
        self.xend = xend
def setup_systems(run_input, system_params):
    """Convenience wrapper: build the MultiSystem for this fitting run."""
    return MultiSystem(run_input, system_params)
if __name__ == '__main__':
    # NOTE(review): setup_systems() requires (run_input, system_params) but is
    # called here with no arguments, so this entry point raises TypeError as
    # written — confirm where run_input was meant to come from.
    systems = setup_systems()
    systems.setup_species()
    systems.getx()
|
pylada/pylada-light
|
src/pylada/vasp/nlep/systems.py
|
Python
|
gpl-3.0
| 8,872
|
[
"CRYSTAL",
"VASP"
] |
2ebea03bd49f1da1ac69ede34fce071d92134bb4698b948bcc4dc2d79325decf
|
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2019 Jonathan Dent, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import math
import os
import bpy
import appleseed as asr
from .assethandlers import AssetHandler, CopyAssetsAssetHandler
from .cameras import InteractiveCameraTranslator, RenderCameraTranslator
from .material import MaterialTranslator
from .objects import ArchiveAssemblyTranslator, MeshTranslator, LampTranslator
from .textures import TextureTranslator
from .utilites import ProjectExportMode
from .world import WorldTranslator
from ..logger import get_logger
from ..utils.util import Timer, calc_film_aspect_ratio, clamp_value, realpath
logger = get_logger()
class SceneTranslator(object):
"""
Translates a Blender scene into an appleseed project.
"""
# Constructors.
@classmethod
def create_project_export_translator(cls, depsgraph):
project_dir = os.path.dirname(depsgraph.scene_eval.appleseed.export_path)
logger.debug("Creating texture and geometry directories in %s", project_dir)
geometry_dir = os.path.join(project_dir, "_geometry")
textures_dir = os.path.join(project_dir, "_textures")
if not os.path.exists(geometry_dir):
os.makedirs(geometry_dir)
if not os.path.exists(textures_dir):
os.makedirs(textures_dir)
logger.debug("Creating project export scene translator, filename: %s", depsgraph.scene_eval.appleseed.export_path)
asset_handler = CopyAssetsAssetHandler(project_dir, geometry_dir, textures_dir, depsgraph)
return cls(export_mode=ProjectExportMode.PROJECT_EXPORT,
selected_only=depsgraph.scene.appleseed.export_selected,
asset_handler=asset_handler)
@classmethod
def create_final_render_translator(cls, depsgraph):
"""
Create a scene translator to export the scene to an in memory appleseed project.
:param depsgraph:
:return:
"""
logger.debug("Creating final render scene translator")
asset_handler = AssetHandler(depsgraph)
return cls(export_mode=ProjectExportMode.FINAL_RENDER,
selected_only=False,
asset_handler=asset_handler)
@classmethod
def create_interactive_render_translator(cls, depsgraph):
"""
Create a scene translator to export the scene to an in memory appleseed project
optimized for quick interactive edits.
:param depsgraph:
:return:
"""
logger.debug("Creating interactive render scene translator")
asset_handler = AssetHandler(depsgraph)
return cls(export_mode=ProjectExportMode.INTERACTIVE_RENDER,
selected_only=False,
asset_handler=asset_handler)
    def __init__(self, export_mode, selected_only, asset_handler):
        """
        Constructor. Do not use it to create instances of this class.
        Use the @classmethods instead.
        """
        self.__asset_handler = asset_handler
        self.__export_mode = export_mode
        self.__selected_only = selected_only
        # Translators (filled in by translate_scene).
        self.__as_world_translator = None
        self.__as_camera_translator = None
        self.__as_object_translators = dict()
        self.__as_material_translators = dict()
        self.__as_texture_translators = dict()
        # Motion Steps: relative shutter sample times; 0.0 is always sampled.
        self.__all_times = {0.0}
        self.__cam_times = {0.0}
        self.__xform_times = {0.0}
        self.__deform_times = {0.0}
        # Interactive tools.
        self.__viewport_resolution = None
        self.__current_frame = None
        # Render crop window.
        self.__crop_window = None
        self.__project = None
        self.__frame = None
    @property
    def as_project(self):
        # The appleseed project object being built.
        return self.__project
    @property
    def as_scene(self):
        # Scene held by the project.
        return self.__project.get_scene()
    @property
    def as_main_assembly(self):
        # Main assembly ("assembly") created during project setup.
        return self.__main_assembly
    def translate_scene(self, engine, depsgraph, context=None):
        """Translate the whole Blender scene into the appleseed project.

        Pipeline: create the project and frame, build camera/world/material/
        texture/object translators, record initial transforms, add motion-blur
        steps (final render only), then flush everything into the project.
        *context* is only supplied for viewport (interactive) rendering.
        """
        logger.debug("appleseed: Translating scene %s", depsgraph.scene_eval.name)
        prof_timer = Timer()
        prof_timer.start()
        self.__create_project(depsgraph)
        # Motion blur shutter times only apply to non-interactive renders.
        if self.__export_mode != ProjectExportMode.INTERACTIVE_RENDER:
            self.__calc_shutter_times(depsgraph)
        self.__translate_render_settings(depsgraph)
        self.__calc_viewport_resolution(depsgraph, context)
        aovs = self.__set_aovs(depsgraph)
        frame_params = self.__translate_frame(depsgraph)
        self.__frame = asr.Frame("beauty", frame_params, aovs)
        self.__calc_crop_window(depsgraph, context)
        if self.__crop_window is not None:
            self.__frame.set_crop_window(self.__crop_window)
        if len(depsgraph.scene_eval.appleseed.post_processing_stages) > 0 and self.__export_mode != ProjectExportMode.INTERACTIVE_RENDER:
            self.__set_post_process(depsgraph)
        self.__project.set_frame(self.__frame)
        # Re-fetch: the project takes ownership of the frame on set_frame.
        self.__frame = self.__project.get_frame()
        # Create camera
        if depsgraph.scene_eval.camera is not None:
            # Create interactive or final render camera
            if self.__export_mode == ProjectExportMode.INTERACTIVE_RENDER:
                self.__as_camera_translator = InteractiveCameraTranslator(depsgraph.scene_eval.camera, self.__asset_handler)
            else:
                self.__as_camera_translator = RenderCameraTranslator(depsgraph.scene_eval.camera, self.__asset_handler)
        else:
            engine.error_set("appleseed: No camera in scene!")
        # Create world
        if depsgraph.scene_eval.world is not None:
            self.__as_world_translator = WorldTranslator(depsgraph.scene_eval.world, self.__asset_handler)
        # Blender scene processing: one translator per exportable datablock.
        objects_to_add = dict()
        materials_to_add = dict()
        textures_to_add = dict()
        for obj in bpy.data.objects:
            if obj.type == 'LIGHT':
                objects_to_add[obj] = LampTranslator(obj, self.__export_mode, self.__asset_handler)
            elif obj.type == 'MESH' and len(obj.data.loops) > 0:
                objects_to_add[obj] = MeshTranslator(obj, self.__export_mode, self.__asset_handler)
            elif obj.type == 'EMPTY' and obj.appleseed.object_export == "archive_assembly":
                objects_to_add[obj] = ArchiveAssemblyTranslator(obj, self.__asset_handler)
        for mat in bpy.data.materials:
            materials_to_add[mat] = MaterialTranslator(mat, self.__asset_handler)
        for tex in bpy.data.images:
            if tex.users > 0 and tex.name not in ("Render Result", "Viewer Node"):
                textures_to_add[tex] = TextureTranslator(tex, self.__asset_handler)
        # Create camera, world, material and texture entities
        self.__as_camera_translator.create_entities(depsgraph, context, engine)
        if self.__as_world_translator is not None:
            self.__as_world_translator.create_entities(depsgraph)
        for obj, trans in materials_to_add.items():
            trans.create_entities(depsgraph, engine)
        for obj, trans in textures_to_add.items():
            trans.create_entities(depsgraph)
        # Set initial position of all objects and lamps
        self.__calc_initial_positions(depsgraph, engine, objects_to_add)
        # Remove unused translators (objects that produced no instances).
        for translator in list(objects_to_add.keys()):
            if objects_to_add[translator].instances_size == 0:
                del objects_to_add[translator]
        # Create 3D entities
        for obj, trans in objects_to_add.items():
            trans.create_entities(depsgraph, len(self.__deform_times))
        # Calculate additional steps for motion blur
        if self.__export_mode != ProjectExportMode.INTERACTIVE_RENDER:
            self.__calc_motion_steps(depsgraph, engine, objects_to_add)
        self.__as_camera_translator.flush_entities(self.as_scene, self.as_main_assembly, self.as_project)
        if self.__as_world_translator is not None:
            self.__as_world_translator.flush_entities(self.as_scene, self.as_main_assembly, self.as_project)
        for obj, trans in objects_to_add.items():
            trans.flush_entities(self.as_scene, self.as_main_assembly, self.as_project)
        for obj, trans in materials_to_add.items():
            trans.flush_entities(self.as_scene, self.as_main_assembly, self.as_project)
        for obj, trans in textures_to_add.items():
            trans.flush_entities(self.as_scene, self.as_main_assembly, self.as_project)
        # Transfer temp translators to main list
        for bl_obj, translator in objects_to_add.items():
            self.__as_object_translators[bl_obj] = translator
        for bl_obj, translator in materials_to_add.items():
            self.__as_material_translators[bl_obj] = translator
        for bl_obj, translator in textures_to_add.items():
            self.__as_texture_translators[bl_obj] = translator
        self.__load_searchpaths()
        prof_timer.stop()
        logger.debug("Scene translated in %f seconds.", prof_timer.elapsed())
def update_multiview_camera(self, engine, depsgraph):
current_frame = depsgraph.scene_eval.frame_current
for time in self.__cam_times:
new_frame = current_frame + time
int_frame = math.floor(new_frame)
subframe = new_frame - int_frame
engine.frame_set(int_frame, subframe=subframe)
self.__as_camera_translator.update_mult_cam_xform(engine, depsgraph.scene_eval, time)
engine.frame_set(current_frame, subframe=0.0)
def update_scene(self, depsgraph, engine):
objects_to_add = dict()
materials_to_add = dict()
object_updates = list()
check_for_deletions = False
recreate_instances = list()
# Check for updated datablocks.
for update in depsgraph.updates:
# This one is easy.
if isinstance(update.id, bpy.types.Material):
if update.id.original in self.__as_material_translators.keys():
self.__as_material_translators[update.id.original].update_material(depsgraph, engine)
else:
materials_to_add[update.id.original] = MaterialTranslator(update.id.original,
self.__asset_handler)
# Now comes agony and mental anguish.
elif isinstance(update.id, bpy.types.Object):
if update.id.type == 'MESH':
if update.id.original in self.__as_object_translators.keys():
if update.is_updated_geometry:
self.__as_object_translators[update.id.original].update_obj_instance()
object_updates.append(update.id.original)
if update.is_updated_transform:
recreate_instances.append(update.id.original)
else:
objects_to_add[update.id.original] = MeshTranslator(update.id.original,
self.__export_mode,
self.__asset_handler)
elif update.id.type == 'LIGHT':
if update.id.original in self.__as_object_translators.keys():
if update.is_updated_geometry:
self.__as_object_translators[update.id.original].update_lamp(depsgraph,
self.as_main_assembly,
self.as_scene,
self.__project)
object_updates.append(update.id.original)
recreate_instances.append(update.id.original)
if update.is_updated_transform:
recreate_instances.append(update.id.original)
else:
objects_to_add[update.id.original] = LampTranslator(update.id.original,
self.__export_mode,
self.__asset_handler)
elif update.id.type == 'EMPTY' and update.id.appleseed.object_export == "archive_assembly":
if update.id.original in self.__as_object_translators.keys():
if update.is_updated_geometry:
self.__as_object_translators[update.id.original].update_archive_ass(depsgraph)
object_updates.append(update.id.original)
if update.is_updated_transform:
recreate_instances.append(update.id.original)
elif isinstance(update.id, bpy.types.World):
self.__as_world_translator.update_world(self.as_scene, depsgraph)
elif isinstance(update.id, bpy.types.Scene):
# Check if world was added or deleted.
# Delete existing world.
if depsgraph.scene_eval.world is None and self.__as_world_translator is not None:
self.__as_world_translator.delete_world(self.as_scene)
self.__as_world_translator = None
# Create new world.
elif depsgraph.scene_eval.world is not None and self.__as_world_translator is None:
self.__as_world_translator = WorldTranslator(depsgraph.scene_eval.world, self.__asset_handler)
self.__as_world_translator.create_entities(depsgraph)
self.__as_world_translator.flush_entities(self.as_scene,
self.as_main_assembly,
self.as_project)
elif isinstance(update.id, bpy.types.Collection):
check_for_deletions = True
# Now we figure out which objects have particle systems that need to have their instances recreated.
for obj in object_updates:
if len(obj.particle_systems) > 0:
for system in obj.particle_systems:
if system.settings.render_type == 'OBJECT':
recreate_instances.append(system.settings.instance_object.original)
elif system.settings.render_type == 'COLLECTION':
for other_obj in system.settings.instance_collection.objects:
if other_obj.type in ('MESH', 'LIGHT') and other_obj.original not in recreate_instances:
recreate_instances.append(other_obj.original)
for obj in recreate_instances:
self.__as_object_translators[obj].clear_instances(self.as_main_assembly)
for inst in depsgraph.object_instances:
if inst.show_self:
obj, inst_id = self.__get_instance_data(inst)
if obj in recreate_instances:
self.__as_object_translators[obj].add_instance_step(0.0, inst_id, inst.matrix_world)
elif obj in objects_to_add.keys():
objects_to_add[obj].add_instance_step(0.0, inst_id, inst.matrix_world)
# Create new materials.
for mat in materials_to_add.values():
mat.create_entities(depsgraph, engine)
# Create new objects.
for trans in objects_to_add.values():
trans.create_entities(depsgraph, 0)
for obj in recreate_instances:
self.__as_object_translators[obj].flush_instances(self.as_main_assembly)
for mat_obj, trans in materials_to_add.items():
trans.flush_entities(self.as_scene, self.as_main_assembly, self.as_project)
self.__as_material_translators[mat_obj] = trans
for bl_obj, trans in objects_to_add.items():
trans.flush_entities(self.as_scene, self.as_main_assembly, self.as_project)
self.__as_object_translators[bl_obj] = trans
# Check if any objects were deleted.
if check_for_deletions:
obj_list = list(self.__as_object_translators.keys())
for obj in obj_list:
try:
if obj.name_full in bpy.data.objects or obj.name_full in bpy.data.lights:
continue
except:
self.__as_object_translators[obj].delete_object(self.as_main_assembly)
del self.__as_object_translators[obj]
def check_view_window(self, depsgraph, context):
    """Return a dict of boolean flags describing what changed in the viewport.

    Camera-related flags come from the camera translator; 'frame_size' and
    'crop_window' are derived by recomputing the cached values and comparing
    against the previous ones.
    """
    scene = depsgraph.scene_eval
    change_flags = self.__as_camera_translator.check_for_updates(context, scene)

    # Recompute the viewport resolution and flag a change.
    previous_resolution = self.__viewport_resolution
    self.__calc_viewport_resolution(depsgraph, context)
    change_flags['frame_size'] = previous_resolution != self.__viewport_resolution

    # Recompute the crop window and flag a change.
    previous_crop = self.__crop_window
    self.__calc_crop_window(depsgraph, context)
    change_flags['crop_window'] = previous_crop != self.__crop_window

    return change_flags
def update_view_window(self, updates, depsgraph):
    """Apply the change flags computed by check_view_window() to the project.

    :param updates: dict of booleans with keys 'cam_model', 'cam_params',
        'cam_xform', 'frame_size' and 'crop_window'.
    :param depsgraph: Blender dependency graph for the current view layer.
    """
    if updates['cam_model']:
        # A new camera model supersedes the finer-grained param/xform updates.
        self.__as_camera_translator.update_cam_model(self.as_scene)
        self.__as_camera_translator.add_cam_xform(0.0)
    else:
        if updates['cam_params']:
            self.__as_camera_translator.update_cam_params()
        if updates['cam_xform']:
            self.__as_camera_translator.add_cam_xform(0.0)
    if updates['frame_size']:
        self.__update_frame_size(depsgraph)
    if updates['crop_window']:
        # Reset first: __crop_window may be None, meaning "no crop".
        self.__frame.reset_crop_window()
        if self.__crop_window is not None:
            self.__frame.set_crop_window(self.__crop_window)
# Interactive update functions.
def write_project(self, export_path):
    """Write the appleseed project to *export_path* (extension forced to
    '.appleseed'); geometry and asset files are not written out."""
    absolute_path = bpy.path.abspath(export_path)
    filename = os.path.abspath(bpy.path.ensure_ext(absolute_path, '.appleseed'))
    writer_options = (asr.ProjectFileWriterOptions.OmitWritingGeometryFiles |
                      asr.ProjectFileWriterOptions.OmitHandlingAssetFiles)
    asr.ProjectFileWriter().write(self.__project, filename, writer_options)
# Internal methods.
def __create_project(self, depsgraph):
    """Create the bare appleseed project: default configurations, an empty
    scene and environment, the main assembly plus its identity-transform
    instance, and the default/null fallback materials."""
    logger.debug("appleseed: Creating appleseed project")
    self.__project = asr.Project(depsgraph.scene_eval.name)
    # Render settings.
    self.__project.add_default_configurations()
    # Create the scene.
    self.__project.set_scene(asr.Scene())
    # Create the environment.
    self.as_scene.set_environment(asr.Environment("environment", {}))
    # Create the main assembly.
    self.as_scene.assemblies().insert(asr.Assembly("assembly", {}))
    self.__main_assembly = self.as_scene.assemblies()["assembly"]
    # Instance the main assembly.
    assembly_inst = asr.AssemblyInstance("assembly_inst", {}, "assembly")
    assembly_inst.transform_sequence().set_transform(0.0, asr.Transformd(asr.Matrix4d.identity()))
    self.as_scene.assembly_instances().insert(assembly_inst)
    # Create default materials.
    self.__create_default_material()
    self.__create_null_material()
def __create_default_material(self):
    """Install the fallback 'facing ratio' diagnostic material that is
    assigned to objects with no material of their own."""
    logger.debug("appleseed: Creating default material")
    default_shader = asr.SurfaceShader("diagnostic_surface_shader",
                                       "__default_surface_shader",
                                       {'mode': 'facing_ratio'})
    default_material = asr.Material('generic_material',
                                    "__default_material",
                                    {'surface_shader': '__default_surface_shader'})
    self.as_main_assembly.surface_shaders().insert(default_shader)
    self.as_main_assembly.materials().insert(default_material)
def __create_null_material(self):
    """Install an empty material (no shader) used where geometry must be
    present but should not be shaded."""
    logger.debug("appleseed: Creating null material")
    null_material = asr.Material('generic_material', "__null_material", {})
    self.as_main_assembly.materials().insert(null_material)
def __calc_shutter_times(self, depsgraph):
    """Fill the camera/transform/deformation motion-blur time sets and
    store their sorted union in self.__all_times."""
    scene = depsgraph.scene_eval
    props = scene.appleseed
    shutter_length = props.shutter_close - props.shutter_open

    if props.enable_camera_blur:
        self.__get_sub_frames(scene, shutter_length, props.camera_blur_samples,
                              self.__cam_times)
    if props.enable_object_blur:
        self.__get_sub_frames(scene, shutter_length, props.object_blur_samples,
                              self.__xform_times)
    if props.enable_deformation_blur:
        # Deformation sample counts must be a power of two.
        deform_samples = self.__round_up_pow2(props.deformation_blur_samples)
        self.__get_sub_frames(scene, shutter_length, deform_samples,
                              self.__deform_times)

    # Merge all subframe times into one sorted list.
    merged_times = set()
    for time_set in (self.__cam_times, self.__xform_times, self.__deform_times):
        merged_times.update(time_set)
    self.__all_times = sorted(merged_times)
def __translate_render_settings(self, depsgraph):
    """Translate Blender scene properties into the parameter dictionaries of
    the project's 'final' and 'interactive' configurations."""
    logger.debug("appleseed: Translating render settings")
    scene = depsgraph.scene_eval
    asr_scene_props = scene.appleseed
    conf_final = self.__project.configurations()['final']
    conf_interactive = self.__project.configurations()['interactive']
    # Interactive rendering always uses path tracing regardless of the UI choice.
    lighting_engine = asr_scene_props.lighting_engine if self.__export_mode != ProjectExportMode.INTERACTIVE_RENDER else 'pt'
    tile_renderer = 'adaptive' if asr_scene_props.pixel_sampler == 'adaptive' else 'generic'
    # 'adaptive' maps to an empty pixel renderer: adaptivity lives in the tile renderer.
    pixel_render_mapping = {'uniform': 'uniform',
                            'adaptive': '',
                            'texture': 'texture'}
    pixel_renderer = pixel_render_mapping[asr_scene_props.pixel_sampler]
    parameters = {'uniform_pixel_renderer': {'force_antialiasing': True if asr_scene_props.force_aa else False,
                                             'samples': asr_scene_props.samples},
                  'adaptive_tile_renderer': {'min_samples': asr_scene_props.adaptive_min_samples,
                                             'noise_threshold': asr_scene_props.adaptive_noise_threshold,
                                             'batch_size': asr_scene_props.adaptive_batch_size,
                                             'max_samples': asr_scene_props.adaptive_max_samples},
                  'texture_controlled_pixel_renderer': {'min_samples': asr_scene_props.adaptive_min_samples,
                                                        'max_samples': asr_scene_props.adaptive_max_samples,
                                                        'file_path': realpath(asr_scene_props.texture_sampler_filepath)},
                  'use_embree': asr_scene_props.use_embree,
                  'pixel_renderer': pixel_renderer,
                  'lighting_engine': lighting_engine,
                  'tile_renderer': tile_renderer,
                  'passes': asr_scene_props.renderer_passes,
                  'generic_frame_renderer': {'tile_ordering': asr_scene_props.tile_ordering},
                  'progressive_frame_renderer': {'max_average_spp': asr_scene_props.interactive_max_samples,
                                                 'max_fps': asr_scene_props.interactive_max_fps,
                                                 'time_limit': asr_scene_props.interactive_max_time},
                  'light_sampler': {'algorithm': asr_scene_props.light_sampler,
                                    'enable_light_importance_sampling': asr_scene_props.enable_light_importance_sampling},
                  'shading_result_framebuffer': "permanent" if asr_scene_props.renderer_passes > 1 else "ephemeral"}
    if self.__export_mode != ProjectExportMode.PROJECT_EXPORT:
        if self.__export_mode == ProjectExportMode.INTERACTIVE_RENDER:
            # -1 lets appleseed pick the thread count for interactive sessions.
            render_threads = -1
        else:
            render_threads = asr_scene_props.threads if not asr_scene_props.threads_auto else 'auto'
        parameters['rendering_threads'] = render_threads
        parameters['texture_store'] = {'max_size': asr_scene_props.tex_cache * 1024 * 1024}
    if lighting_engine == 'pt':
        # Path-tracer settings; a value of -1 means "unlimited" for bounce limits.
        parameters['pt'] = {'enable_ibl': True if asr_scene_props.enable_ibl else False,
                            'enable_dl': True if asr_scene_props.enable_dl else False,
                            'enable_caustics': True if scene.appleseed.enable_caustics else False,
                            'clamp_roughness': True if scene.appleseed.enable_clamp_roughness else False,
                            'record_light_paths': True if scene.appleseed.record_light_paths else False,
                            'next_event_estimation': True,
                            'rr_min_path_length': asr_scene_props.rr_start,
                            'optimize_for_lights_outside_volumes': asr_scene_props.optimize_for_lights_outside_volumes,
                            'volume_distance_samples': asr_scene_props.volume_distance_samples,
                            'dl_light_samples': asr_scene_props.dl_light_samples,
                            'ibl_env_samples': asr_scene_props.ibl_env_samples,
                            'dl_low_light_threshold': asr_scene_props.dl_low_light_threshold,
                            'max_diffuse_bounces': asr_scene_props.max_diffuse_bounces if not asr_scene_props.max_diffuse_bounces_unlimited else -1,
                            'max_glossy_bounces': asr_scene_props.max_glossy_brdf_bounces if not asr_scene_props.max_glossy_brdf_bounces_unlimited else -1,
                            'max_specular_bounces': asr_scene_props.max_specular_bounces if not asr_scene_props.max_specular_bounces_unlimited else -1,
                            'max_volume_bounces': asr_scene_props.max_volume_bounces if not asr_scene_props.max_volume_bounces_unlimited else -1,
                            'max_bounces': asr_scene_props.max_bounces if not asr_scene_props.max_bounces_unlimited else -1}
        if not asr_scene_props.max_ray_intensity_unlimited:
            parameters['pt']['max_ray_intensity'] = asr_scene_props.max_ray_intensity
    else:
        # Stochastic progressive photon mapping settings.
        parameters['sppm'] = {'alpha': asr_scene_props.sppm_alpha,
                              'dl_mode': asr_scene_props.sppm_dl_mode,
                              'enable_caustics': "true" if asr_scene_props.enable_caustics else "false",
                              'env_photons_per_pass': asr_scene_props.sppm_env_photons,
                              'initial_radius': asr_scene_props.sppm_initial_radius,
                              'light_photons_per_pass': asr_scene_props.sppm_light_photons,
                              # Leave at 0 for now - not in appleseed.studio GUI
                              'max_path_length': 0,
                              'enable_importons': asr_scene_props.sppm_enable_importons,
                              'importon_lookup_radius': asr_scene_props.sppm_importon_lookup_radius,
                              'max_photons_per_estimate': asr_scene_props.sppm_max_per_estimate,
                              'path_tracing_max_path_length': asr_scene_props.sppm_pt_max_length,
                              'path_tracing_rr_min_path_length': asr_scene_props.sppm_pt_rr_start,
                              'photon_tracing_max_path_length': asr_scene_props.sppm_photon_max_length,
                              'photon_tracing_rr_min_path_length': asr_scene_props.sppm_photon_rr_start}
        if not asr_scene_props.sppm_pt_max_ray_intensity_unlimited:
            parameters['sppm']['path_tracing_max_ray_intensity'] = asr_scene_props.sppm_pt_max_ray_intensity
    if asr_scene_props.shading_override:
        parameters['shading_engine'] = {'override_shading': {'mode': asr_scene_props.override_mode}}
    conf_final.set_parameters(parameters)
    # The same dict is reused for the interactive config with 'pt' forced on.
    # NOTE(review): this mutates `parameters` after conf_final.set_parameters();
    # presumably set_parameters copies its argument — verify against the
    # appleseed Python bindings.
    parameters['lighting_engine'] = 'pt'
    conf_interactive.set_parameters(parameters)
def __calc_viewport_resolution(self, depsgraph, context):
    """Cache the output resolution: the viewport region size when a context
    is given, otherwise the scene render size scaled by the percentage."""
    scene = depsgraph.scene_eval
    percentage = scene.render.resolution_percentage / 100.0
    if context is None:
        # Final render: honor the resolution-percentage scaling.
        width = int(scene.render.resolution_x * percentage)
        height = int(scene.render.resolution_y * percentage)
    else:
        # Interactive render: match the 3D view region exactly.
        width = int(context.region.width)
        height = int(context.region.height)
    self.__viewport_resolution = [width, height]
def __translate_frame(self, depsgraph):
    """Build and return the parameter dict for the appleseed Frame entity
    (resolution, pixel filter, denoiser settings, tile size)."""
    logger.debug("appleseed: Translating frame")
    scene = depsgraph.scene_eval
    asr_scene_props = scene.appleseed
    # Optionally vary the noise seed per frame so animations don't show a
    # static noise pattern.
    noise_seed = (asr_scene_props.noise_seed + scene.frame_current) if asr_scene_props.per_frame_noise else asr_scene_props.noise_seed
    width, height = self.__viewport_resolution
    frame_params = {'resolution': asr.Vector2i(width, height),
                    'camera': "Camera",
                    'filter': asr_scene_props.pixel_filter,
                    'filter_size': asr_scene_props.pixel_filter_size,
                    'denoiser': asr_scene_props.denoise_mode,
                    'noise_seed': noise_seed,
                    'skip_denoised': asr_scene_props.skip_denoised,
                    'random_pixel_order': asr_scene_props.random_pixel_order,
                    'prefilter_spikes': asr_scene_props.prefilter_spikes,
                    'spike_threshold': asr_scene_props.spike_threshold,
                    'patch_distance_threshold': asr_scene_props.patch_distance_threshold,
                    'denoise_scales': asr_scene_props.denoise_scales,
                    'mark_invalid_pixels': asr_scene_props.mark_invalid_pixels}
    # Project export leaves the tile size to appleseed's default.
    if self.__export_mode != ProjectExportMode.PROJECT_EXPORT:
        frame_params['tile_size'] = asr.Vector2i(asr_scene_props.tile_size, asr_scene_props.tile_size)
    return frame_params
def __calc_crop_window(self, depsgraph, context=None):
    """Compute the crop window in pixel coordinates and store it in
    self.__crop_window (None when the full frame should be rendered).

    Note the Y axis is flipped: Blender borders are bottom-up, appleseed's
    crop window is top-down.
    """
    width, height = self.__viewport_resolution
    self.__crop_window = None
    if depsgraph.scene_eval.render.use_border and self.__export_mode != ProjectExportMode.INTERACTIVE_RENDER:
        # Final render with a border region set on the scene.
        min_x = int(depsgraph.scene_eval.render.border_min_x * width)
        min_y = height - int(depsgraph.scene_eval.render.border_max_y * height)
        max_x = int(depsgraph.scene_eval.render.border_max_x * width) - 1
        max_y = height - int(depsgraph.scene_eval.render.border_min_y * height) - 1
        self.__crop_window = [min_x,
                              min_y,
                              max_x,
                              max_y]
    else:
        # Interactive render borders
        if context is not None and context.space_data.use_render_border and context.region_data.view_perspective in ('ORTHO', 'PERSP'):
            # Viewport border drawn in ortho/perspective view mode.
            min_x = int(context.space_data.render_border_min_x * width)
            min_y = height - int(context.space_data.render_border_max_y * height)
            max_x = int(context.space_data.render_border_max_x * width) - 1
            max_y = height - int(context.space_data.render_border_min_y * height) - 1
            self.__crop_window = [min_x,
                                  min_y,
                                  max_x,
                                  max_y]
        elif depsgraph.scene_eval.render.use_border and context.region_data.view_perspective == 'CAMERA':
            # NOTE(review): this branch dereferences `context` without a None
            # check; it appears reachable only from interactive view where a
            # context exists — confirm against callers.
            """
            I can't explain how the following code produces the correct render window.
            I basically threw every parameter combination I could think of together
            until the result looked right.
            """
            # Map the scene border (expressed in camera frame coordinates)
            # into viewport pixels, accounting for viewport zoom and pan.
            zoom = 4 / ((math.sqrt(2) + context.region_data.view_camera_zoom / 50) ** 2)
            frame_aspect_ratio = width / height
            camera_aspect_ratio = calc_film_aspect_ratio(depsgraph.scene_eval)
            if frame_aspect_ratio > 1:
                camera_width = width / zoom
                camera_height = camera_width / camera_aspect_ratio
            else:
                camera_height = height / (zoom * camera_aspect_ratio)
                camera_width = camera_height * camera_aspect_ratio
            view_offset_x, view_offset_y = context.region_data.view_camera_offset
            view_shift_x = ((view_offset_x * 2) / zoom) * width
            view_shift_y = ((view_offset_y * 2) / zoom) * height
            window_shift_x = (width - camera_width) / 2
            window_shift_y = (height - camera_height) / 2
            window_x_min = int(camera_width * depsgraph.scene_eval.render.border_min_x + window_shift_x - view_shift_x)
            window_x_max = int(camera_width * depsgraph.scene_eval.render.border_max_x + window_shift_x - view_shift_x)
            window_y_min = height - int(camera_height * depsgraph.scene_eval.render.border_max_y + window_shift_y - view_shift_y)
            window_y_max = height - int(camera_height * depsgraph.scene_eval.render.border_min_y + window_shift_y - view_shift_y)
            # Check for coordinates outside the render window.
            min_x = clamp_value(window_x_min, 0, width - 1)
            min_y = clamp_value(window_y_min, 0, height - 1)
            max_x = clamp_value(window_x_max, 0, width - 1)
            max_y = clamp_value(window_y_max, 0, height - 1)
            self.__crop_window = [min_x,
                                  min_y,
                                  max_x,
                                  max_y]
def __set_aovs(self, depsgraph):
    """Build and return the AOV container for the frame.

    AOVs are only emitted for final renders; the interactive session renders
    beauty only.
    """
    logger.debug("appleseed: Translating AOVs")
    asr_scene_props = depsgraph.scene_eval.appleseed
    aovs = asr.AOVContainer()
    if self.__export_mode != ProjectExportMode.INTERACTIVE_RENDER:
        # Each boolean scene property shares its name with the AOV model it
        # enables, so the twenty-way if-chain collapses to a data-driven loop.
        # Order matches the original chain (and thus the AOV output order).
        aov_models = ('albedo_aov',
                      'diffuse_aov',
                      'direct_diffuse_aov',
                      'direct_glossy_aov',
                      'emission_aov',
                      'glossy_aov',
                      'indirect_diffuse_aov',
                      'indirect_glossy_aov',
                      'invalid_samples_aov',
                      'normal_aov',
                      'npr_contour_aov',
                      'npr_shading_aov',
                      'pixel_sample_count_aov',
                      'pixel_time_aov',
                      'pixel_variation_aov',
                      'position_aov',
                      'screen_space_velocity_aov',
                      'uv_aov',
                      'cryptomatte_material_aov',
                      'cryptomatte_object_aov')
        for model in aov_models:
            if getattr(asr_scene_props, model):
                aovs.insert(asr.AOV(model, {}))
    return aovs
def __set_post_process(self, depsgraph):
    """Translate the scene's post-processing stages (render stamp or color
    map) and insert them into the frame, ordered by their UI index."""
    asr_scene_props = depsgraph.scene_eval.appleseed
    for index, stage in enumerate(asr_scene_props.post_processing_stages):
        if stage.model == 'render_stamp_post_processing_stage':
            params = {'order': index, 'format_string': stage.render_stamp}
        else:
            # Color-map stage parameters.
            params = {'order': index,
                      'color_map': stage.color_map,
                      'auto_range': stage.auto_range,
                      'range_min': stage.range_min,
                      'range_max': stage.range_max,
                      'add_legend_bar': stage.add_legend_bar,
                      'legend_bar_ticks': stage.legend_bar_ticks,
                      'render_isolines': stage.render_isolines,
                      'line_thickness': stage.line_thickness}
            if stage.color_map == 'custom':
                params['color_map_file_path'] = stage.color_map_file_path
        post_process = asr.PostProcessingStage(stage.model, stage.name, params)
        logger.debug("Adding Post Process: %s", stage.name)
        self.__frame.post_processing_stages().insert(post_process)
def __calc_initial_positions(self, depsgraph, engine, objects_to_add):
    """Record the time-zero camera transform and one instance step per
    visible object instance whose translator is in *objects_to_add*."""
    logger.debug("appleseed: Setting intial object positions for frame %s", depsgraph.scene_eval.frame_current)
    self.__as_camera_translator.add_cam_xform(0.0, engine)
    for instance in depsgraph.object_instances:
        if not instance.show_self:
            continue
        obj, inst_id = self.__get_instance_data(instance)
        translator = objects_to_add.get(obj)
        if translator is not None:
            translator.add_instance_step(0.0, inst_id, instance.matrix_world)
def __calc_motion_steps(self, depsgraph, engine, objects_to_add):
    """Step Blender through every motion-blur sub-frame time, recording the
    camera/transform/deformation keys due at each time, then restore the
    original frame.  Time 0.0 is skipped: it was keyed earlier by
    __calc_initial_positions()."""
    self.__current_frame = depsgraph.scene_eval.frame_current
    logger.debug("appleseed: Processing motion steps for frame %s", self.__current_frame)
    for index, time in enumerate(self.__all_times[1:]):
        # Convert the fractional offset into an (integer frame, subframe) pair.
        new_frame = self.__current_frame + time
        int_frame = math.floor(new_frame)
        subframe = new_frame - int_frame
        engine.frame_set(int_frame, subframe=subframe)
        if time in self.__cam_times:
            self.__as_camera_translator.add_cam_xform(time, engine)
        if time in self.__xform_times:
            for inst in depsgraph.object_instances:
                if inst.show_self:
                    obj, inst_id = self.__get_instance_data(inst)
                    if obj in objects_to_add.keys():
                        objects_to_add[obj].add_instance_step(time, inst_id, inst.matrix_world)
        if time in self.__deform_times:
            for translator in objects_to_add.values():
                translator.set_deform_key(time, depsgraph, index)
    # Return Blender to the frame we started on.
    engine.frame_set(self.__current_frame, subframe=0.0)
def __load_searchpaths(self):
    """Append the asset handler's search paths to the project, skipping any
    path that is already present (including duplicates within the handler's
    own list)."""
    logger.debug("appleseed: Loading searchpaths")
    search_paths = self.__project.get_search_paths()
    for candidate in self.__asset_handler.searchpaths:
        if candidate not in search_paths:
            search_paths.append(candidate)
    self.__project.set_search_paths(search_paths)
def __update_frame_size(self, depsgraph):
    """Replace the project frame after a resolution change.

    The frame is rebuilt with an empty AOV container — AOVs are not
    re-attached here (viewport updates render beauty only).
    """
    frame_params = self.__translate_frame(depsgraph)
    self.__frame = asr.Frame("beauty", frame_params, asr.AOVContainer())
    self.__project.set_frame(self.__frame)
    # Re-fetch the frame after handing it to the project.
    # NOTE(review): presumably set_frame takes ownership/copies, making the
    # local reference stale — verify against the appleseed Python bindings.
    self.__frame = self.__project.get_frame()
# Static utility methods
@staticmethod
def __get_sub_frames(scene, shutter_length, samples, times):
    """Add *samples* evenly spaced shutter times (from shutter_open across
    *shutter_length*) to the set *times*.  Requires samples > 1."""
    assert samples > 1
    step = shutter_length / (samples - 1)
    shutter_open = scene.appleseed.shutter_open
    times.update(shutter_open + index * step for index in range(samples))
@staticmethod
def __get_instance_data(instance):
    """Return (original object, unique instance id string) for a depsgraph
    object instance.  The id embeds the parent and persistent id for
    generated instances so repeated instances of one object stay distinct."""
    if instance.is_instance:  # Instance was generated by a particle system or dupli object.
        obj = instance.instance_object.original
        inst_id = f"{obj.appleseed.obj_name}|{instance.parent.original.name_full}|{instance.persistent_id[0]}"
    else:  # Instance is a discrete object in the scene.
        obj = instance.object.original
        inst_id = f"{obj.appleseed.obj_name}|{instance.persistent_id[0]}"
    return obj, inst_id
@staticmethod
def __round_up_pow2(deformation_blur_samples):
    """Round *deformation_blur_samples* up to the nearest power of two.
    Requires a value of at least 2."""
    assert (deformation_blur_samples >= 2)
    power = 2
    while power < deformation_blur_samples:
        power *= 2
    return power
|
appleseedhq/blenderseed
|
translators/scene.py
|
Python
|
mit
| 42,867
|
[
"VisIt"
] |
95e97391f6e4b21cc6a0fd8a1453cd377ab77ab9fdaaad6d522bd909193cf57f
|
# Copyright (c) 2014, Alexander Korobeynikov
# https://github.com/alveko/easymock
# License: BSD
import os, re, sys
from collections import namedtuple
sys.path.extend(['/ericsson/tools/pycparser'])
from pycparser import c_parser, c_ast, parse_file, c_generator
# FuncDecl holds everything needed to emit a mock for one C function:
# its name, return type info, parameter list, varargs flag, whether it is
# a --wrap function, plus the full declaration text and source location.
FuncDecl = namedtuple('FuncDecl',
                      [ 'name', 'return_type', 'void', 'nonvoid',
                        'params', 'vargs', 'wrap',
                        'full_decl', 'file_line' ])
# FuncParam describes one function parameter: its name and several
# renderings of its type (with/without const, with/without the name,
# and reduced to a basic type).
FuncParam = namedtuple('FuncParam',
                       [ 'name', 'type', 'type_nonconst', 'type_name',
                         'type_name_nonconst', 'type_basic' ])
class FuncDeclVisitor(c_ast.NodeVisitor):
    """pycparser AST visitor that collects C function declarations.

    A preprocessed C file is visited twice: the first pass records typedefs
    (including struct/union tags), the second pass turns every matching
    function declaration into a FuncDecl namedtuple in self.funcdecls.
    """

    def __init__(self, args):
        self.args = args              # parsed command-line options
        self.filename = ""
        self.filebase = ""
        self.funcdecls = []           # collected FuncDecl namedtuples
        self.ast = None
        self.typedefs = {}            # typedef/tag name -> resolved type string
        self.cgen = c_generator.CGenerator()

    def parse(self, fileprep):
        """Parse a preprocessed file and fill self.funcdecls."""
        self.funcdecls = []
        self.fileprep = fileprep
        # Reduce e.g. "foo.h.prep" to "foo.h" for the file filter.
        self.filename = re.sub(r"(\w+.[ch]).*$", r"\1",
                               os.path.basename(fileprep))
        # parse and visit functions
        self.ast = parse_file(self.fileprep)
        # Two passes: typedefs first, then functions, so that types used
        # before their typedef still resolve correctly.
        self.savefuncs = False
        self.visit(self.ast)
        self.savefuncs = True
        self.visit(self.ast)

    def fix_pointer_spaces(self, code):
        """Normalize pointer spacing: 'int *p' / 'int*p' -> 'int* p'."""
        code = re.sub(r" +\*", r"*", code)
        code = re.sub(r"\*([^\* ])", r"* \1", code)
        return code

    def typename_to_type(self, typename, name):
        """Strip the declarator *name* out of *typename*, first converting
        array declarators into pointers."""
        # remove array endings: type var[] -> type* var
        t, tprev = typename, ""
        while t != tprev:
            tprev = t
            t = re.sub(r"^(.+) " + name + r"(.*)\[\d*\]$",
                       r"\1* " + name + r"\2", tprev)
        # remove parameter name
        t = re.sub(r"^(.+[^\w])" + name, r"\1", t).strip()
        return t

    def put_type(self, tname, ttype, overwrite=True):
        """Record a type mapping, optionally preserving an existing entry."""
        if not tname in self.typedefs or overwrite:
            self.typedefs[tname] = ttype

    def get_type(self, tname):
        """Return the resolved type for *tname*, or *tname* if unknown."""
        if tname in self.typedefs:
            return self.typedefs[tname]
        else:
            return tname

    def type_to_basic_type(self, type):
        """Reduce a type string to its basic form: drop const qualifiers,
        resolve typedefs, collapse function pointers to 'function*'."""
        # remove all "const" from the type
        type = re.sub(r"\bconst\b", r"", type)
        type = re.sub(r"\s+", r" ", type).strip()
        # translate into basic type

        def typerepl(m):
            return self.get_type(m.group(1))
        type = re.sub(r"(struct \w+)", typerepl, type)
        type = re.sub(r"(union \w+)", typerepl, type)
        type = re.sub(r"(\w+)", typerepl, type)
        if "(*" in type:
            type = "function*"
        # fix pointers and return
        return self.fix_pointer_spaces(type)

    def visit_NS(self, node, ns):
        """Register a struct/union tag as 'custom' (members visible) or
        'opaque' (forward declaration only) and return its resolved type."""
        stype = ("custom_" if node.decls else "opaque_") + ns
        if node.name:
            sname = ns + " " + node.name
            # Only overwrite an earlier entry when this declaration actually
            # carries the member list.
            self.put_type(sname, stype, node.decls)
            return self.get_type(sname)
        else:
            return stype

    def visit_Struct(self, node):
        return self.visit_NS(node, "struct")

    def visit_Union(self, node):
        return self.visit_NS(node, "union")

    def visit_Typedef(self, node):
        """Record the underlying type of a typedef (enums become 'int',
        pointer typedefs get a trailing '*')."""
        self.generic_visit(node)

        def resolve_typedecl(node):
            if isinstance(node.type, c_ast.Struct):
                return self.visit_Struct(node.type)
            elif isinstance(node.type, c_ast.Union):
                return self.visit_Union(node.type)
            elif isinstance(node.type, c_ast.Enum):
                return "int"
            else:
                return self.get_type(self.cgen.visit(node.type))

        if isinstance(node.type, c_ast.TypeDecl):
            self.put_type(node.name, resolve_typedecl(node.type))
        elif isinstance(node.type, c_ast.PtrDecl) and \
                isinstance(node.type.type, c_ast.TypeDecl):
            self.put_type(node.name, resolve_typedecl(node.type.type) + "*")
        else:
            self.put_type(node.name, "unresolved")

    def visit_Decl(self, node):
        self.generic_visit(node)
        # Function declarations are only harvested on the second pass.
        if isinstance(node.type, c_ast.FuncDecl) and self.savefuncs:
            self.handle_FuncDecl(node)

    def handle_FuncDecl(self, node):
        """Turn a pycparser function declaration into a FuncDecl namedtuple
        (with its FuncParam list) if it passes the name/file filters."""
        func_name = node.name
        # With no explicit filters, every function matches.
        self.args.func_all = (self.args.func_all or
                              not self.args.func and not self.args.func_pfx and
                              not self.args.wrap and not self.args.wrap_pfx)
        cond_func = (func_name in self.args.func or self.args.func_all or
                     [ pfx for pfx in self.args.func_pfx
                       if func_name.startswith(pfx) ] )
        cond_wrap = (func_name in self.args.wrap or self.args.wrap_all or
                     [ pfx for pfx in self.args.wrap_pfx
                       if func_name.startswith(pfx) ] )
        cond_file = (self.args.include_all or
                     os.path.basename(node.coord.file) in self.args.include or
                     os.path.basename(node.coord.file) == self.filename)
        # BUGFIX: the original wrote `not filter(lambda ...)`, which is
        # always False on Python 3 (filter returns a lazy, truthy iterator),
        # so the duplicate check rejected every function.  any() behaves
        # identically on Python 2 and 3.
        already_seen = any(fd.name == (self.args.add_func_pfx + func_name)
                           for fd in self.funcdecls)
        if cond_file and (cond_func or cond_wrap) and not already_seen:
            # get rid of possible "extern" qualifiers
            node.storage = []
            file_line = str(node.coord.file) + ":" + str(node.coord.line)
            type_name = self.fix_pointer_spaces(self.cgen.visit_Typename(node.type))
            rtrn_type = self.typename_to_type(type_name, func_name)
            full_decl = self.fix_pointer_spaces(self.cgen.visit_Decl(node))
            # One parameter per line in the stored declaration.
            full_decl = re.sub(r"([\(,])\s*", r"\1\n    ", full_decl)
            print("Function found: %s" % (func_name))
            func = FuncDecl(name = self.args.add_func_pfx + func_name,
                            return_type = rtrn_type,
                            void = (rtrn_type == 'void'),
                            nonvoid = (rtrn_type != 'void'),
                            params = [],
                            vargs = ("..." in full_decl),
                            wrap = cond_wrap,
                            full_decl = full_decl,
                            file_line = file_line)
            self.funcdecls.append(func)
            if node.type.args:
                for i, param in enumerate(node.type.args.params):
                    ptype_name = self.fix_pointer_spaces(self.cgen.visit(param))
                    # Render a const-free variant of the parameter type.
                    if (hasattr(param, 'type') and \
                        hasattr(param.type, 'quals') and 'const' in param.type.quals):
                        param.type.quals.remove('const')
                    ptype_name_nonconst = \
                        self.fix_pointer_spaces(self.cgen.visit(param))
                    pname = ""
                    if isinstance(param, c_ast.Decl):
                        pname = param.name
                    if isinstance(param, c_ast.Typename) and ptype_name != 'void':
                        # name omitted in declaration
                        pname = "_em_param%d" % (i + 1)
                        ptype_name += " " + pname
                    if pname:
                        ptype = self.typename_to_type(ptype_name, pname)
                        ptype_nonconst = self.typename_to_type(ptype_name_nonconst, pname)
                        ptype_basic = self.type_to_basic_type(ptype)
                        if ptype_basic != "function*":
                            ptype_name_nonconst="%s %s" % (ptype_nonconst, pname)
                        fp = FuncParam(name = pname, type = ptype,
                                       type_nonconst = ptype_nonconst,
                                       type_name = ptype_name,
                                       type_name_nonconst = ptype_name_nonconst,
                                       type_basic = ptype_basic)
                        func.params.append(fp)
|
alveko/easymock
|
gen/funcdecl.py
|
Python
|
bsd-3-clause
| 8,048
|
[
"VisIt"
] |
d21efe8423586ab981c88d06b5151b2ff4de608d89ac98291d739b195d0a3207
|
import getroot
import sys
# LO cross sections from http://cms.cern.ch/iCMS/jsp/mcprod/admin/requestmanagement.jsp
# NNLO cross sections from https://twiki.cern.ch/twiki/bin/viewauth/CMS/StandardModelCrossSectionsat8TeV
# Sample catalogue for the background estimation.
# Per sample: 'xsec' is the LO cross section, 'nlo' the (N)NLO cross section
# (presumably in pb — TODO confirm against the linked TWiki), 'feff' a filter
# efficiency, 'events' the number of generated events and 'weightf' an extra
# weight factor.  The 'data' entry instead carries a trigger-efficiency
# squared in 'feff'.
datasets = {
    'DYJets': {
        'name': "DYJets",
        'file': "madgraphSummer12",
        'xsec': 2950.0,
        'nlo': 3503.71,
        'feff': 1.0,
        'events': 30459503,
        'weightf': 1.0,
    },
    'DYToMuMu': {
        'name': "DYToMuMu",
        'file': "powhegSummer12",
        'xsec': 1871.0,
        'nlo': 1915.08,
        'feff': 1.0,
        'events': 48819386,
        'weightf': 1.0,
    },
    'QCD': {
        'name': "QCD",
        'file': 'qcd',
        'xsec': 3.64e8,
        'nlo': 3.64e8,  # LO
        'feff': 3.70e-4,
        'events': 21484602,
        'weightf': 1.0,
    },
    'TTbar': {
        'name': "TTJets",
        'file': 'ttbar',
        'xsec': 126.0,
        'nlo': 225.197,
        'feff': 1.0,
        'events': 5186494,
        'weightf': 1.0,
    },
    'WJets': {
        'name': "WJets",
        'file': 'wjets',
        'xsec': 30400.0,
        'nlo': 37509.0,
        'feff': 1.0,
        'events': 57709905,
        'weightf': 1.0,
    },
    'data': {
        'name': "Data",
        'file': "data",
        'type': "data",
        'feff': 0.965**2,
    }
}
# conversion from /fb to /pb.
fb2pb = 1000
def main():
    """Read event counts from ROOT files, scale them to the data luminosity
    and print the resulting background table in the requested style.

    NOTE: this module is Python 2 code (print statements, bare exit()).
    """
    files, lumi, cuts, verb, weights, nmin, style, nlo = getopt(sys.argv)
    values = []
    for f in files:
        # search for parameters in datasets:
        item = {}
        for d in datasets.values():
            # Match by substring of the dataset's 'file' tag in the filename.
            if d['file'].lower() in f.lower():
                item = d
                item['file'] = f
                break
        if not item:
            print "No parameters found for", f
            exit(1)
        # calculate expectations
        if 'type' in item and item['type'] == 'data':
            # Data: normalization factor undoes lumi and trigger efficiency.
            item['f'] = 1.0/lumi/datasets['data']['feff']
        else:
            # MC: per-event weight = xsec * feff / N_generated (pb), with
            # fb2pb matching the /fb luminosity.
            if nlo:
                item['f'] = item['nlo']*item['feff']/item['events']/item['weightf']*fb2pb
            else:
                item['f'] = item['xsec']*item['feff']/item['events']/item['weightf']*fb2pb
        # read numbers from file
        rootfile = getroot.openfile(f)
        for c in cuts:
            item[c] = {}
            obj = getroot.getobjectfromnick('npv', rootfile, {'incut': c})
            if weights:
                n = obj.Integral()
            else:
                n = obj.GetEntries()
            # Clamp to nmin so downstream ratios never divide by ~0.
            item[c]['n'] = max(nmin, n)
            item[c]['exp'] = item[c]['n']*item['f']*lumi*datasets['data']['feff']
            if files.index(f) > 0:
                # Percentage relative to the first file (assumed to be data).
                item[c]['pro'] = item[c]['n']*item['f']*lumi*datasets['data']['feff']*100/values[0][c]['n']
            else:
                item[c]['pro'] = item['f']*lumi*datasets['data']['feff']*100
        values.append(item)
    if style == 'dict':
        printdicts(values)
    else:
        prints[style](transpose(maketable(values, cuts, lumi, nlo)))
def maketable(content, cuts, lumi, useNLO):
    """Build the background table as a list of rows.

    The first row is the header; each following row describes one sample.
    Data samples (name containing 'ata') show the luminosity instead of a
    cross section and leave the expectation column empty.
    """
    header = ["Dataset", "xsec", "events", "feff"]
    for cut in cuts:
        header.extend([cut, "events", "exp.", r" % "])
    rows = [header]
    for entry in content:
        is_data = 'ata' in entry['name']
        if is_data:
            row = [entry['name'], "", "L =%6.2f" % lumi, ""]
        else:
            xsec_key = 'nlo' if useNLO else 'xsec'
            row = [entry['name'], entry[xsec_key], entry['events'], entry['feff']]
        for cut in cuts:
            if is_data:
                row.extend(["", entry[cut]['n'], "", entry[cut]['pro']])
            else:
                row.extend(["", entry[cut]['n'], entry[cut]['exp'], entry[cut]['pro']])
        rows.append(row)
    return rows
def printdicts(content):
    """Print every sample dict in *content* via printdict()."""
    for i in content:
        printdict(i)
def printtxt(content):
    """Print the (transposed) table as fixed-width plain text (Python 2
    print statements; trailing commas keep cells on one line)."""
    print "Results:"
    for i in content:
        for j in i:
            if type(j) == str:
                print "%12s" %j,
            elif type(j) == int:
                print "%12d" % j,
            elif type(j) == float:
                print "%12.2f" % j,
        print
def printlatex(content):
    """Print the (transposed) table as a LaTeX tabular environment.

    A trailing '&' is emitted per cell; the "\b\b" backspaces erase the last
    one before the row terminator — this only works on terminals that honor
    backspace characters.
    """
    print "\\begin{table}[htb]"
    print "\t\\centering"
    print "\t\\begin{tabular}{l" + "r"*(len(content[0])-1) + "}"
    for i in content:
        #print "\t\t",
        for j in i:
            if type(j) == str:
                if i == content[0]:
                    # Header row cells are printed bold.
                    print "%16s &" % ("\\textbf{%s}" %j),
                else:
                    print "%16s &" %j,
            elif type(j) == int:
                print "%16d &" % j,
            elif type(j) == float:
                print "%16.2f &" % j,
        print "\b\b\\\\"
    print "\t\\end{tabular}"
    print "\t\\caption{Background estimation}"
    print "\t\\label{tab:background}"
    print "\\end{table}"
def printlist(content):
    """Print each table row as a raw Python list, one per line."""
    for i in content:
        print i
def printdict(dic):
    """Pretty-print one sample dict, headed by its 'name' entry."""
    print "%s: {" % dic['name']
    for k, v in dic.items():
        print "    %10s: %s" % (k, v)
    print "}"
def transpose(table):
    """Return the transpose of a rectangular table (list of equal-length
    rows).  The column lists are sized from the first row, so a later row
    longer than the first raises IndexError."""
    columns = []
    for row_index, row in enumerate(table):
        for col_index, cell in enumerate(row):
            if row_index == 0:
                columns.append([])
            columns[col_index].append(cell)
    return columns
# Dispatch table mapping the -s style option to its printer function.
# NOTE(review): main() special-cases 'dict' before consulting this table,
# so the 'dict' entry is effectively unused there.
prints = {
    'dict': printdicts,
    'txt': printtxt,
    'latex': printlatex,
    'list': printlist,
}
def getopt(args):
    """Hand-rolled command-line parser (Python 2).

    Returns (files, lumi, cutfolders, verbose, weights, nmin, style, nlo).
    Mutates *args* in place; on any parse error the bare except appends
    '-h' so the usage text is printed and the program exits.
    """
    lumi = 1.0
    nmin = 3
    files = []
    cutfolders = ['allevents', 'zcutsonly', 'alleta', 'incut']
    verbose = False
    weights = True
    nlo = False
    style = 'txt'
    program = args.pop(0)
    while len(args) > 0:
        try:
            if args[0] == "-l":
                lumi = float(args.pop(1))
            elif args[0] == "-m":
                # NOTE(review): nmin is parsed as float although the help
                # text formats it with %d — confirm intended type.
                nmin = float(args.pop(1))
            elif args[0] == "-s":
                style = args.pop(1)
            elif args[0] == "-c":
                # Consume folder names until the next '-option'.
                cutfolders = []
                while len(args) > 1 and args[1][0] != '-':
                    cutfolders.append(args.pop(1))
            elif args[0][0] != "-":
                # Bare argument: a ROOT file.
                files.append(args[0])
            elif args[0] == '--':
                pass
            elif args[0] == '-v':
                verbose = True
            elif args[0] == "-n":
                nlo = True
            elif args[0] == "-w":
                weights = False
            elif args[0] == "-h":
                pass
            else:
                print args[0], "not found!"
        except:
            # Any pop/convert failure falls through to the help text below.
            print "Bad arguments!"
            args.append("-h")
        if args[0] == '-h':
            print "Usage: ", program, "[options] data.root signalmc.root bkg1.root bkg2.root"
            print "Options:"
            print "  -c      specify cut folders, default:", ", ".join(cutfolders)
            print "  -h      Show this help"
            print "  -l      luminosity of data sample in /fb (default: %1.2f)" % lumi
            print "  -m      upper limit for number of events (default: %d)" % nmin
            print "  -n      use NNLO cross sections"
            print "  -s      table style [txt, latex, list, dict] (default: %s)" % style
            print "  -v      verbosity"
            print "  -w      do not use event weights"
            exit(0)
        del args[0]
    if verbose:
        print "Files:  ", "\n         ".join(files)
        print "Lumi:   ", lumi, "/fb"
        print "Cuts:   ", ", ".join(cutfolders)
        if weights:
            print "Weights: Use event weights."
    return files, lumi, cutfolders, verbose, weights, nmin, style, nlo
# Script entry point.
if __name__ == "__main__":
    main()
|
dhaitz/CalibFW
|
plotting/modules/background.py
|
Python
|
gpl-2.0
| 6,060
|
[
"FEFF"
] |
fe6bafe4694181b34a30b534ce694dc6d63b032443f1cc499af40ec54b1a45fd
|
#!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Convert SVN based DEPS into .DEPS.git for use with NewGit."""
import optparse
import os
import sys
import deps_utils
import git_tools
import svn_to_git_public
def SplitScmUrl(url):
    """Given a repository URL, return a (url, revision) tuple.

    The revision defaults to 'HEAD' when the URL has no single '@rev'
    suffix; a URL containing more than one '@' also yields 'HEAD'.
    (The previous docstring wrongly claimed a set was returned.)
    """
    url_split = url.split('@')
    scm_url = url_split[0]
    scm_rev = 'HEAD'
    if len(url_split) == 2:
        scm_rev = url_split[1]
    return (scm_url, scm_rev)
def SvnRevToGitHash(svn_rev, git_url, repos_path, workspace, dep_path,
                    git_host, svn_branch_name=None):
    """Convert a SVN revision to a Git commit id.

    Looks up (cloning/fetching a local mirror or checkout as needed) the Git
    commit corresponding to *svn_rev* in the repository at *git_url*, which
    must live under *git_host*.  With neither *repos_path* nor *workspace*
    set, returns a placeholder 'xxx-r<rev>' usable only for eyeballing URLs.

    Raises Exception when *git_url* is not under *git_host*.
    """
    git_repo = None
    if git_url.startswith(git_host):
        git_repo = git_url.replace(git_host, '')
    else:
        raise Exception('Unknown git server %s, host %s' % (git_url, git_host))
    if repos_path is None and workspace is None:
        # We're running without a repository directory (i.e. no -r option).
        # We cannot actually find the commit id, but this mode is useful
        # just for testing the URL mappings.  Produce an output file that
        # can't actually be used, but can be eyeballed for correct URLs.
        return 'xxx-r%s' % svn_rev
    if repos_path:
        # Bare mirror layout under repos_path.
        git_repo_path = os.path.join(repos_path, git_repo)
        mirror = True
    else:
        # Regular working-copy checkout under the workspace.
        git_repo_path = os.path.join(workspace, dep_path)
        mirror = False
    if not os.path.exists(git_repo_path):
        git_tools.Clone(git_url, git_repo_path, mirror)
    git_tools.Fetch(git_repo_path, git_url, mirror)
    if svn_branch_name:
        # svn branches are mirrored with:
        # branches = branches/*:refs/remotes/branch-heads/*
        if mirror:
            refspec = 'refs/branch-heads/' + svn_branch_name
        else:
            refspec = 'refs/remotes/branch-heads/' + svn_branch_name
        return git_tools.Search(git_repo_path, svn_rev, mirror, refspec)
    else:
        if mirror:
            refspec = 'refs/heads/master'
        else:
            refspec = 'refs/remotes/origin/master'
        return git_tools.Search(git_repo_path, svn_rev, mirror, refspec)
def ConvertDepsToGit(deps, options, deps_vars, svn_deps_vars):
  """Convert a 'deps' section in a DEPS file from SVN to Git.

  :param deps: mapping of dep path -> 'url[@rev]' (SVN or Git).
  :param options: parsed command-line options (extra_rules, verify, ...).
  :param deps_vars: output dict of DEPS variables; may gain entries here.
  :param svn_deps_vars: DEPS variables read from the original SVN DEPS.
  :return: (new_deps, bad_git_urls) — the converted deps mapping and the
      set of Git URLs that failed the --verify ping.
  """
  new_deps = {}
  bad_git_urls = set([])
  # Conversion rule modules, searched in order; extra rules take priority.
  svn_to_git_objs = [svn_to_git_public]
  if options.extra_rules:
    rules_dir, rules_file = os.path.split(options.extra_rules)
    rules_file_base = os.path.splitext(rules_file)[0]
    sys.path.insert(0, rules_dir)
    svn_to_git_objs.insert(0, __import__(rules_file_base))
  deps_overrides = {}
  # Allow extra_rules file to override rules in public file.
  for svn_to_git_obj in reversed(svn_to_git_objs):
    deps_overrides.update(getattr(svn_to_git_obj, 'DEPS_OVERRIDES', {}))
  for dep in deps:
    if not deps[dep]: # dep is 'None' and emitted to exclude the dep
      new_deps[dep] = None
      continue
    # Get the URL and the revision/hash for this dependency.
    dep_url, dep_rev = SplitScmUrl(deps[dep])
    path = dep
    git_url = dep_url
    svn_branch = None
    if not dep_url.endswith('.git'):
      # Convert this SVN URL to a Git URL.
      for svn_git_converter in svn_to_git_objs:
        converted_data = svn_git_converter.SvnUrlToGitUrl(dep, dep_url)
        if converted_data:
          path, git_url = converted_data[:2]
          git_host = svn_git_converter.GIT_HOST
          # Optional third element names the SVN branch of the mirror.
          if len(converted_data) > 2:
            svn_branch = converted_data[2]
          break
      else:
        # We skip this path, this must not be required with Git.
        continue
    if options.verify:
      print >> sys.stderr, 'checking ' + git_url + '...',
      if git_tools.Ping(git_url):
        print >> sys.stderr, ' success'
      else:
        print >> sys.stderr, ' failure'
        bad_git_urls.update([git_url])
    # Get the Git hash based off the SVN rev.
    git_hash = ''
    if dep_rev != 'HEAD':
      if dep in deps_overrides:
        # Transfer any required variables over from SVN DEPS.
        if not deps_overrides[dep] in svn_deps_vars:
          raise Exception('Missing DEPS variable: %s' % deps_overrides[dep])
        deps_vars[deps_overrides[dep]] = (
            '@' + svn_deps_vars[deps_overrides[dep]].lstrip('@'))
        # Tag this variable as needing a transform by Varify() later.
        git_hash = '%s_%s' % (deps_utils.VARIFY_MARKER_TAG_PREFIX,
                              deps_overrides[dep])
      else:
        # Pass-through the hash for Git repositories. Resolve the hash for
        # subversion repositories.
        if dep_url.endswith('.git'):
          git_hash = '@%s' % dep_rev
        else:
          git_hash = '@%s' % SvnRevToGitHash(
              dep_rev, git_url, options.repos, options.workspace, path,
              git_host, svn_branch)
    # If this is webkit, we need to add the var for the hash.
    if dep == 'src/third_party/WebKit' and dep_rev:
      deps_vars['webkit_rev'] = git_hash
      git_hash = 'VAR_WEBKIT_REV'
    # Add this Git dep to the new deps.
    new_deps[path] = '%s%s' % (git_url, git_hash)
  return new_deps, bad_git_urls
def main():
  """Command-line entry point: convert a SVN DEPS file into .DEPS.git.

  :return: process exit status — 0 on success, 2 when some repositories
      could not be resolved (with --verify).
  """
  parser = optparse.OptionParser()
  parser.add_option('-d', '--deps', default='DEPS',
                    help='path to the DEPS file to convert')
  parser.add_option('-o', '--out',
                    help='path to the converted DEPS file (default: stdout)')
  parser.add_option('-t', '--type',
                    help='[DEPRECATED] type of DEPS file (public, etc)')
  parser.add_option('-x', '--extra-rules',
                    help='Path to file with additional conversion rules.')
  parser.add_option('-r', '--repos',
                    help='path to the directory holding all the Git repos')
  parser.add_option('-w', '--workspace', metavar='PATH',
                    help='top level of a git-based gclient checkout')
  parser.add_option('--verify', action='store_true',
                    help='ping each Git repo to make sure it exists')
  options = parser.parse_args()[0]
  # Get the content of the DEPS file.
  deps_content = deps_utils.GetDepsContent(options.deps)
  (deps, deps_os, include_rules, skip_child_includes, hooks,
   svn_deps_vars) = deps_content
  if options.extra_rules and options.type:
    parser.error('Can\'t specify type and extra-rules at the same time.')
  elif options.type:
    # Legacy --type maps to a bundled svn_to_git_<type>.py rules file.
    options.extra_rules = os.path.join(
        os.path.abspath(os.path.dirname(__file__)),
        'svn_to_git_%s.py' % options.type)
  if options.extra_rules and not os.path.exists(options.extra_rules):
    raise Exception('Can\'t locate rules file "%s".' % options.extra_rules)
  # Create a var containing the Git and Webkit URL, this will make it easy for
  # people to use a mirror instead.
  git_url = 'https://chromium.googlesource.com'
  deps_vars = {
      'git_url': git_url,
      'webkit_url': git_url + '/chromium/blink.git',
  }
  # Convert the DEPS file to Git.
  deps, baddeps = ConvertDepsToGit(deps, options, deps_vars, svn_deps_vars)
  # Also convert each per-OS deps section, accumulating bad URLs.
  for os_dep in deps_os:
    deps_os[os_dep], os_bad_deps = ConvertDepsToGit(
        deps_os[os_dep], options, deps_vars, svn_deps_vars)
    baddeps = baddeps.union(os_bad_deps)
  if baddeps:
    print >> sys.stderr, ('\nUnable to resolve the following repositories. '
        'Please make sure\nthat any svn URLs have a git mirror associated with '
        'them.\nTo see the exact error, run `git ls-remote [repository]` where'
        '\n[repository] is the URL ending in .git (strip off the @revision\n'
        'number.) For more information, visit http://code.google.com\n'
        '/p/chromium/wiki/UsingNewGit#Adding_new_repositories_to_DEPS.\n')
    for dep in baddeps:
      print >> sys.stderr, ' ' + dep
    return 2
  else:
    if options.verify:
      # --verify only checks reachability; no output file is written.
      print >> sys.stderr, ('\nAll referenced repositories were successfully '
                            'resolved.')
      return 0
  # Write the DEPS file to disk.
  deps_utils.WriteDeps(options.out, deps_vars, deps, deps_os, include_rules,
                       skip_child_includes, hooks)
  return 0
# Script entry point: propagate main()'s status code to the shell.
if __name__ == '__main__':
  sys.exit(main())
|
windyuuy/opera
|
chromium/src/tools/deps2git/deps2git.py
|
Python
|
bsd-3-clause
| 8,156
|
[
"VisIt"
] |
9520eab633b42d8ffb4c965d96e9202d362537dc6cd51bea21b0ca48c703a1ff
|
""" Comprehension patterns transforms list comprehension into intrinsics. """
from pythran.analyses import OptimizableComprehension
from pythran.passmanager import Transformation
from pythran.transformations.normalize_tuples import ConvertToTuple
from pythran.conversion import mangle
from pythran.utils import attr_to_path, path_to_attr
import gast as ast
class ComprehensionPatterns(Transformation):
    '''
    Transforms list comprehension into intrinsics.
    >>> import gast as ast
    >>> from pythran import passmanager, backend
    >>> node = ast.parse("def foo(y) : return (x for x in y)")
    >>> pm = passmanager.PassManager("test")
    >>> _, node = pm.apply(ComprehensionPatterns, node)
    >>> 'map' in pm.dump(backend.Python, node)
    True
    >>> node = ast.parse("def foo(y) : return [0 for _ in builtins.range(y)]")
    >>> _, node = pm.apply(ComprehensionPatterns, node)
    >>> print(pm.dump(backend.Python, node))
    def foo(y):
        return ([0] * builtins.len(builtins.range(y)))
    '''

    def __init__(self):
        # Depends on the OptimizableComprehension analysis to know which
        # comprehensions can safely be rewritten.
        Transformation.__init__(self, OptimizableComprehension)

    def visit_Module(self, node):
        # Track whether any rewrite needed itertools.product, so the
        # itertools import is only prepended when actually used.
        self.use_itertools = False
        self.generic_visit(node)
        if self.use_itertools:
            import_alias = ast.alias(name='itertools',
                                     asname=mangle('itertools'))
            importIt = ast.Import(names=[import_alias])
            node.body.insert(0, importIt)
        return node

    def make_Iterator(self, gen):
        # Return the generator's iterable, wrapped in builtins.filter when
        # it carries 'if' clauses (several clauses are and-ed together).
        if gen.ifs:
            ldFilter = ast.Lambda(
                ast.arguments([ast.Name(gen.target.id, ast.Param(),
                                        None, None)],
                              [], None, [], [], None, []),
                ast.BoolOp(ast.And(), gen.ifs)
                if len(gen.ifs) > 1 else gen.ifs[0])
            ifilterName = ast.Attribute(
                value=ast.Name(id='builtins',
                               ctx=ast.Load(),
                               annotation=None, type_comment=None),
                attr='filter', ctx=ast.Load())
            return ast.Call(ifilterName, [ldFilter, gen.iter], [])
        else:
            return gen.iter

    def visitComp(self, node, make_attr):
        # Shared rewrite for list comprehensions and generator expressions:
        # build (filtered) iterators from the generators, turn the element
        # expression into a lambda, then emit make_attr(lambda, iterator).
        if node in self.optimizable_comprehension:
            self.update = True
            self.generic_visit(node)
            iters = [self.make_Iterator(gen) for gen in node.generators]
            variables = [ast.Name(gen.target.id, ast.Param(), None, None)
                         for gen in node.generators]
            # If dim = 1, product is useless
            if len(iters) == 1:
                iterAST = iters[0]
                varAST = ast.arguments([variables[0]], [],
                                       None, [], [], None, [])
            else:
                # Several generators: iterate their cartesian product and
                # rewrite the element expression to unpack the tuple.
                self.use_itertools = True
                prodName = ast.Attribute(
                    value=ast.Name(id=mangle('itertools'),
                                   ctx=ast.Load(),
                                   annotation=None, type_comment=None),
                    attr='product', ctx=ast.Load())
                varid = variables[0].id # retarget this id, it's free
                renamings = {v.id: (i,) for i, v in enumerate(variables)}
                node.elt = ConvertToTuple(varid, renamings).visit(node.elt)
                iterAST = ast.Call(prodName, iters, [])
                varAST = ast.arguments([ast.Name(varid, ast.Param(),
                                                 None, None)],
                                       [], None, [], [], None, [])
            ldBodymap = node.elt
            ldmap = ast.Lambda(varAST, ldBodymap)
            return make_attr(ldmap, iterAST)
        else:
            return self.generic_visit(node)

    def visit_ListComp(self, node):
        # List comprehension -> builtins.list(builtins.map(lambda, iter)).
        # Special case: [const for _ in builtins.range(n)] becomes
        # [const] * builtins.len(builtins.range(n)).
        def makeattr(*args):
            r = ast.Attribute(
                value=ast.Name(id='builtins',
                               ctx=ast.Load(),
                               annotation=None,
                               type_comment=None),
                attr='map', ctx=ast.Load())
            r = ast.Call(r, list(args), [])
            r = ast.Call(ast.Attribute(ast.Name('builtins', ast.Load(),
                                                None, None),
                                       'list', ast.Load()),
                         [r], [])
            return r

        if isinstance(node.elt, ast.Constant) and len(node.generators) == 1:
            gen = node.generators[0]
            if not gen.ifs and isinstance(gen.iter, ast.Call):
                try:
                    # attr_to_path raises TypeError on non-attribute funcs.
                    path = attr_to_path(gen.iter.func)[1]
                    range_path = 'pythonic', 'builtins', 'functor', 'range'
                    if path == range_path and len(gen.iter.args) == 1:
                        self.update = True
                        return ast.BinOp(
                            ast.List([node.elt], ast.Load()),
                            ast.Mult(),
                            ast.Call(path_to_attr(('builtins', 'len')),
                                     [gen.iter],
                                     []))
                except TypeError:
                    pass
        return self.visitComp(node, makeattr)

    def visit_GeneratorExp(self, node):
        # Generator expression -> builtins.map(lambda, iter), with no
        # surrounding list() call.
        def makeattr(*args):
            return ast.Call(ast.Attribute(
                value=ast.Name(id='builtins',
                               ctx=ast.Load(),
                               annotation=None, type_comment=None),
                attr='map', ctx=ast.Load()), list(args), [])
        return self.visitComp(node, makeattr)
|
pombredanne/pythran
|
pythran/optimizations/comprehension_patterns.py
|
Python
|
bsd-3-clause
| 5,680
|
[
"VisIt"
] |
8252f163bfdc71c42fbad7239b116be39fc637605000be0f76b8365ac7b9e8dc
|
from direct.showbase.PythonUtil import randFloat, normalDistrib, Enum
from direct.showbase.PythonUtil import clampScalar
from toontown.toonbase import TTLocalizer, ToontownGlobals
import random, copy
TraitDivisor = 10000
def getTraitNames():
    """Return the list of trait names, lazily cached on the PetTraits class."""
    if not hasattr(PetTraits, 'TraitNames'):
        # First call: derive the name list from the trait descriptors.
        PetTraits.TraitNames = [desc[0] for desc in PetTraits.TraitDescs]
    return PetTraits.TraitNames
def uniform(min, max, rng):
    """Draw a uniformly-distributed float in [min, max] using rng.

    NOTE: the parameter names shadow the min/max builtins; kept for
    interface compatibility with existing callers.
    """
    draw = rng.random
    return randFloat(min, max, draw)
def gaussian(min, max, rng):
    """Draw a normally-distributed float in [min, max] using rng.

    NOTE: the parameter names shadow the min/max builtins; kept for
    interface compatibility with existing callers.
    """
    draw = rng.gauss
    return normalDistrib(min, max, draw)
class TraitDistribution:
    """Describes how one pet trait's value is distributed.

    Subclasses override TraitType (whether values rise or fall across
    safe zones) and Sz2MinMax (per-safe-zone (min, max) value ranges).
    """
    TraitQuality = Enum('VERY_BAD, BAD, AVERAGE, GOOD, VERY_GOOD')
    TraitTypes = Enum('INCREASING, DECREASING')
    # Subclasses must provide both of these.
    Sz2MinMax = None
    TraitType = None
    # Percentile cutoffs mapping a value onto a TraitQuality bucket; the
    # DECREASING table mirrors the INCREASING one.
    TraitCutoffs = {TraitTypes.INCREASING: {TraitQuality.VERY_BAD: 0.1,
                                            TraitQuality.BAD: 0.25,
                                            TraitQuality.GOOD: 0.75,
                                            TraitQuality.VERY_GOOD: 0.9},
                    TraitTypes.DECREASING: {TraitQuality.VERY_BAD: 0.9,
                                            TraitQuality.BAD: 0.75,
                                            TraitQuality.GOOD: 0.25,
                                            TraitQuality.VERY_GOOD: 0.1}}

    def __init__(self, rndFunc = gaussian):
        # rndFunc(min, max, rng) draws a value; defaults to the gaussian
        # helper defined above.
        self.rndFunc = rndFunc
        # Cache this subclass's global (min, max) across all safe zones;
        # computed once per class, on first instantiation.
        if not hasattr(self.__class__, 'GlobalMinMax'):
            _min = 1.0
            _max = 0.0
            minMax = self.Sz2MinMax
            for sz in minMax:
                thisMin, thisMax = minMax[sz]
                _min = min(_min, thisMin)
                _max = max(_max, thisMax)
            self.__class__.GlobalMinMax = [_min, _max]

    def getRandValue(self, szId, rng = random):
        # Draw a random trait value within the range for safe zone szId.
        # (Local names shadow the min/max builtins, retained as-is.)
        min, max = self.getMinMax(szId)
        return self.rndFunc(min, max, rng)

    def getHigherIsBetter(self):
        # Higher raw values are 'better' only for INCREASING traits.
        return self.TraitType == TraitDistribution.TraitTypes.INCREASING

    def getMinMax(self, szId):
        # (min, max) value range for the given safe zone.
        return (self.Sz2MinMax[szId][0], self.Sz2MinMax[szId][1])

    def getGlobalMinMax(self):
        # (min, max) across every safe zone, cached by __init__.
        return (self.GlobalMinMax[0], self.GlobalMinMax[1])

    def _getTraitPercent(self, traitValue):
        # Normalize traitValue into [0..1] within the global range,
        # widening the range if the value falls outside it.
        # NOTE(review): divides by (gMax - gMin); assumes the range is
        # never degenerate — confirm for custom subclasses.
        gMin, gMax = self.getGlobalMinMax()
        if traitValue < gMin:
            gMin = traitValue
        elif traitValue > gMax:
            gMax = traitValue
        return (traitValue - gMin) / (gMax - gMin)

    def getPercentile(self, traitValue):
        # For DECREASING traits a low raw value means a high percentile.
        if self.TraitType is TraitDistribution.TraitTypes.INCREASING:
            return self._getTraitPercent(traitValue)
        else:
            return 1.0 - self._getTraitPercent(traitValue)

    def getQuality(self, traitValue):
        # Bucket the value into VERY_BAD..VERY_GOOD using TraitCutoffs;
        # the comparison directions flip for DECREASING traits.
        TraitQuality = TraitDistribution.TraitQuality
        TraitCutoffs = self.TraitCutoffs[self.TraitType]
        percent = self._getTraitPercent(traitValue)
        if self.TraitType is TraitDistribution.TraitTypes.INCREASING:
            if percent <= TraitCutoffs[TraitQuality.VERY_BAD]:
                return TraitQuality.VERY_BAD
            elif percent <= TraitCutoffs[TraitQuality.BAD]:
                return TraitQuality.BAD
            elif percent >= TraitCutoffs[TraitQuality.VERY_GOOD]:
                return TraitQuality.VERY_GOOD
            elif percent >= TraitCutoffs[TraitQuality.GOOD]:
                return TraitQuality.GOOD
            else:
                return TraitQuality.AVERAGE
        elif percent <= TraitCutoffs[TraitQuality.VERY_GOOD]:
            return TraitQuality.VERY_GOOD
        elif percent <= TraitCutoffs[TraitQuality.GOOD]:
            return TraitQuality.GOOD
        elif percent >= TraitCutoffs[TraitQuality.VERY_BAD]:
            return TraitQuality.VERY_BAD
        elif percent >= TraitCutoffs[TraitQuality.BAD]:
            return TraitQuality.BAD
        else:
            return TraitQuality.AVERAGE

    def getExtremeness(self, traitValue):
        # Distance of the value from the distribution midpoint, in [0..1].
        percent = self._getTraitPercent(traitValue)
        if percent < 0.5:
            howExtreme = (0.5 - percent) * 2.0
        else:
            howExtreme = (percent - 0.5) * 2.0
        return clampScalar(howExtreme, 0.0, 1.0)
class PetTraits:
    """A pet's full personality trait set.

    Traits are derived deterministically from (traitSeed, safeZoneId),
    or taken verbatim from an optional explicit value list.
    """

    class StdIncDistrib(TraitDistribution):
        # Trait ranges rise with the level of the purchase safe zone.
        TraitType = TraitDistribution.TraitTypes.INCREASING
        Sz2MinMax = {ToontownGlobals.ToontownCentral: (0.2, 0.65),
                     ToontownGlobals.DonaldsDock: (0.3, 0.7),
                     ToontownGlobals.DaisyGardens: (0.4, 0.75),
                     ToontownGlobals.MinniesMelodyland: (0.5, 0.8),
                     ToontownGlobals.TheBrrrgh: (0.6, 0.85),
                     ToontownGlobals.DonaldsDreamland: (0.7, 0.9),
                     ToontownGlobals.FunnyFarm: (0.8, 0.9)}

    class StdDecDistrib(TraitDistribution):
        # Trait ranges fall with the level of the purchase safe zone.
        TraitType = TraitDistribution.TraitTypes.DECREASING
        Sz2MinMax = {ToontownGlobals.ToontownCentral: (0.35, 0.8),
                     ToontownGlobals.DonaldsDock: (0.3, 0.7),
                     ToontownGlobals.DaisyGardens: (0.25, 0.6),
                     ToontownGlobals.MinniesMelodyland: (0.2, 0.5),
                     ToontownGlobals.TheBrrrgh: (0.15, 0.4),
                     ToontownGlobals.DonaldsDreamland: (0.1, 0.3),
                     ToontownGlobals.FunnyFarm: (0.05, 0.2)}

    class ForgetfulnessDistrib(TraitDistribution):
        # Forgetfulness always starts at 0.0; only the upper bound shrinks.
        TraitType = TraitDistribution.TraitTypes.DECREASING
        Sz2MinMax = {ToontownGlobals.ToontownCentral: (0.0, 1.0),
                     ToontownGlobals.DonaldsDock: (0.0, 0.9),
                     ToontownGlobals.DaisyGardens: (0.0, 0.8),
                     ToontownGlobals.MinniesMelodyland: (0.0, 0.7),
                     ToontownGlobals.TheBrrrgh: (0.0, 0.6),
                     ToontownGlobals.DonaldsDreamland: (0.0, 0.5),
                     ToontownGlobals.FunnyFarm: (0.0, 0.4)}

    # (name, distribution, hasWorth) for every trait, in canonical order.
    TraitDescs = (('forgetfulness', ForgetfulnessDistrib(), True),
                  ('boredomThreshold', StdIncDistrib(), True),
                  ('restlessnessThreshold', StdIncDistrib(), True),
                  ('playfulnessThreshold', StdDecDistrib(), True),
                  ('lonelinessThreshold', StdIncDistrib(), True),
                  ('sadnessThreshold', StdIncDistrib(), True),
                  ('fatigueThreshold', StdIncDistrib(), True),
                  ('hungerThreshold', StdIncDistrib(), True),
                  ('confusionThreshold', StdIncDistrib(), True),
                  ('excitementThreshold', StdDecDistrib(), True),
                  ('angerThreshold', StdIncDistrib(), True),
                  ('surpriseThreshold', StdIncDistrib(), False),
                  ('affectionThreshold', StdDecDistrib(), True))
    NumTraits = len(TraitDescs)

    class Trait:
        """One generated trait value plus its derived statistics."""

        def __init__(self, index, traitsObj, value = None):
            """
            :param index: index into PetTraits.TraitDescs.
            :param traitsObj: owning PetTraits (supplies safeZoneId + rng).
            :param value: explicit value, or None to draw one randomly.
            """
            self.name, distrib, self.hasWorth = PetTraits.TraitDescs[index]
            if value is not None:
                self.value = value
            else:
                szId = traitsObj.safeZoneId
                self.value = distrib.getRandValue(szId, traitsObj.rng)
            # Quantize to 1/TraitDivisor steps.
            self.value = int(self.value * TraitDivisor) / float(TraitDivisor)
            self.higherIsBetter = distrib.getHigherIsBetter()
            self.percentile = distrib.getPercentile(self.value)
            self.quality = distrib.getQuality(self.value)
            self.howExtreme = distrib.getExtremeness(self.value)

        def __repr__(self):
            return 'Trait: %s, %s, %s, %s' % (self.name,
                                              self.value,
                                              TraitDistribution.TraitQuality.getString(self.quality),
                                              self.howExtreme)

    def __init__(self, traitSeed, safeZoneId, traitValueList = None):
        """
        :param traitSeed: RNG seed; same seed + zone yields the same traits.
        :param safeZoneId: safe zone whose distributions are used.
        :param traitValueList: optional explicit per-trait values; entries
            that are missing or <= 0.0 are generated randomly.
        """
        # Fixed: mutable default argument ([]) replaced by a None sentinel;
        # behavior is unchanged since the list is only ever read.
        if traitValueList is None:
            traitValueList = []
        self.traitSeed = traitSeed
        self.safeZoneId = safeZoneId
        self.rng = random.Random(self.traitSeed)
        self.traits = {}
        for i in xrange(len(PetTraits.TraitDescs)):
            if i < len(traitValueList) and traitValueList[i] > 0.0:
                trait = PetTraits.Trait(i, self, traitValueList[i])
            else:
                trait = PetTraits.Trait(i, self)
            self.traits[trait.name] = trait
            # Also expose each trait value as a plain attribute.
            self.__dict__[trait.name] = trait.value
        # Collect non-AVERAGE worthwhile traits, most extreme first
        # (insertion sort keeps the list ordered by howExtreme, descending).
        extremeTraits = []
        for trait in self.traits.values():
            if not trait.hasWorth:
                continue
            if trait.quality == TraitDistribution.TraitQuality.AVERAGE:
                continue
            i = 0
            while i < len(extremeTraits) and extremeTraits[i].howExtreme > trait.howExtreme:
                i += 1
            extremeTraits.insert(i, trait)
        self.extremeTraits = []
        for trait in extremeTraits:
            self.extremeTraits.append((trait.name, trait.quality))

    def getValueList(self):
        """Return all trait values in TraitDescs order."""
        traitValues = []
        for desc in PetTraits.TraitDescs:
            traitName = desc[0]
            traitValues.append(self.traits[traitName].value)
        return traitValues

    def getTraitValue(self, traitName):
        """Return the value of the named trait."""
        return self.traits[traitName].value

    def getExtremeTraits(self):
        # Shallow copy so callers cannot mutate the cached list.
        return copy.copy(self.extremeTraits)

    def getOverallValue(self):
        """Return the average 'goodness' in [0..1] of traits with worth.

        Bug fix: the sum is now divided by numUsed (the number of traits
        actually accumulated) rather than the total trait count; numUsed
        was previously computed but never used, diluting the average with
        worthless traits.
        """
        total = 0
        numUsed = 0
        for trait in self.traits.values():
            if trait.hasWorth:
                if trait.higherIsBetter:
                    value = trait.value
                else:
                    value = 1.0 - trait.value
                total += value
                numUsed += 1
        if numUsed == 0:
            # No worthwhile traits at all; avoid dividing by zero.
            return 0.0
        return total / numUsed

    def getExtremeTraitDescriptions(self):
        """Return localized description strings for the extreme traits."""
        descs = []
        TraitQuality = TraitDistribution.TraitQuality
        Quality2index = {TraitQuality.VERY_BAD: 0,
                         TraitQuality.BAD: 1,
                         TraitQuality.GOOD: 2,
                         TraitQuality.VERY_GOOD: 3}
        for name, quality in self.extremeTraits:
            descs.append(TTLocalizer.PetTrait2descriptions[name][Quality2index[quality]])
        return descs
|
Spiderlover/Toontown
|
toontown/pets/PetTraits.py
|
Python
|
mit
| 9,550
|
[
"Gaussian"
] |
7714c8fa423b6c5a93d0cd96736187d528064e62e0a6568406a3bc762384df7f
|
"""Create HDF5 dataset for UCF-101.
Author: Yuhuang Hu
Email : duguyue100@gmail.com
"""
import os
import cPickle as pickle
import h5py
from spikefuel import dataset
def printname(name):
    """Print an HDF5 object name (callback for h5py's visit())."""
    print name

# Paths and dataset identifiers.
# NOTE(review): machine-specific hard-coded paths; adjust before running.
db_name = "UCF50_30fps_20160409"
save_path = "/home/inilab/data"
ucf50_data_path = "/home/inilab/data/UCF50"
ucf50_path = "/home/inilab/data/ARCHIVE/UCF-50-ARCHIVE/ucf50_recordings_30fps"
ucf50_stats_path = "./data/ucf50_stats.pkl"
# Read the pickled dataset statistics (Python 2 file() builtin).
f = file(ucf50_stats_path, mode="r")
ucf50_stats = pickle.load(f)
f.close()
# List of recordings contained in the stats.
ucf50_list = ucf50_stats["ucf50_list"]
# Build the HDF5 database from the recordings.
dataset.create_ucf50_db(db_name, save_path, ucf50_path, ucf50_stats,
                        ucf50_data_path)
# Re-open the result read-only and dump every group/dataset name.
db = h5py.File(os.path.join(save_path, db_name+".hdf5"), mode="r")
db.visit(printname)
|
duguyue100/spikefuel
|
scripts/create_ucf50_dataset.py
|
Python
|
mit
| 855
|
[
"VisIt"
] |
5bf0c51e747b33b39c7b9028e331b0e8500bf4b22568851ff16eae8a9034a82b
|
import smbl
import snakemake
import os
from ._program import *
BWA = os.path.join(smbl.bin_dir,"bwa")
##########################################
##########################################
class Bwa(Program):
    """SMBL wrapper for the BWA read mapper.

    Handles installation of the bwa binary and registers snakemake rules
    for indexing a FASTA reference and mapping FASTQ reads into a BAM
    file. Subclasses implement the actual mapping command.
    """

    @classmethod
    def get_installation_files(cls):
        # Files whose presence marks BWA as installed.
        return [BWA]

    @classmethod
    def supported_platforms(cls):
        return ["cygwin","osx","linux"]

    @classmethod
    def install(cls):
        # Build BWA from its upstream git repository and copy the binary
        # into SMBL's bin directory.
        cls.git_clone("http://github.com/lh3/bwa","bwa")
        cls.run_make("bwa")
        cls.install_file("bwa/bwa",BWA)

    ##########################################

    def __init__(
            self,
            fasta,
            bam,
            fastq_1,
            fastq_2=None,
            ):
        """
        :param fasta: reference FASTA file name.
        :param bam: output BAM file name.
        :param fastq_1: first (or only) FASTQ file of reads.
        :param fastq_2: second FASTQ file for paired-end reads, or None.
        """
        super().__init__()

        self._fa_fn=fasta
        self._fq1_fn=fastq_1
        self._fq2_fn=fastq_2
        self._bam_fn=bam

        # Register snakemake rules for index building and read mapping.
        smbl.utils.Rule(
            input=self.make_index_input(),
            output=self.make_index_output(),
            run=self.make_index,
        )
        smbl.utils.Rule(
            input=self.map_reads_input(),
            output=self.map_reads_output(),
            run=self.map_reads,
        )

    def fq_fn(self):
        # FASTQ files as a list: one entry for single-end, two for paired.
        if self._fq2_fn==None:
            return [self._fq1_fn]
        else:
            return [self._fq1_fn,self._fq2_fn]

    def fa_fn(self):
        return self._fa_fn

    def bam_fn(self):
        return self._bam_fn

    def index_fns(self):
        # The five files `bwa index` produces next to the FASTA.
        return [
            self._fa_fn+".amb",
            self._fa_fn+".ann",
            self._fa_fn+".bwt",
            self._fa_fn+".pac",
            self._fa_fn+".sa"
        ]

    ##########################################

    def make_index(self):
        # Run `bwa index` on the reference FASTA.
        smbl.utils.shell('"{bwa}" index {fa}'.format(
                bwa=BWA,
                fa=self._fa_fn,
            ))

    def make_index_input(self):
        return [
            BWA,
            self._fa_fn,
        ]

    def make_index_output(self):
        return [
            self.index_fns(),
        ]

    ##########################################

    def map_reads(self):
        # Abstract: subclasses (BwaMem, BwaSw) supply the mapping command.
        smbl.messages.error("Subclass of class Bwa should be used",program="SMBL",subprogram="BWA")
        raise NotImplementedError("Subclass of class Bwa should be used")

    def map_reads_input(self):
        return [
            BWA,
            smbl.prog.SAMTOOLS,
            self.index_fns(),
            self._fa_fn,
            self.fq_fn(),
        ]

    def map_reads_output(self):
        return [
            self.bam_fn(),
        ]
##########################################
##########################################
class BwaMem(Bwa):
    """BWA-MEM mapper: runs `bwa mem` and pipes the SAM stream through
    samtools to produce the BAM output."""

    def __init__(
            self,
            fasta,
            bam,
            fastq_1,
            fastq_2=None,
            ):
        super().__init__(
            fasta=fasta,
            fastq_1=fastq_1,
            fastq_2=fastq_2,
            bam=bam,
        )

    def map_reads(self,number_of_threads=1):
        """Map the reads with `bwa mem`, converting SAM to BAM on the fly."""
        # Quote each FASTQ path; fq_fn() decides single- vs paired-end.
        reads_string = ' '.join('"{}"'.format(fq) for fq in self.fq_fn())
        command = '"{bwa}" mem -t {threads} "{idx}" {reads_string} | "{samtools}" view -bS - > "{bam}"'.format(
            bwa=BWA,
            samtools=smbl.prog.SAMTOOLS,
            idx=self._fa_fn,
            reads_string=reads_string,
            bam=self._bam_fn,
            threads=number_of_threads,
        )
        smbl.utils.shell(command)
##########################################
##########################################
class BwaSw(Bwa):
    """BWA-SW mapper: runs `bwa bwasw` and pipes the SAM stream through
    samtools to produce the BAM output."""

    def __init__(
            self,
            fasta,
            bam,
            fastq_1,
            fastq_2=None,
            ):
        super().__init__(
            fasta=fasta,
            fastq_1=fastq_1,
            fastq_2=fastq_2,
            bam=bam,
        )

    def map_reads(self,number_of_threads=1):
        """Map the reads with `bwa bwasw`, converting SAM to BAM on the fly."""
        # Quote each FASTQ path; fq_fn() decides single- vs paired-end.
        quoted = ['"{}"'.format(fq) for fq in self.fq_fn()]
        reads_string = ' '.join(quoted)
        smbl.utils.shell(
            '"{bwa}" bwasw -t {threads} "{idx}" {reads_string} | "{samtools}" view -bS - > "{bam}"'.format(
                bwa=BWA,
                samtools=smbl.prog.SAMTOOLS,
                idx=self._fa_fn,
                reads_string=reads_string,
                bam=self._bam_fn,
                threads=number_of_threads,
            )
        )
|
karel-brinda/smbl
|
smbl/prog/plugins/bwa.py
|
Python
|
mit
| 3,737
|
[
"BWA"
] |
314d59d68b49e18eeba976c0b7b6ad72f4cee06a6f9b0c3751af60ec9b141bae
|
"""
Mesh generator class. Generates Zinc meshes using scaffoldmaker.
"""
from __future__ import division
import copy
import os
import math
import string
from opencmiss.maths.vectorops import axis_angle_to_rotation_matrix, euler_to_rotation_matrix, matrix_mult, rotation_matrix_to_euler
from opencmiss.utils.zinc.field import fieldIsManagedCoordinates, findOrCreateFieldCoordinates, findOrCreateFieldStoredMeshLocation, findOrCreateFieldStoredString
from opencmiss.utils.zinc.finiteelement import evaluateFieldNodesetRange
from opencmiss.utils.zinc.general import ChangeManager
from opencmiss.zinc.field import Field, FieldGroup
from opencmiss.zinc.glyph import Glyph
from opencmiss.zinc.graphics import Graphics
from opencmiss.zinc.node import Node
from opencmiss.zinc.result import RESULT_OK, RESULT_WARNING_PART_DONE
from opencmiss.zinc.scene import Scene
from opencmiss.zinc.scenecoordinatesystem import SCENECOORDINATESYSTEM_WORLD
from scaffoldmaker.annotation.annotationgroup import AnnotationGroup, findAnnotationGroupByName
from scaffoldmaker.scaffolds import Scaffolds
from scaffoldmaker.scaffoldpackage import ScaffoldPackage
from scaffoldmaker.utils.exportvtk import ExportVtk
from scaffoldmaker.utils.zinc_utils import group_add_group_elements, group_get_highest_dimension, \
identifier_ranges_fix, identifier_ranges_from_string, identifier_ranges_to_string, mesh_group_to_identifier_ranges
STRING_FLOAT_FORMAT = '{:.8g}'
def parseListFloat(text : str, delimiter=','):
    """
    Parse a delimited list of floats from text.
    :param text: string containing floats separated by delimiter.
    :param delimiter: character delimiter between component values.
    :return: list of floats parsed from text; invalid entries become 0.0.
    """
    values = []
    for s in text.split(delimiter):
        try:
            values.append(float(s))
        # Fixed: bare 'except' narrowed to ValueError so SystemExit,
        # KeyboardInterrupt etc. are no longer silently swallowed.
        except ValueError:
            print('Invalid float')
            values.append(0.0)
    return values
def parseListInt(text : str, delimiter=','):
    """
    Parse a delimited list of integers from text.
    :param text: string containing integers separated by delimiter.
    :param delimiter: character delimiter between component values.
    :return: list of integers parsed from text; invalid entries become 0.
    """
    values = []
    for s in text.split(delimiter):
        try:
            values.append(int(s))
        # Fixed: bare 'except' narrowed to ValueError so SystemExit,
        # KeyboardInterrupt etc. are no longer silently swallowed.
        except ValueError:
            print('Invalid integer')
            values.append(0)
    return values
def parseVector3(vectorText : str, delimiter, defaultValue):
    """
    Parse a 3 component vector from a string.
    Repeats last component if too few.
    :param vectorText: string containing vector components separated by delimiter.
    :param delimiter: character delimiter between component values.
    :param defaultValue: Value to use for invalid components.
    :return: list of 3 component values parsed from vectorText.
    """
    vector = []
    for valueText in vectorText.split(delimiter):
        try:
            vector.append(float(valueText))
        # Fixed: bare 'except' narrowed to ValueError so SystemExit,
        # KeyboardInterrupt etc. are no longer silently swallowed.
        except ValueError:
            vector.append(defaultValue)
    if len(vector) > 3:
        # Too many components: truncate.
        vector = vector[:3]
    else:
        # Too few: repeat the last component up to length 3.
        for i in range(3 - len(vector)):
            vector.append(vector[-1])
    return vector
class MeshGeneratorModel(object):
"""
Framework for generating meshes of a number of types, with mesh type specific options
"""
def __init__(self, context, region, material_module):
super(MeshGeneratorModel, self).__init__()
self._region_name = "generated_mesh"
self._context = context
self._parent_region = region
self._materialmodule = material_module
self._region = None
self._modelCoordinatesField = None
self._fieldmodulenotifier = None
self._currentAnnotationGroup = None
self._customParametersCallback = None
self._sceneChangeCallback = None
self._transformationChangeCallback = None
self._deleteElementRanges = []
self._nodeDerivativeLabels = [ 'D1', 'D2', 'D3', 'D12', 'D13', 'D23', 'D123' ]
# list of nested scaffold packages to that being edited, with their parent option names
# discover all mesh types and set the current from the default
scaffolds = Scaffolds()
self._allScaffoldTypes = scaffolds.getScaffoldTypes()
scaffoldType = scaffolds.getDefaultScaffoldType()
scaffoldPackage = ScaffoldPackage(scaffoldType)
self._parameterSetName = scaffoldType.getParameterSetNames()[0]
self._scaffoldPackages = [ scaffoldPackage ]
self._scaffoldPackageOptionNames = [ None ]
self._settings = {
'scaffoldPackage' : scaffoldPackage,
'deleteElementRanges' : '',
'displayNodePoints' : False,
'displayNodeNumbers' : False,
'displayNodeDerivatives' : 0, # tri-state: 0=show none, 1=show selected, 2=show all
'displayNodeDerivativeLabels' : self._nodeDerivativeLabels[0:3],
'displayLines' : True,
'displayLinesExterior' : False,
'displayModelRadius' : False,
'displaySurfaces' : True,
'displaySurfacesExterior' : True,
'displaySurfacesTranslucent' : True,
'displaySurfacesWireframe' : False,
'displayElementNumbers' : False,
'displayElementAxes' : False,
'displayAxes' : True,
'displayMarkerPoints' : False,
'modelCoordinatesField' : 'coordinates'
}
self._customScaffoldPackage = None # temporary storage of custom mesh options and edits, to switch back to
self._unsavedNodeEdits = False # Whether nodes have been edited since ScaffoldPackage meshEdits last updated
def _updateScaffoldEdits(self):
'''
Ensure mesh and annotation group edits are up-to-date.
'''
if self._unsavedNodeEdits:
self._scaffoldPackages[-1].setMeshEdits(exnodeStringFromGroup(self._region, 'meshEdits', [ 'coordinates' ]))
self._unsavedNodeEdits = False
self._scaffoldPackages[-1].updateUserAnnotationGroups()
def _saveCustomScaffoldPackage(self):
'''
Copy current ScaffoldPackage to custom ScaffoldPackage to be able to switch back to later.
'''
self._updateScaffoldEdits()
self._customScaffoldPackage = copy.deepcopy(self._scaffoldPackages[-1])
def _useCustomScaffoldPackage(self):
if (not self._customScaffoldPackage) or (self._parameterSetName != 'Custom'):
self._saveCustomScaffoldPackage()
self._parameterSetName = 'Custom'
if self._customParametersCallback:
self._customParametersCallback()
def getRegion(self):
return self._region
def _resetModelCoordinatesField(self):
self._modelCoordinatesField = None
def _setModelCoordinatesField(self, modelCoordinatesField):
if modelCoordinatesField:
self._modelCoordinatesField = modelCoordinatesField.castFiniteElement()
if self._modelCoordinatesField.isValid():
self._settings['modelCoordinatesField'] = modelCoordinatesField.getName()
return
# reset
self._modelCoordinatesField = None
self._settings['modelCoordinatesField'] = "coordinates"
def _discoverModelCoordinatesField(self):
"""
Discover new model coordintes field by previous name or default "coordinates" or first found.
"""
fieldmodule = self._region.getFieldmodule()
modelCoordinatesField = fieldmodule.findFieldByName(self._settings['modelCoordinatesField'])
if not fieldIsManagedCoordinates(modelCoordinatesField):
if self._settings['modelCoordinatesField'] != "coordinates":
modelCoordinatesField = fieldmodule.findFieldByName("coordinates").castFiniteElement()
if not fieldIsManagedCoordinates(modelCoordinatesField):
fieldIter = fieldmodule.createFielditerator()
field = fieldIter.next()
while field.isValid():
if fieldIsManagedCoordinates(field):
modelCoordinatesField = field.castFiniteElement()
break
field = fieldIter.next()
else:
modelCoordinatesField = None
self._setModelCoordinatesField(modelCoordinatesField)
def getModelCoordinatesField(self):
return self._modelCoordinatesField
def setModelCoordinatesField(self, modelCoordinatesField):
"""
For outside use, sets field and rebuilds graphics.
"""
self._setModelCoordinatesField(modelCoordinatesField)
if not self._modelCoordinatesField:
self._discoverModelCoordinatesField()
self._createGraphics()
def getMeshEditsGroup(self):
fm = self._region.getFieldmodule()
return fm.findFieldByName('meshEdits').castGroup()
def getOrCreateMeshEditsNodesetGroup(self, nodeset):
'''
Someone is about to edit a node, and must add the modified node to this nodesetGroup.
'''
fm = self._region.getFieldmodule()
with ChangeManager(fm):
group = fm.findFieldByName('meshEdits').castGroup()
if not group.isValid():
group = fm.createFieldGroup()
group.setName('meshEdits')
group.setManaged(True)
self._unsavedNodeEdits = True
self._useCustomScaffoldPackage()
fieldNodeGroup = group.getFieldNodeGroup(nodeset)
if not fieldNodeGroup.isValid():
fieldNodeGroup = group.createFieldNodeGroup(nodeset)
nodesetGroup = fieldNodeGroup.getNodesetGroup()
return nodesetGroup
def interactionRotate(self, axis, angle):
mat1 = axis_angle_to_rotation_matrix(axis, angle)
mat2 = euler_to_rotation_matrix([ deg*math.pi/180.0 for deg in self._scaffoldPackages[-1].getRotation() ])
newmat = matrix_mult(mat1, mat2)
rotation = [ rad*180.0/math.pi for rad in rotation_matrix_to_euler(newmat) ]
if self._scaffoldPackages[-1].setRotation(rotation):
self._setGraphicsTransformation()
if self._transformationChangeCallback:
self._transformationChangeCallback()
def interactionScale(self, uniformScale):
scale = self._scaffoldPackages[-1].getScale()
if self._scaffoldPackages[-1].setScale([ (scale[i]*uniformScale) for i in range(3) ]):
self._setGraphicsTransformation()
if self._transformationChangeCallback:
self._transformationChangeCallback()
def interactionTranslate(self, offset):
translation = self._scaffoldPackages[-1].getTranslation()
if self._scaffoldPackages[-1].setTranslation([ (translation[i] + offset[i]) for i in range(3) ]):
self._setGraphicsTransformation()
if self._transformationChangeCallback:
self._transformationChangeCallback()
def interactionEnd(self):
pass
def getAnnotationGroups(self):
'''
:return: Alphabetically sorted list of annotation group names.
'''
return self._scaffoldPackages[-1].getAnnotationGroups()
def createUserAnnotationGroup(self):
'''
Create a new annotation group with automatic name, define it from
the current selection and set it as the current annotation group.
:return: New annotation group.
'''
self._currentAnnotationGroup = self._scaffoldPackages[-1].createUserAnnotationGroup()
self.redefineCurrentAnnotationGroupFromSelection()
return self._currentAnnotationGroup
def deleteAnnotationGroup(self, annotationGroup):
'''
Delete the annotation group. If the current annotation group is deleted, set an empty group.
:return: True on success, otherwise False
'''
if self._scaffoldPackages[-1].deleteAnnotationGroup(annotationGroup):
if annotationGroup is self._currentAnnotationGroup:
self.setCurrentAnnotationGroup(None)
return True
print('Cannot delete annotation group')
return False
def redefineCurrentAnnotationGroupFromSelection(self):
if not self._currentAnnotationGroup:
return False
scene = self._region.getScene()
group = self._currentAnnotationGroup.getGroup()
group.clear()
selectionGroup = get_scene_selection_group(scene)
if selectionGroup:
fieldmodule = self._region.getFieldmodule()
with ChangeManager(fieldmodule):
group.setSubelementHandlingMode(FieldGroup.SUBELEMENT_HANDLING_MODE_FULL)
highest_dimension = group_get_highest_dimension(selectionGroup)
group_add_group_elements(group, selectionGroup, highest_dimension)
# redefine selection to match group, removes orphaned lower dimensional elements.
selectionGroup.clear()
group_add_group_elements(selectionGroup, group, highest_dimension)
return True
def setCurrentAnnotationGroupName(self, newName):
'''
Rename current annotation group, but ensure it is a user group and name is not already in use.
:return: True on success, otherwise False
'''
if self._currentAnnotationGroup and self.isUserAnnotationGroup(self._currentAnnotationGroup) and \
(not findAnnotationGroupByName(self.getAnnotationGroups(), newName)):
return self._currentAnnotationGroup.setName(newName)
return False
def setCurrentAnnotationGroupOntId(self, newOntId):
'''
:return: True on success, otherwise False
'''
if self._currentAnnotationGroup and self.isUserAnnotationGroup(self._currentAnnotationGroup):
return self._currentAnnotationGroup.setId(newOntId)
return False
    def isUserAnnotationGroup(self, annotationGroup):
        '''
        :return: True if annotationGroup is user-created and editable.
        '''
        return self._scaffoldPackages[-1].isUserAnnotationGroup(annotationGroup)
    def getCurrentAnnotationGroup(self):
        '''
        Get the current annotation group stored for possible editing.
        :return: Current annotation group, or None if none set.
        '''
        return self._currentAnnotationGroup
    def setCurrentAnnotationGroup(self, annotationGroup : AnnotationGroup):
        '''
        Set annotationGroup as current and replace the selection with its objects.
        :param annotationGroup: Group to select, or None to clear selection.
        '''
        #print('setCurrentAnnotationGroup', annotationGroup.getName() if annotationGroup else None)
        self._currentAnnotationGroup = annotationGroup
        fieldmodule = self._region.getFieldmodule()
        with ChangeManager(fieldmodule):
            scene = self._region.getScene()
            selectionGroup = get_scene_selection_group(scene)
            if annotationGroup:
                if selectionGroup:
                    selectionGroup.clear()
                else:
                    # no selection group yet: create one so group elements can be added
                    selectionGroup = create_scene_selection_group(scene)
                group = annotationGroup.getGroup()
                group_add_group_elements(selectionGroup, group, group_get_highest_dimension(group))
            else:
                if selectionGroup:
                    selectionGroup.clear()
                    # invalid Field clears the scene's selection field entirely
                    scene.setSelectionField(Field())
    def setCurrentAnnotationGroupByName(self, annotationGroupName):
        '''
        Set the current annotation group by name.
        :param annotationGroupName: Name to find; selection is cleared if no group matches.
        '''
        annotationGroup = findAnnotationGroupByName(self.getAnnotationGroups(), annotationGroupName)
        self.setCurrentAnnotationGroup(annotationGroup)
    def _setScaffoldType(self, scaffoldType):
        '''
        Replace the scaffold package being edited with a default package of the
        given scaffold type, reset custom/edit state and regenerate the mesh.
        :param scaffoldType: New scaffold type to edit.
        '''
        if len(self._scaffoldPackages) == 1:
            # root scaffoldPackage
            self._settings['scaffoldPackage'] = self._scaffoldPackages[0] = ScaffoldPackage(scaffoldType)
        else:
            # nested ScaffoldPackage
            self._scaffoldPackages[-1] = self.getParentScaffoldType().getOptionScaffoldPackage(self._scaffoldPackageOptionNames[-1], scaffoldType)
        self._customScaffoldPackage = None
        self._unsavedNodeEdits = False
        # first listed parameter set is the default for the new type
        self._parameterSetName = self.getEditScaffoldParameterSetNames()[0]
        self._generateMesh()
def _getScaffoldTypeByName(self, name):
for scaffoldType in self._allScaffoldTypes:
if scaffoldType.getName() == name:
return scaffoldType
return None
    def setScaffoldTypeByName(self, name):
        '''
        Switch the scaffold type being edited by name, if valid for the parent scaffold.
        No effect if the name is unknown or the type is already being edited.
        :param name: Name of scaffold type to switch to.
        '''
        scaffoldType = self._getScaffoldTypeByName(name)
        if scaffoldType is not None:
            parentScaffoldType = self.getParentScaffoldType()
            # NOTE: assert is stripped under python -O; treated as an internal invariant here
            assert (not parentScaffoldType) or (scaffoldType in parentScaffoldType.getOptionValidScaffoldTypes(self._scaffoldPackageOptionNames[-1])), \
                'Invalid scaffold type for parent scaffold'
            if scaffoldType != self.getEditScaffoldType():
                self._setScaffoldType(scaffoldType)
def getAvailableScaffoldTypeNames(self):
scaffoldTypeNames = []
parentScaffoldType = self.getParentScaffoldType()
validScaffoldTypes = parentScaffoldType.getOptionValidScaffoldTypes(self._scaffoldPackageOptionNames[-1]) if parentScaffoldType else None
for scaffoldType in self._allScaffoldTypes:
if (not parentScaffoldType) or (scaffoldType in validScaffoldTypes):
scaffoldTypeNames.append(scaffoldType.getName())
return scaffoldTypeNames
    def getEditScaffoldTypeName(self):
        '''
        :return: Name of scaffold type currently being edited.
        '''
        return self.getEditScaffoldType().getName()
    def editingRootScaffoldPackage(self):
        '''
        :return: True if editing root ScaffoldPackage, else False.
        '''
        return len(self._scaffoldPackages) == 1
    def getEditScaffoldType(self):
        '''
        Get scaffold type currently being edited, including nested scaffolds.
        '''
        return self._scaffoldPackages[-1].getScaffoldType()
    def getEditScaffoldSettings(self):
        '''
        Get settings for scaffold type currently being edited, including nested scaffolds.
        '''
        return self._scaffoldPackages[-1].getScaffoldSettings()
def getEditScaffoldOptionDisplayName(self):
'''
Get option display name for sub scaffold package being edited.
'''
return '/'.join(self._scaffoldPackageOptionNames[1:])
    def getEditScaffoldOrderedOptionNames(self):
        '''
        :return: Ordered option names for the scaffold type currently being edited.
        '''
        return self._scaffoldPackages[-1].getScaffoldType().getOrderedOptionNames()
    def getEditScaffoldParameterSetNames(self):
        '''
        :return: Parameter set names for scaffold being edited: the type's own
        at root, or the parent's per-option names when editing a nested scaffold.
        '''
        if self.editingRootScaffoldPackage():
            return self._scaffoldPackages[0].getScaffoldType().getParameterSetNames()
        # may need to change if scaffolds nested two deep
        return self.getParentScaffoldType().getOptionScaffoldTypeParameterSetNames( \
            self._scaffoldPackageOptionNames[-1], self._scaffoldPackages[-1].getScaffoldType())
    def getDefaultScaffoldPackageForParameterSetName(self, parameterSetName):
        '''
        :return: Default ScaffoldPackage set up with named parameter set.
        '''
        if self.editingRootScaffoldPackage():
            scaffoldType = self._scaffoldPackages[0].getScaffoldType()
            return ScaffoldPackage(scaffoldType, { 'scaffoldSettings' : scaffoldType.getDefaultOptions(parameterSetName) })
        # may need to change if scaffolds nested two deep
        return self.getParentScaffoldType().getOptionScaffoldPackage( \
            self._scaffoldPackageOptionNames[-1], self._scaffoldPackages[-1].getScaffoldType(), parameterSetName)
    def getEditScaffoldOption(self, key):
        '''
        :return: Raw value of the named option in the current edit scaffold settings.
        '''
        return self.getEditScaffoldSettings()[key]
def getEditScaffoldOptionStr(self, key):
value = self.getEditScaffoldSettings()[key]
if type(value) is list:
if type(value[0]) is int:
return ', '.join(str(v) for v in value)
elif type(value[0]) is float:
return ', '.join(STRING_FLOAT_FORMAT.format(v) for v in value)
return str(value)
def getParentScaffoldType(self):
'''
:return: Parent scaffold type or None if root scaffold.
'''
if len(self._scaffoldPackages) > 1:
return self._scaffoldPackages[-2].getScaffoldType()
return None
    def getParentScaffoldOption(self, key):
        '''
        :return: Value of named option in the parent scaffold's settings.
        Only valid when editing a nested scaffold.
        '''
        assert len(self._scaffoldPackages) > 1, 'Attempt to get parent option on root scaffold'
        parentScaffoldSettings = self._scaffoldPackages[-2].getScaffoldSettings()
        return parentScaffoldSettings[key]
    def _checkCustomParameterSet(self):
        '''
        Work out whether ScaffoldPackage has a predefined parameter set or 'Custom'.
        '''
        self._customScaffoldPackage = None
        self._unsavedNodeEdits = False
        self._parameterSetName = None
        scaffoldPackage = self._scaffoldPackages[-1]
        # compare against each default package; reversed so later sets win on ties
        for parameterSetName in reversed(self.getEditScaffoldParameterSetNames()):
            tmpScaffoldPackage = self.getDefaultScaffoldPackageForParameterSetName(parameterSetName)
            if tmpScaffoldPackage == scaffoldPackage:
                self._parameterSetName = parameterSetName
                break
        if not self._parameterSetName:
            self._useCustomScaffoldPackage()
    def _clearMeshEdits(self):
        '''
        Discard interactive node edits stored on the current scaffold package.
        '''
        self._scaffoldPackages[-1].setMeshEdits(None)
        self._unsavedNodeEdits = False
    def editScaffoldPackageOption(self, optionName):
        '''
        Switch to editing a nested scaffold.
        :param optionName: Name of an option whose value is a ScaffoldPackage.
        '''
        settings = self.getEditScaffoldSettings()
        scaffoldPackage = settings.get(optionName)
        assert isinstance(scaffoldPackage, ScaffoldPackage), 'Option is not a ScaffoldPackage'
        # mesh edits don't apply to the nested scaffold being edited
        self._clearMeshEdits()
        self._scaffoldPackages.append(scaffoldPackage)
        self._scaffoldPackageOptionNames.append(optionName)
        self._checkCustomParameterSet()
        self._generateMesh()
    def endEditScaffoldPackageOption(self):
        '''
        End editing of the last ScaffoldPackage, moving up to parent or top scaffold type.
        '''
        assert len(self._scaffoldPackages) > 1, 'Attempt to end editing root ScaffoldPackage'
        self._updateScaffoldEdits()
        # store the edited scaffold in the settings option
        optionName = self._scaffoldPackageOptionNames.pop()
        scaffoldPackage = self._scaffoldPackages.pop()
        settings = self.getEditScaffoldSettings()
        # deep copy so parent's settings are decoupled from the edited package
        settings[optionName] = copy.deepcopy(scaffoldPackage)
        self._checkCustomParameterSet()
        self._generateMesh()
    def getInteractiveFunctions(self):
        '''
        Return list of interactive functions for current scaffold type.
        :return: list(tuples), (name : str, callable(region, options)).
        '''
        return self._scaffoldPackages[-1].getScaffoldType().getInteractiveFunctions()
def getInteractiveFunctionOptions(self, functionName):
'''
:param functionName: Name of the interactive function.
:return: Options dict for function with supplied name.
'''
interactiveFunctions = self.getInteractiveFunctions()
for interactiveFunction in interactiveFunctions:
if interactiveFunction[0] == functionName:
return interactiveFunction[1]
return {}
    def performInteractiveFunction(self, functionName, functionOptions):
        '''
        Perform interactive function of supplied name for current scaffold.
        :param functionName: Name of the interactive function.
        :param functionOptions: User-modified options to pass to the function.
        :return: True if scaffold settings changed.
        '''
        interactiveFunctions = self.getInteractiveFunctions()
        for interactiveFunction in interactiveFunctions:
            if interactiveFunction[0] == functionName:
                # tuple index 2 is the callable; it reports (settingsChanged, nodesChanged)
                settingsChanged, nodesChanged = interactiveFunction[2](self._region, self._scaffoldPackages[-1].getScaffoldSettings(), functionOptions, 'meshEdits')
                if nodesChanged:
                    self._unsavedNodeEdits = True
                    self._updateScaffoldEdits()
                    self._checkCustomParameterSet()
                return settingsChanged
        return False
    def getAvailableParameterSetNames(self):
        '''
        :return: Parameter set names selectable in UI, with 'Custom' prepended
        when custom edits exist.
        '''
        parameterSetNames = self.getEditScaffoldParameterSetNames()
        # NOTE(review): insert mutates the returned list; assumes
        # getEditScaffoldParameterSetNames returns a fresh list each call -- confirm
        if self._customScaffoldPackage:
            parameterSetNames.insert(0, 'Custom')
        return parameterSetNames
    def getParameterSetName(self):
        '''
        :return: Name of currently active parameter set.
        '''
        return self._parameterSetName
    def setParameterSetName(self, parameterSetName):
        '''
        Switch the current scaffold package to the named parameter set, or back
        to the saved custom package for 'Custom'. Regenerates the mesh.
        :param parameterSetName: Name from getAvailableParameterSetNames().
        '''
        if self._parameterSetName == 'Custom':
            # preserve current custom edits before replacing the package
            self._saveCustomScaffoldPackage()
        if parameterSetName == 'Custom':
            self._scaffoldPackages[-1] = copy.deepcopy(self._customScaffoldPackage)
        else:
            self._scaffoldPackages[-1] = self.getDefaultScaffoldPackageForParameterSetName(parameterSetName)
        if len(self._scaffoldPackages) == 1:
            # keep root package reference in settings in sync
            self._settings['scaffoldPackage'] = self._scaffoldPackages[0]
        self._parameterSetName = parameterSetName
        self._unsavedNodeEdits = False
        self._generateMesh()
def setScaffoldOption(self, key, value):
'''
:param value: New option value as a string.
:return: True if other dependent options have changed, otherwise False.
On True return client is expected to refresh all option values in UI.
'''
scaffoldType = self.getEditScaffoldType()
settings = self.getEditScaffoldSettings()
oldValue = settings[key]
# print('setScaffoldOption: key ', key, ' value ', str(value))
newValue = None
try:
if type(oldValue) is bool:
newValue = bool(value)
elif type(oldValue) is int:
newValue = int(value)
elif type(oldValue) is float:
newValue = float(value)
elif type(oldValue) is str:
newValue = str(value)
elif type(oldValue) is list:
# requires at least one value to work:
if type(oldValue[0]) is float:
newValue = parseListFloat(value)
elif type(oldValue[0]) is int:
newValue = parseListInt(value)
else:
assert False, 'Unimplemented type in list for scaffold option'
else:
assert False, 'Unimplemented type in scaffold option'
except:
print('setScaffoldOption: Invalid value')
return
settings[key] = newValue
dependentChanges = scaffoldType.checkOptions(settings)
# print('final value = ', settings[key])
if settings[key] != oldValue:
self._clearMeshEdits()
self._useCustomScaffoldPackage()
self._generateMesh()
return dependentChanges
    def getDeleteElementsRangesText(self):
        '''
        :return: Delete element ranges as display text, e.g. '1-10,20'.
        '''
        return self._settings['deleteElementRanges']
    def _parseDeleteElementsRangesText(self, elementRangesTextIn):
        """
        Parse and store delete element ranges from text, normalising the stored text.
        :return: True if ranges changed, otherwise False
        """
        elementRanges = identifier_ranges_from_string(elementRangesTextIn)
        changed = self._deleteElementRanges != elementRanges
        self._deleteElementRanges = elementRanges
        self._settings['deleteElementRanges'] = identifier_ranges_to_string(elementRanges)
        return changed
    def setDeleteElementsRangesText(self, elementRangesTextIn):
        '''
        Set delete element ranges from text, regenerating the mesh if they changed.
        '''
        if self._parseDeleteElementsRangesText(elementRangesTextIn):
            self._generateMesh()
    def deleteElementsSelection(self):
        '''
        Add the elements in the scene selection to the delete element ranges and delete.
        '''
        fm = self._region.getFieldmodule()
        scene = self._region.getScene()
        mesh = self._getMesh()
        selectionGroup = scene.getSelectionField().castGroup()
        meshGroup = selectionGroup.getFieldElementGroup(mesh).getMeshGroup()
        if meshGroup.isValid() and (meshGroup.getSize() > 0):
            # merge selection with current delete element ranges
            elementRanges = self._deleteElementRanges + mesh_group_to_identifier_ranges(meshGroup)
            identifier_ranges_fix(elementRanges)
            self._deleteElementRanges = elementRanges
            oldText = self._settings['deleteElementRanges']
            self._settings['deleteElementRanges'] = identifier_ranges_to_string(elementRanges)
            # only regenerate if the normalised text actually changed
            if self._settings['deleteElementRanges'] != oldText:
                self._generateMesh()
    def applyTransformation(self):
        '''
        Apply transformation to nodes and clear it, recording all modified nodes.
        '''
        scaffoldPackage = self._scaffoldPackages[-1]
        fieldmodule = self._region.getFieldmodule()
        with ChangeManager(fieldmodule):
            if scaffoldPackage.applyTransformation():
                # reset to identity transformation after baking into coordinates
                scaffoldPackage.setRotation([0.0, 0.0, 0.0])
                scaffoldPackage.setScale([1.0, 1.0, 1.0])
                scaffoldPackage.setTranslation([0.0, 0.0, 0.0])
                # mark all nodes as edited:
                coordinates = fieldmodule.findFieldByName('coordinates')
                if coordinates.isValid():
                    nodes = fieldmodule.findNodesetByFieldDomainType(Field.DOMAIN_TYPE_NODES)
                    meshEditsNodeset = self.getOrCreateMeshEditsNodesetGroup(nodes)
                    meshEditsNodeset.addNodesConditional(fieldmodule.createFieldIsDefined(coordinates))
                self._updateScaffoldEdits()
                self._checkCustomParameterSet()
        # graphics transformation is now identity (or unchanged if apply failed)
        self._setGraphicsTransformation()
    def getRotationText(self):
        '''
        :return: Rotation components as comma separated text.
        '''
        return ', '.join(STRING_FLOAT_FORMAT.format(value) for value in self._scaffoldPackages[-1].getRotation())
    def setRotationText(self, rotationTextIn):
        '''
        Set rotation from comma separated text; missing components default to 0.0.
        '''
        rotation = parseVector3(rotationTextIn, delimiter=",", defaultValue=0.0)
        if self._scaffoldPackages[-1].setRotation(rotation):
            self._setGraphicsTransformation()
    def getScaleText(self):
        '''
        :return: Scale components as comma separated text.
        '''
        return ', '.join(STRING_FLOAT_FORMAT.format(value) for value in self._scaffoldPackages[-1].getScale())
    def setScaleText(self, scaleTextIn):
        '''
        Set scale from comma separated text; missing components default to 1.0.
        '''
        scale = parseVector3(scaleTextIn, delimiter=",", defaultValue=1.0)
        if self._scaffoldPackages[-1].setScale(scale):
            self._setGraphicsTransformation()
    def getTranslationText(self):
        '''
        :return: Translation components as comma separated text.
        '''
        return ', '.join(STRING_FLOAT_FORMAT.format(value) for value in self._scaffoldPackages[-1].getTranslation())
    def setTranslationText(self, translationTextIn):
        '''
        Set translation from comma separated text; missing components default to 0.0.
        '''
        translation = parseVector3(translationTextIn, delimiter=",", defaultValue=0.0)
        if self._scaffoldPackages[-1].setTranslation(translation):
            self._setGraphicsTransformation()
    def registerCustomParametersCallback(self, customParametersCallback):
        # callback invoked when custom parameters come into use
        self._customParametersCallback = customParametersCallback
    def registerSceneChangeCallback(self, sceneChangeCallback):
        # callback invoked after the scene is rebuilt e.g. on mesh regeneration
        self._sceneChangeCallback = sceneChangeCallback
    def registerTransformationChangeCallback(self, transformationChangeCallback):
        # callback invoked when the scaffold transformation changes interactively
        self._transformationChangeCallback = transformationChangeCallback
    def _getVisibility(self, graphicsName):
        '''
        :return: Stored visibility flag for named graphics.
        '''
        return self._settings[graphicsName]
    def _setVisibility(self, graphicsName, show):
        '''
        Store visibility flag for named graphics and apply it to the scene.
        '''
        self._settings[graphicsName] = show
        graphics = self._region.getScene().findGraphicsByName(graphicsName)
        graphics.setVisibilityFlag(show)
    # simple show/hide accessors for named graphics, backed by settings:
    def isDisplayMarkerPoints(self):
        return self._getVisibility('displayMarkerPoints')
    def setDisplayMarkerPoints(self, show):
        self._setVisibility('displayMarkerPoints', show)
    def isDisplayAxes(self):
        return self._getVisibility('displayAxes')
    def setDisplayAxes(self, show):
        self._setVisibility('displayAxes', show)
    def isDisplayElementNumbers(self):
        return self._getVisibility('displayElementNumbers')
    def setDisplayElementNumbers(self, show):
        self._setVisibility('displayElementNumbers', show)
    def isDisplayLines(self):
        return self._getVisibility('displayLines')
    def setDisplayLines(self, show):
        self._setVisibility('displayLines', show)
    def isDisplayLinesExterior(self):
        '''
        :return: True if lines are limited to exterior faces only.
        '''
        return self._settings['displayLinesExterior']
    def setDisplayLinesExterior(self, isExterior):
        '''
        Set whether lines are limited to exterior faces only.
        '''
        self._settings['displayLinesExterior'] = isExterior
        lines = self._region.getScene().findGraphicsByName('displayLines')
        lines.setExterior(self.isDisplayLinesExterior())
    def isDisplayModelRadius(self):
        '''
        :return: True if model radius graphics are shown.
        '''
        return self._getVisibility('displayModelRadius')
    def setDisplayModelRadius(self, show):
        '''
        Set whether model radius graphics are shown; requires full graphics rebuild.
        '''
        if show != self._settings['displayModelRadius']:
            self._settings['displayModelRadius'] = show
            self._createGraphics()
    def getDisplayNodeDerivatives(self):
        '''
        :return: tri-state: 0=show none, 1=show selected, 2=show all
        '''
        return self._settings['displayNodeDerivatives']
    def _setAllGraphicsVisibility(self, graphicsName, show, selectMode=None):
        '''
        Ensure visibility of all graphics with graphicsName is set to boolean show.
        :param selectMode: Optional selectMode to set at the same time.
        '''
        scene = self._region.getScene()
        graphics = scene.findGraphicsByName(graphicsName)
        while graphics.isValid():
            graphics.setVisibilityFlag(show)
            if selectMode:
                graphics.setSelectMode(selectMode)
            # advance to next graphics with the same name; there may be several
            # (e.g. one per derivative version); others with different names are skipped
            while True:
                graphics = scene.getNextGraphics(graphics)
                if (not graphics.isValid()) or (graphics.getName() == graphicsName):
                    break
    def setDisplayNodeDerivatives(self, triState):
        '''
        :param triState: From Qt::CheckState: 0=show none, 1=show selected, 2=show all
        '''
        self._settings['displayNodeDerivatives'] = triState
        # show only derivative labels the user has enabled; draw-selected mode in state 1
        for nodeDerivativeLabel in self._nodeDerivativeLabels:
            self._setAllGraphicsVisibility('displayNodeDerivatives' + nodeDerivativeLabel,
                bool(triState) and self.isDisplayNodeDerivativeLabels(nodeDerivativeLabel),
                selectMode = Graphics.SELECT_MODE_DRAW_SELECTED if (triState == 1) else Graphics.SELECT_MODE_ON)
    def isDisplayNodeDerivativeLabels(self, nodeDerivativeLabel):
        '''
        :param nodeDerivativeLabel: Label from self._nodeDerivativeLabels ('D1', 'D2' ...)
        :return: True if graphics for the derivative label are enabled.
        '''
        return nodeDerivativeLabel in self._settings['displayNodeDerivativeLabels']
def setDisplayNodeDerivativeLabels(self, nodeDerivativeLabel, show):
'''
:param nodeDerivativeLabel: Label from self._nodeDerivativeLabels ('D1', 'D2' ...)
'''
shown = nodeDerivativeLabel in self._settings['displayNodeDerivativeLabels']
if show:
if not shown:
# keep in same order as self._nodeDerivativeLabels
nodeDerivativeLabels = []
for label in self._nodeDerivativeLabels:
if (label == nodeDerivativeLabel) or self.isDisplayNodeDerivativeLabels(label):
nodeDerivativeLabels.append(label)
self._settings['displayNodeDerivativeLabels'] = nodeDerivativeLabels
else:
if shown:
self._settings['displayNodeDerivativeLabels'].remove(nodeDerivativeLabel)
self._setAllGraphicsVisibility('displayNodeDerivatives' + nodeDerivativeLabel, show and bool(self.getDisplayNodeDerivatives()))
    # simple show/hide accessors for named graphics, backed by settings:
    def isDisplayNodeNumbers(self):
        return self._getVisibility('displayNodeNumbers')
    def setDisplayNodeNumbers(self, show):
        self._setVisibility('displayNodeNumbers', show)
    def isDisplayNodePoints(self):
        return self._getVisibility('displayNodePoints')
    def setDisplayNodePoints(self, show):
        self._setVisibility('displayNodePoints', show)
    def isDisplaySurfaces(self):
        return self._getVisibility('displaySurfaces')
    def setDisplaySurfaces(self, show):
        self._setVisibility('displaySurfaces', show)
    def isDisplaySurfacesExterior(self):
        return self._settings['displaySurfacesExterior']
    def setDisplaySurfacesExterior(self, isExterior):
        '''
        Set whether surfaces are limited to exterior faces; only meaningful for 3-D meshes.
        '''
        self._settings['displaySurfacesExterior'] = isExterior
        surfaces = self._region.getScene().findGraphicsByName('displaySurfaces')
        surfaces.setExterior(self.isDisplaySurfacesExterior() if (self.getMeshDimension() == 3) else False)
    def isDisplaySurfacesTranslucent(self):
        '''
        :return: True if surfaces are drawn with the translucent material.
        '''
        return self._settings['displaySurfacesTranslucent']
    def setDisplaySurfacesTranslucent(self, isTranslucent):
        '''
        Set translucent surfaces on/off, also matching line material when lines
        are drawn as extruded circles.
        '''
        self._settings['displaySurfacesTranslucent'] = isTranslucent
        surfaces = self._region.getScene().findGraphicsByName('displaySurfaces')
        surfacesMaterial = self._materialmodule.findMaterialByName('trans_blue' if isTranslucent else 'solid_blue')
        surfaces.setMaterial(surfacesMaterial)
        lines = self._region.getScene().findGraphicsByName('displayLines')
        lineattr = lines.getGraphicslineattributes()
        # only extruded-circle lines take the translucent material
        isTranslucentLines = isTranslucent and (lineattr.getShapeType() == lineattr.SHAPE_TYPE_CIRCLE_EXTRUSION)
        linesMaterial = self._materialmodule.findMaterialByName('trans_blue' if isTranslucentLines else 'default')
        lines.setMaterial(linesMaterial)
    def isDisplaySurfacesWireframe(self):
        '''
        :return: True if surfaces are drawn in wireframe render mode.
        '''
        return self._settings['displaySurfacesWireframe']
    def setDisplaySurfacesWireframe(self, isWireframe):
        '''
        Set wireframe render mode for surfaces on/off.
        '''
        self._settings['displaySurfacesWireframe'] = isWireframe
        surfaces = self._region.getScene().findGraphicsByName('displaySurfaces')
        surfaces.setRenderPolygonMode(Graphics.RENDER_POLYGON_MODE_WIREFRAME if isWireframe else Graphics.RENDER_POLYGON_MODE_SHADED)
    def isDisplayElementAxes(self):
        return self._getVisibility('displayElementAxes')
    def setDisplayElementAxes(self, show):
        self._setVisibility('displayElementAxes', show)
    def needPerturbLines(self):
        """
        Return if solid surfaces are drawn with lines, requiring perturb lines to be activated.
        """
        if self._region is None:
            return False
        # perturbation only relevant if there are 2-D elements to draw surfaces on
        mesh2d = self._region.getFieldmodule().findMeshByDimension(2)
        if mesh2d.getSize() == 0:
            return False
        return self.isDisplayLines() and self.isDisplaySurfaces() and not self.isDisplaySurfacesTranslucent()
    def _getMesh(self):
        '''
        :return: Highest dimension mesh with elements in the region, or the
        empty 3-D mesh if the region has no elements at all.
        '''
        fm = self._region.getFieldmodule()
        for dimension in range(3,0,-1):
            mesh = fm.findMeshByDimension(dimension)
            if mesh.getSize() > 0:
                break
        if mesh.getSize() == 0:
            mesh = fm.findMeshByDimension(3)
        return mesh
    def getMeshDimension(self):
        '''
        :return: Dimension of the highest dimension mesh in use.
        '''
        return self._getMesh().getDimension()
    def getNodeLocation(self, node_id):
        '''
        Get display coordinates of the node with the given identifier.
        :param node_id: Node identifier.
        :return: Node coordinates with scene transformation applied.
        '''
        fm = self._region.getFieldmodule()
        with ChangeManager(fm):
            coordinates = fm.findFieldByName('coordinates')
            nodes = fm.findNodesetByFieldDomainType(Field.DOMAIN_TYPE_NODES)
            node = nodes.findNodeByIdentifier(node_id)
            fc = fm.createFieldcache()
            fc.setNode(node)
            # NOTE(review): assumes 3 coordinate components -- confirm for 1-D/2-D scaffolds
            _, position = coordinates.evaluateReal(fc, 3)
        return self._getSceneTransformationFromAdjustedPosition(position)
    def getSettings(self):
        '''
        :return: Settings dict for serialisation; note this is the live internal dict.
        '''
        return self._settings
    def setSettings(self, settings):
        '''
        Called on loading settings from file.
        Migrates obsolete settings formats, then rebuilds scaffold and mesh.
        :param settings: Settings dict as produced by getSettings() (any vintage).
        '''
        scaffoldPackage = settings.get('scaffoldPackage')
        if not scaffoldPackage:
            # migrate obsolete options to scaffoldPackage:
            scaffoldType = self._getScaffoldTypeByName(settings['meshTypeName'])
            del settings['meshTypeName']
            scaffoldSettings = settings['meshTypeOptions']
            del settings['meshTypeOptions']
            scaffoldPackage = ScaffoldPackage(scaffoldType, { 'scaffoldSettings' : scaffoldSettings })
            settings['scaffoldPackage'] = scaffoldPackage
        # migrate boolean options which are now tri-state
        # NOTE(review): assumes these keys are always present in loaded settings -- confirm
        for name in ['displayNodeDerivatives']:
            value = settings[name]
            if type(value)==bool:
                settings[name] = 2 if value else 0
        self._settings.update(settings)
        self._parseDeleteElementsRangesText(self._settings['deleteElementRanges'])
        # migrate old scale text, now held in scaffoldPackage
        oldScaleText = self._settings.get('scale')
        if oldScaleText:
            scaffoldPackage.setScale(parseVector3(oldScaleText, delimiter="*", defaultValue=1.0))
            del self._settings['scale'] # remove so can't overwrite scale next time
        self._scaffoldPackages = [ scaffoldPackage ]
        self._scaffoldPackageOptionNames = [ None ]
        self._checkCustomParameterSet()
        self._generateMesh()
    def _deleteElementsInRanges(self):
        '''
        If this is the root scaffold and there are ranges of element identifiers to delete,
        remove these from the model.
        Also remove marker group nodes embedded in those elements and any nodes used only by
        the deleted elements.
        '''
        if (len(self._deleteElementRanges) == 0) or (len(self._scaffoldPackages) > 1):
            return
        fm = self._region.getFieldmodule()
        mesh = self._getMesh()
        meshDimension = mesh.getDimension()
        nodes = fm.findNodesetByFieldDomainType(Field.DOMAIN_TYPE_NODES)
        with ChangeManager(fm):
            # put the elements in a group and use subelement handling to get nodes in use by it
            destroyGroup = fm.createFieldGroup()
            destroyGroup.setSubelementHandlingMode(FieldGroup.SUBELEMENT_HANDLING_MODE_FULL)
            destroyElementGroup = destroyGroup.createFieldElementGroup(mesh)
            destroyMesh = destroyElementGroup.getMeshGroup()
            elementIter = mesh.createElementiterator()
            element = elementIter.next()
            while element.isValid():
                identifier = element.getIdentifier()
                for deleteElementRange in self._deleteElementRanges:
                    # ranges are inclusive [low, high] pairs
                    if (identifier >= deleteElementRange[0]) and (identifier <= deleteElementRange[1]):
                        destroyMesh.addElement(element)
                element = elementIter.next()
            del elementIter
            #print("Deleting", destroyMesh.getSize(), "element(s)")
            if destroyMesh.getSize() > 0:
                destroyNodeGroup = destroyGroup.getFieldNodeGroup(nodes)
                destroyNodes = destroyNodeGroup.getNodesetGroup()
                # also schedule marker nodes embedded in destroyed elements for destruction
                markerGroup = fm.findFieldByName("marker").castGroup()
                if markerGroup.isValid():
                    markerNodes = markerGroup.getFieldNodeGroup(nodes).getNodesetGroup()
                    markerLocation = fm.findFieldByName("marker_location")
                    #markerName = fm.findFieldByName("marker_name")
                    if markerNodes.isValid() and markerLocation.isValid():
                        fieldcache = fm.createFieldcache()
                        nodeIter = markerNodes.createNodeiterator()
                        node = nodeIter.next()
                        while node.isValid():
                            fieldcache.setNode(node)
                            element, xi = markerLocation.evaluateMeshLocation(fieldcache, meshDimension)
                            if element.isValid() and destroyMesh.containsElement(element):
                                #print("Destroy marker '" + markerName.evaluateString(fieldcache) + "' node", node.getIdentifier(), "in destroyed element", element.getIdentifier(), "at", xi)
                                destroyNodes.addNode(node)  # add so it is destroyed with the others below; cannot destroy while iterating
                            node = nodeIter.next()
                        del nodeIter
                        del fieldcache
                # must destroy elements first as Zinc won't destroy nodes that are in use
                mesh.destroyElementsConditional(destroyElementGroup)
                nodes.destroyNodesConditional(destroyNodeGroup)
            # clean up group handles so no external code is notified of the group's existence
            del destroyNodes
            del destroyNodeGroup
            del destroyMesh
            del destroyElementGroup
            del destroyGroup
    def _generateMesh(self):
        '''
        Regenerate the scaffold mesh in a fresh child region, applying element
        deletions and rebuilding graphics. Preserves the current annotation
        group by name across the rebuild.
        '''
        currentAnnotationGroupName = self._currentAnnotationGroup.getName() if self._currentAnnotationGroup else None
        scaffoldPackage = self._scaffoldPackages[-1]
        if self._region:
            # discard the previous generation's region entirely
            self._parent_region.removeChild(self._region)
        self._resetModelCoordinatesField()
        self._region = self._parent_region.createChild(self._region_name)
        self._scene = self._region.getScene()
        fm = self._region.getFieldmodule()
        with ChangeManager(fm):
            logger = self._context.getLogger()
            scaffoldPackage.generate(self._region, applyTransformation=False)
            annotationGroups = scaffoldPackage.getAnnotationGroups()
            # report any warnings/errors Zinc logged during generation
            loggerMessageCount = logger.getNumberOfMessages()
            if loggerMessageCount > 0:
                for i in range(1, loggerMessageCount + 1):
                    print(logger.getMessageTypeAtIndex(i), logger.getMessageTextAtIndex(i))
                logger.removeAllMessages()
            self._deleteElementsInRanges()
            self.setCurrentAnnotationGroupByName(currentAnnotationGroupName)
        # Zinc won't create cmiss_number and xi fields until endChange called
        # Hence must create graphics outside of ChangeManager lifetime:
        self._discoverModelCoordinatesField()
        self._createGraphics()
        if self._sceneChangeCallback:
            self._sceneChangeCallback()
    def _getAxesScale(self):
        '''
        Get sizing for axes, taking into account transformation.
        :return: Power of 10 not greater than the maximum scaled coordinate range.
        '''
        scale = self._scaffoldPackages[-1].getScale()
        fm = self._region.getFieldmodule()
        nodes = fm.findNodesetByFieldDomainType(Field.DOMAIN_TYPE_NODES)
        coordinates = fm.findFieldByName('coordinates').castFiniteElement()
        componentsCount = coordinates.getNumberOfComponents()
        minX, maxX = evaluateFieldNodesetRange(coordinates, nodes)
        # for 1 component min/max are scalars; otherwise per-component lists
        if componentsCount == 1:
            maxRange = (maxX - minX)*scale[0]
        else:
            maxRange = max((maxX[c] - minX[c])*scale[c] for c in range(componentsCount))
        axesScale = 1.0
        if maxRange > 0.0:
            # snap to the largest power of 10 with axesScale*10 <= maxRange <= axesScale*10
            while axesScale*10.0 < maxRange:
                axesScale *= 10.0
            while axesScale > maxRange:
                axesScale *= 0.1
        return axesScale
    def _setGraphicsTransformation(self):
        '''
        Establish 4x4 graphics transformation for current scaffold package.
        '''
        transformationMatrix = None
        # compose transformations from innermost edited package out to the root
        for scaffoldPackage in reversed(self._scaffoldPackages):
            mat = scaffoldPackage.getTransformationMatrix()
            if mat:
                transformationMatrix = matrix_mult(mat, transformationMatrix) if transformationMatrix else mat
        scene = self._region.getScene()
        if transformationMatrix:
            # flatten to list of 16 components for passing to Zinc
            scene.setTransformationMatrix(transformationMatrix[0] + transformationMatrix[1] + transformationMatrix[2] + transformationMatrix[3])
        else:
            scene.clearTransformation()
        # rescale axes for new scale
        axesScale = self._getAxesScale()
        scene = self._region.getScene()
        with ChangeManager(scene):
            axes = scene.findGraphicsByName('displayAxes')
            pointattr = axes.getGraphicspointattributes()
            pointattr.setBaseSize([ axesScale ])
            pointattr.setLabelText(1, ' {:2g}'.format(axesScale))
    def _createGraphics(self):
        """
        Build all display graphics for the generated mesh in the region's scene:
        axes, element lines, node points/numbers, node derivative arrows (one
        graphics per derivative version in use), element numbers/axes, surfaces
        and marker points. Visibility of each graphics follows the corresponding
        isDisplay* setting. Finally prints and clears any Zinc logger messages.
        """
        fm = self._region.getFieldmodule()
        with ChangeManager(fm):
            meshDimension = self.getMeshDimension()
            coordinates = self.getModelCoordinatesField()
            componentsCount = coordinates.getNumberOfComponents()
            nodes = fm.findNodesetByFieldDomainType(Field.DOMAIN_TYPE_NODES)
            fieldcache = fm.createFieldcache()
            # determine field derivatives for all versions in use: fairly expensive
            # fields in same order as self._nodeDerivativeLabels
            nodeDerivatives = [ Node.VALUE_LABEL_D_DS1, Node.VALUE_LABEL_D_DS2, Node.VALUE_LABEL_D_DS3,
                Node.VALUE_LABEL_D2_DS1DS2, Node.VALUE_LABEL_D2_DS1DS3, Node.VALUE_LABEL_D2_DS2DS3, Node.VALUE_LABEL_D3_DS1DS2DS3 ]
            nodeDerivativeFields = [ [ fm.createFieldNodeValue(coordinates, nodeDerivative, 1) ] for nodeDerivative in nodeDerivatives ]
            derivativesCount = len(nodeDerivatives)
            maxVersions = [ 1 for nodeDerivative in nodeDerivatives ]
            lastVersion = 1
            version = 2
            # probe successively higher version numbers; for each derivative found
            # at a new version, create a node value field for that version
            while True:
                nodeIter = nodes.createNodeiterator()
                node = nodeIter.next()
                foundCount = sum((1 if (v < lastVersion) else 0) for v in maxVersions)
                while (node.isValid()) and (foundCount < derivativesCount):
                    fieldcache.setNode(node)
                    for d in range(derivativesCount):
                        if maxVersions[d] == lastVersion: # only look one higher than last version found
                            result, values = coordinates.getNodeParameters(fieldcache, -1, nodeDerivatives[d], version, componentsCount)
                            if (result == RESULT_OK) or (result == RESULT_WARNING_PART_DONE):
                                maxVersions[d] = version
                                nodeDerivativeFields[d].append(fm.createFieldNodeValue(coordinates, nodeDerivatives[d], version))
                                foundCount += 1
                    node = nodeIter.next()
                if foundCount >= derivativesCount:
                    break
                lastVersion = version
                version += 1
            elementDerivativeFields = []
            for d in range(meshDimension):
                elementDerivativeFields.append(fm.createFieldDerivative(coordinates, d + 1))
            elementDerivativesField = fm.createFieldConcatenate(elementDerivativeFields)
            cmiss_number = fm.findFieldByName('cmiss_number')
            markerGroup = fm.findFieldByName('marker').castGroup()
            if not markerGroup.isValid():
                markerGroup = fm.createFieldConstant([0.0]) # show nothing to avoid warnings
            markerName = findOrCreateFieldStoredString(fm, 'marker_name')
            radius = fm.findFieldByName('radius')
            markerLocation = findOrCreateFieldStoredMeshLocation(fm, self._getMesh(), name='marker_location')
            markerHostCoordinates = fm.createFieldEmbedded(coordinates, markerLocation)
            # fixed width glyph size is based on average element size in all dimensions
            mesh1d = fm.findMeshByDimension(1)
            # NOTE(review): meanLineLength is assigned but never used below
            meanLineLength = 0.0
            lineCount = mesh1d.getSize()
            if lineCount > 0:
                one = fm.createFieldConstant(1.0)
                sumLineLength = fm.createFieldMeshIntegral(one, coordinates, mesh1d)
                result, totalLineLength = sumLineLength.evaluateReal(fieldcache, 1)
                glyphWidth = 0.1*totalLineLength/lineCount
                del sumLineLength
                del one
            if (lineCount == 0) or (glyphWidth == 0.0):
                # fallback if no lines: use graphics range
                minX, maxX = evaluateFieldNodesetRange(coordinates, nodes)
                # use function of coordinate range if no elements
                if componentsCount == 1:
                    maxScale = maxX - minX
                else:
                    first = True
                    for c in range(componentsCount):
                        scale = maxX[c] - minX[c]
                        if first or (scale > maxScale):
                            maxScale = scale
                            first = False
                if maxScale == 0.0:
                    maxScale = 1.0
                glyphWidth = 0.01*maxScale
            del fieldcache
        # make graphics (outside fieldmodule ChangeManager so deferred fields exist)
        scene = self._region.getScene()
        with ChangeManager(scene):
            scene.removeAllGraphics()
            self._setGraphicsTransformation()
            axes = scene.createGraphicsPoints()
            axes.setScenecoordinatesystem(SCENECOORDINATESYSTEM_WORLD)
            pointattr = axes.getGraphicspointattributes()
            pointattr.setGlyphShapeType(Glyph.SHAPE_TYPE_AXES_XYZ)
            axesScale = self._getAxesScale()
            pointattr.setBaseSize([ axesScale ])
            pointattr.setLabelText(1, ' {:2g}'.format(axesScale))
            axes.setMaterial(self._materialmodule.findMaterialByName('grey50'))
            axes.setName('displayAxes')
            axes.setVisibilityFlag(self.isDisplayAxes())
            lines = scene.createGraphicsLines()
            lines.setCoordinateField(coordinates)
            lines.setExterior(self.isDisplayLinesExterior())
            lineattr = lines.getGraphicslineattributes()
            if self.isDisplayModelRadius() and radius.isValid():
                # show lines as tubes scaled by 2*radius
                lineattr.setShapeType(lineattr.SHAPE_TYPE_CIRCLE_EXTRUSION)
                lineattr.setBaseSize([ 0.0 ])
                lineattr.setScaleFactors([ 2.0 ])
                lineattr.setOrientationScaleField(radius)
            isTranslucentLines = self.isDisplaySurfacesTranslucent() and (lineattr.getShapeType() == lineattr.SHAPE_TYPE_CIRCLE_EXTRUSION)
            linesMaterial = self._materialmodule.findMaterialByName('trans_blue' if isTranslucentLines else 'default')
            lines.setMaterial(linesMaterial)
            lines.setName('displayLines')
            lines.setVisibilityFlag(self.isDisplayLines())
            nodePoints = scene.createGraphicsPoints()
            nodePoints.setFieldDomainType(Field.DOMAIN_TYPE_NODES)
            nodePoints.setCoordinateField(coordinates)
            pointattr = nodePoints.getGraphicspointattributes()
            pointattr.setGlyphShapeType(Glyph.SHAPE_TYPE_SPHERE)
            if self.isDisplayModelRadius() and radius.isValid():
                pointattr.setBaseSize([ 0.0 ])
                pointattr.setScaleFactors([ 2.0 ])
                pointattr.setOrientationScaleField(radius)
            else:
                pointattr.setBaseSize([ glyphWidth ])
            nodePoints.setMaterial(self._materialmodule.findMaterialByName('white'))
            nodePoints.setName('displayNodePoints')
            nodePoints.setVisibilityFlag(self.isDisplayNodePoints())
            nodeNumbers = scene.createGraphicsPoints()
            nodeNumbers.setFieldDomainType(Field.DOMAIN_TYPE_NODES)
            nodeNumbers.setCoordinateField(coordinates)
            pointattr = nodeNumbers.getGraphicspointattributes()
            pointattr.setLabelField(cmiss_number)
            pointattr.setGlyphShapeType(Glyph.SHAPE_TYPE_NONE)
            nodeNumbers.setMaterial(self._materialmodule.findMaterialByName('green'))
            nodeNumbers.setName('displayNodeNumbers')
            nodeNumbers.setVisibilityFlag(self.isDisplayNodeNumbers())
            # names in same order as self._nodeDerivativeLabels 'D1', 'D2', 'D3', 'D12', 'D13', 'D23', 'D123' and nodeDerivativeFields
            nodeDerivativeMaterialNames = [ 'gold', 'silver', 'green', 'cyan', 'magenta', 'yellow', 'blue' ]
            derivativeScales = [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 ]
            for i in range(len(self._nodeDerivativeLabels)):
                nodeDerivativeLabel = self._nodeDerivativeLabels[i]
                maxVersions = len(nodeDerivativeFields[i])
                for v in range(maxVersions):
                    # one arrow graphics per derivative version; labelled with
                    # the version number when more than one version exists
                    nodeDerivatives = scene.createGraphicsPoints()
                    nodeDerivatives.setFieldDomainType(Field.DOMAIN_TYPE_NODES)
                    nodeDerivatives.setCoordinateField(coordinates)
                    pointattr = nodeDerivatives.getGraphicspointattributes()
                    pointattr.setGlyphShapeType(Glyph.SHAPE_TYPE_ARROW_SOLID)
                    pointattr.setOrientationScaleField(nodeDerivativeFields[i][v])
                    pointattr.setBaseSize([0.0, glyphWidth, glyphWidth])
                    pointattr.setScaleFactors([ derivativeScales[i], 0.0, 0.0 ])
                    if maxVersions > 1:
                        pointattr.setLabelOffset([ 1.05, 0.0, 0.0 ])
                        pointattr.setLabelText(1, str(v + 1))
                    material = self._materialmodule.findMaterialByName(nodeDerivativeMaterialNames[i])
                    nodeDerivatives.setMaterial(material)
                    nodeDerivatives.setSelectedMaterial(material)
                    nodeDerivatives.setName('displayNodeDerivatives' + nodeDerivativeLabel)
                    displayNodeDerivatives = self.getDisplayNodeDerivatives() # tri-state: 0=show none, 1=show selected, 2=show all
                    nodeDerivatives.setSelectMode(Graphics.SELECT_MODE_DRAW_SELECTED if (displayNodeDerivatives == 1) else Graphics.SELECT_MODE_ON)
                    nodeDerivatives.setVisibilityFlag(bool(displayNodeDerivatives) and self.isDisplayNodeDerivativeLabels(nodeDerivativeLabel))
            elementNumbers = scene.createGraphicsPoints()
            elementNumbers.setFieldDomainType(Field.DOMAIN_TYPE_MESH_HIGHEST_DIMENSION)
            elementNumbers.setCoordinateField(coordinates)
            pointattr = elementNumbers.getGraphicspointattributes()
            pointattr.setLabelField(cmiss_number)
            pointattr.setGlyphShapeType(Glyph.SHAPE_TYPE_NONE)
            elementNumbers.setMaterial(self._materialmodule.findMaterialByName('cyan'))
            elementNumbers.setName('displayElementNumbers')
            elementNumbers.setVisibilityFlag(self.isDisplayElementNumbers())
            surfaces = scene.createGraphicsSurfaces()
            surfaces.setCoordinateField(coordinates)
            surfaces.setRenderPolygonMode(Graphics.RENDER_POLYGON_MODE_WIREFRAME if self.isDisplaySurfacesWireframe() else Graphics.RENDER_POLYGON_MODE_SHADED)
            surfaces.setExterior(self.isDisplaySurfacesExterior() if (meshDimension == 3) else False)
            surfacesMaterial = self._materialmodule.findMaterialByName('trans_blue' if self.isDisplaySurfacesTranslucent() else 'solid_blue')
            surfaces.setMaterial(surfacesMaterial)
            surfaces.setName('displaySurfaces')
            surfaces.setVisibilityFlag(self.isDisplaySurfaces())
            elementAxes = scene.createGraphicsPoints()
            elementAxes.setFieldDomainType(Field.DOMAIN_TYPE_MESH_HIGHEST_DIMENSION)
            elementAxes.setCoordinateField(coordinates)
            pointattr = elementAxes.getGraphicspointattributes()
            pointattr.setGlyphShapeType(Glyph.SHAPE_TYPE_AXES_123)
            pointattr.setOrientationScaleField(elementDerivativesField)
            # base size / scale factors vary by mesh dimension so unused axes stay flat
            if meshDimension == 1:
                pointattr.setBaseSize([0.0, 2*glyphWidth, 2*glyphWidth])
                pointattr.setScaleFactors([0.25, 0.0, 0.0])
            elif meshDimension == 2:
                pointattr.setBaseSize([0.0, 0.0, 2*glyphWidth])
                pointattr.setScaleFactors([0.25, 0.25, 0.0])
            else:
                pointattr.setBaseSize([0.0, 0.0, 0.0])
                pointattr.setScaleFactors([0.25, 0.25, 0.25])
            elementAxes.setMaterial(self._materialmodule.findMaterialByName('yellow'))
            elementAxes.setName('displayElementAxes')
            elementAxes.setVisibilityFlag(self.isDisplayElementAxes())
            # marker points
            markerPoints = scene.createGraphicsPoints()
            markerPoints.setFieldDomainType(Field.DOMAIN_TYPE_NODES)
            markerPoints.setSubgroupField(markerGroup)
            markerPoints.setCoordinateField(markerHostCoordinates)
            pointattr = markerPoints.getGraphicspointattributes()
            pointattr.setLabelText(1, ' ')
            pointattr.setLabelField(markerName)
            pointattr.setGlyphShapeType(Glyph.SHAPE_TYPE_CROSS)
            pointattr.setBaseSize(2*glyphWidth)
            markerPoints.setMaterial(self._materialmodule.findMaterialByName('yellow'))
            markerPoints.setName('displayMarkerPoints')
            markerPoints.setVisibilityFlag(self.isDisplayMarkerPoints())
        # report then clear any warnings/errors accumulated by Zinc during creation
        logger = self._context.getLogger()
        loggerMessageCount = logger.getNumberOfMessages()
        if loggerMessageCount > 0:
            for i in range(1, loggerMessageCount + 1):
                print(logger.getMessageTypeAtIndex(i), logger.getMessageTextAtIndex(i))
            logger.removeAllMessages()
    def updateSettingsBeforeWrite(self):
        """Commit current scaffold edits (via _updateScaffoldEdits) before settings are serialised."""
        self._updateScaffoldEdits()
    def done(self):
        '''
        Finish generating mesh by applying transformation.
        '''
        # expects exactly one scaffold package on the stack — presumably the
        # root, with no sub-scaffold still being edited; TODO confirm
        assert 1 == len(self._scaffoldPackages)
        self._scaffoldPackages[0].applyTransformation()
    def writeModel(self, file_name):
        """Write the generated model region to file_name using Zinc region file I/O."""
        self._region.writeFile(file_name)
def writeAnnotations(self, filenameStem):
annotationFilename = filenameStem + '_annotations.csv'
with open(annotationFilename, 'w') as outstream:
outstream.write('Term ID,Group name\n')
annotationGroups = self.getAnnotationGroups()
termNameIds = []
for annotationGroup in annotationGroups:
termNameIds.append((annotationGroup.getName(), annotationGroup.getId()))
termNameIds.sort()
for termNameId in termNameIds:
outstream.write(termNameId[1] + ',' + termNameId[0] + '\n')
def exportToVtk(self, filenameStem):
base_name = os.path.basename(filenameStem)
description = 'Scaffold ' + self._scaffoldPackages[0].getScaffoldType().getName() + ': ' + base_name
exportvtk = ExportVtk(self._region, description, self.getAnnotationGroups())
exportvtk.writeFile(filenameStem + '.vtk')
def exnodeStringFromGroup(region, groupName, fieldNames):
    '''
    Serialise field within group of groupName to a string.
    :param region: Zinc Region containing the group.
    :param groupName: Name of group to output.
    :param fieldNames: List of fieldNames to output.
    :return: The string.
    '''
    streamInfo = region.createStreaminformationRegion()
    memoryResource = streamInfo.createStreamresourceMemory()
    streamInfo.setResourceGroupName(memoryResource, groupName)
    streamInfo.setResourceFieldNames(memoryResource, fieldNames)
    region.write(streamInfo)
    result, exString = memoryResource.getBuffer()
    return exString
def get_scene_selection_group(scene : Scene, subelementHandlingMode = FieldGroup.SUBELEMENT_HANDLING_MODE_FULL):
    '''
    Get existing scene selection group of standard name.
    :param scene: Zinc Scene to query for a selection group.
    :param subelementHandlingMode: Mode controlling how faces, lines and nodes are
    automatically added or removed with higher dimensional elements.
    :return: Existing selection group, or None.
    '''
    selection_group = scene.getSelectionField().castGroup()
    if not selection_group.isValid():
        return None
    selection_group.setSubelementHandlingMode(subelementHandlingMode)
    return selection_group
# standard field name shared by get_scene_selection_group / create_scene_selection_group
selection_group_name = 'cmiss_selection'
def create_scene_selection_group(scene : Scene, subelementHandlingMode = FieldGroup.SUBELEMENT_HANDLING_MODE_FULL):
    '''
    Create empty, unmanaged scene selection group of standard name.
    Should have already called get_selection_group with None returned.
    Can discover orphaned group of that name.
    New group has subelement handling on.
    :param scene: Zinc Scene to create selection for.
    :param subelementHandlingMode: Mode controlling how faces, lines and nodes are
    automatically added or removed with higher dimensional elements.
    :return: Selection group for scene.
    '''
    fieldmodule = scene.getRegion().getFieldmodule()
    with ChangeManager(fieldmodule):
        group = fieldmodule.findFieldByName(selection_group_name)
        if group.isValid():
            # found an orphaned field of the standard name: if it is a group,
            # empty it and drop its managed flag so we can reuse it
            group = group.castGroup()
            if group.isValid():
                group.clear()
                group.setManaged(False)
        if not group.isValid():
            group = fieldmodule.createFieldGroup()
            group.setName(selection_group_name)
        group.setSubelementHandlingMode(subelementHandlingMode)
    scene.setSelectionField(group)
    return group
|
rchristie/mapclientplugins.meshgeneratorstep
|
mapclientplugins/meshgeneratorstep/model/meshgeneratormodel.py
|
Python
|
apache-2.0
| 65,320
|
[
"VTK"
] |
da3668ca4dfed350d7d2107b1bcd620e1687c184188c7de6d95c5e669ef83d43
|
import re
from tornado.escape import json_decode
from authlib.common.encoding import to_unicode
from authlib.oauth2 import OAuth2Request as _OAuth2Request
from authlib.oauth2.rfc6749.util import scope_to_list
from urllib.parse import quote
class OAuth2Request(_OAuth2Request):
    """OAuth2 request object"""

    def addScopes(self, scopes):
        """Add new scopes to query

        :param list scopes: scopes
        """
        self.setQueryArguments(scope=list(set(scope_to_list(self.scope or "") + scopes)))

    def setQueryArguments(self, **kwargs):
        """Set query arguments.

        Each keyword replaces any same-named argument already present in the
        query; list values are joined with '+'. The request is re-initialised
        with the rewritten URI so derived attributes stay consistent.
        """
        for k in kwargs:
            # Quote value before add it to request query
            value = (
                "+".join([quote(str(v)) for v in kwargs[k]]) if isinstance(kwargs[k], list) else quote(str(kwargs[k]))
            )
            # Remove any existing argument of exactly this name. Filtering the
            # '&'-split query fixes the prefix collision of the previous regex
            # (e.g. removing 'id' must not corrupt 'identity=...').
            query = "&".join(arg for arg in self.query.split("&") if arg and arg.split("=", 1)[0] != k)
            # Add new one
            if query:
                query += "&"
            query += "%s=%s" % (k, value)
            # Re-init class with the updated URI
            self.__init__(self.method, to_unicode(self.path + "?" + query))

    @property
    def path(self):
        """URL path without the query string

        :return: str
        """
        # NOTE(review): with an empty query this strips a bare '?' anywhere in
        # the URI; assumes '?' only appears before the query — confirm.
        return self.uri.replace("?%s" % (self.query or ""), "")

    @property
    def groups(self):
        """Search DIRAC groups in scopes ('g:<group>' entries)

        :return: list
        """
        return [s.split(":")[1] for s in scope_to_list(self.scope or "") if s.startswith("g:") and s.split(":")[1]]

    @property
    def group(self):
        """Search DIRAC group in scopes

        :return: str
        """
        # reuse the `groups` property instead of duplicating the scope parsing
        groups = self.groups
        return groups[0] if groups else None

    @property
    def provider(self):
        """Identity provider stored in request data

        :return: str
        """
        return self.data.get("provider")

    @provider.setter
    def provider(self, provider):
        self.setQueryArguments(provider=provider)

    @property
    def sessionID(self):
        """Session identifier stored in request data

        :return: str
        """
        return self.data.get("id")

    @sessionID.setter
    def sessionID(self, sessionID):
        self.setQueryArguments(id=sessionID)

    def toDict(self):
        """Convert class to dictionary

        :return: dict
        """
        return {"method": self.method, "uri": self.uri}
def createOAuth2Request(request, method_cls=OAuth2Request, use_json=False):
    """Create request object

    :param request: request
    :type request: object, dict
    :param object method_cls: returned class
    :param str use_json: if data is json

    :return: object -- `OAuth2Request`
    """
    # already wrapped: return as-is
    if isinstance(request, method_cls):
        return request
    # plain dict (e.g. produced by OAuth2Request.toDict)
    if isinstance(request, dict):
        return method_cls(request["method"], request["uri"], request.get("body"), request.get("headers"))
    # tornado request with a JSON body
    if use_json:
        return method_cls(request.method, request.full_url(), json_decode(request.body), request.headers)
    # tornado request with form body arguments: keep the last value of each
    body = {}
    for argName, argValues in request.body_arguments.items():
        if argValues:
            body[argName] = argValues[-1].decode("utf-8")
    return method_cls(request.method, request.full_url(), body, request.headers)
|
DIRACGrid/DIRAC
|
src/DIRAC/FrameworkSystem/private/authorization/utils/Requests.py
|
Python
|
gpl-3.0
| 3,349
|
[
"DIRAC"
] |
d3b0c1198ddb32e2c5db87b9c6634b16062e80d277b35ee35023fe9a730dddfd
|
#!/usr/bin/env python
from pyDFTutils.vasp.myvasp import myvasp, default_pps
from pyDFTutils.vasp.vasp_utils import read_efermi
from pyDFTutils.ase_utils.geometry import gen_disped_atoms
from ase.io import read
from pyDFTutils.wannier90.wannier import wannier_input,run_wannier
import os
def calc():
    """
    Run Wannier calculations for the original and displaced structures.

    Reads POSCAR.vasp, generates structures with atom 'Ti1' displaced by
    0.005 in all directions, then runs calc_wannier for the undisplaced
    structure and the first displaced one, each in its own subdirectory.
    """
    atoms = read('POSCAR.vasp')
    d_atoms = gen_disped_atoms(atoms, 'Ti1', distance=0.005, direction='all')
    # (subdirectory, structure) pairs: undisplaced reference, then displaced Ti
    cases = [('orig', atoms), ('disp_Ti_x', d_atoms[0])]
    for path, case_atoms in cases:
        _run_in_dir(path, case_atoms)


def _run_in_dir(path, atoms):
    """Run calc_wannier for atoms inside subdirectory path, always restoring cwd."""
    pwd = os.getcwd()
    if not os.path.exists(path):
        os.mkdir(path)
    os.chdir(path)
    try:
        calc_wannier(atoms)
    finally:
        # restore working directory even if the calculation raises
        os.chdir(pwd)
def calc_wannier(atoms):
    """
    Run a spin-polarised VASP SCF calculation for atoms, then produce
    Wannier90 input for a Ba/Ti/O s+p projection basis and run wannier90
    for both spin channels. All output goes to the current directory.

    :param atoms: ASE Atoms object to calculate.
    """
    mycalc = myvasp(
        xc='PBE',
        gga='PS',
        setups=default_pps,
        ispin=2,
        icharg=0,
        kpts=[6, 6, 6],
        gamma=True,
        prec='normal',
        istart=1,
        lmaxmix=4,
        encut=500)
    mycalc.set(lreal='Auto', algo='normal')
    atoms.set_calculator(mycalc)
    # electronic
    mycalc.set(ismear=-5, sigma=0.1, nelm=100, nelmdl=-6, ediff=1e-7)
    mycalc.set(ncore=1, kpar=3)
    mycalc.scf_calculation()
    # second pass: regenerate overlap/projection files for wannier90
    mycalc.set(
        lwannier90=True,
        lwrite_unk=False,
        lwrite_mmn_amn=True,
        ncore=1,
        kpar=3)
    wa = wannier_input(atoms=atoms)
    efermi = read_efermi()
    wa.set(
        mp_grid=[6, 6, 6],
        num_bands=28,
        guiding_centres=True,
        num_iter=100,
        kmesh_tol=1e-9,
        search_shells=24,
        write_xyz=True,
        hr_plot=True,
    )
    # outer and inner (frozen) energy windows, shifted by the Fermi level
    # — assumed to be in eV; TODO confirm against wannier_input convention
    wa.set_energy_window([-70,0.5],[-67.4,0.4],shift_efermi=efermi)
    wa.add_basis('Ba','s')
    wa.add_basis('Ba','p')
    wa.add_basis('Ti','s')
    wa.add_basis('Ti','p')
    wa.add_basis('O','s')
    wa.add_basis('O','p')
    wa.write_input()
    mycalc.set(nbands=28)
    mycalc.scf_calculation()
    # wannierise each spin channel separately
    run_wannier(spin='up')
    run_wannier(spin='dn')
    #mycalc.ldos_calculation()
# run the workflow only when executed as a script, not when imported
if __name__ == '__main__':
    calc()
|
mailhexu/pyDFTutils
|
examples/wannier_vasp/BEC_BBB.py
|
Python
|
lgpl-3.0
| 2,069
|
[
"ASE",
"VASP",
"Wannier90"
] |
f492fe9e8c33b3c53b17862727ddedfc632509b517595b56b3cd593ed7c1fc91
|
"""
SpringLayout.py
This algorithm is based on the physical spring model, in the sense that nodes
with a link between them will want to pull each other together until the
distance between them equals the resting length of the 'spring'. The further
apart they are, the more force that is applied to bring them to the ideal
length.
The algorithm also takes inspiration from electrical repulsive charges. To
this end, each node is modeled as an electrical charge. The charge is
determined by the screen size of the node. The force of the charge diminishes
with the square of the distance, however forces are accumulated from the
interaction of each node on every other node.
Finally, the algorithm uses the concept of friction to prevent nodes from
slowly sliding away when they are getting nudged by minor electrical forces.
The good: it gives pleasant layouts in many cases, and can be applied to any
subsets of nodes & edges in any meta-model in AToM3... so long as the subset
doesn't contain any hyper-edges :D
Problems and possible solutions with this algorithm:
1) Local minima solutions, which prevent the algorithm from finding better solns.
a) Add more random displacements
2) Running time
a) Implement an approximate spring based system that is less than O(n^2)
3) Edge crossings, intrinsic problem with this type of algorithm.
4) Compactness of representation: spring layouts are intrinsically space inefficient
5) Oscillation
a) Add temperature control to reduce both back and forth oscillation
b) Detect and dampen rotational oscillation
6) 'Random' Tkinter errors at runtime, occurring with large graphs and overlapping nodes
a) Do a pre-processing step that guarantees no overlapping nodes
Overhauled July 19, 2004 by Denis Dube
"""
import math
from random import randint
from OptionDatabase import OptionDatabase
from OptionDialog import OptionDialog
from Utilities import optimizeLinks
from ModelSpecificCode import isConnectionLink
def applyLayout( selection = None, atom3i = None, settingsMode = False ):
# Instantiate the layout algorithm, if not already done
if( SpringLayout.instance == None ):
if( atom3i == None ):
raise Exception, "You forgot to initilize "+__name__+" before using it!"
SpringLayout.instance = SpringLayout(atom3i)
if( atom3i ):
SpringLayout.instance.updateATOM3instance( atom3i )
if( settingsMode ):
SpringLayout.instance.settings(selection) # Change spring parameters
elif( selection ):
SpringLayout.instance.main(selection) # Apply spring
class SpringLayout:
    """
    Spring-electrical layout engine (see module docstring). Holds all tunable
    parameters in an OptionDatabase and runs the O(n^2) force simulation over
    nodeObject/edgeObject proxies built from the selected AToM3 diagram items.
    """

    # Singleton instance managed by applyLayout()
    instance = None

    # Option keys
    MAXIMUM_ITERATIONS = 'Maximum iterations'
    ANIMATION_UPDATES = 'Animation updates'
    SPRING_CONSTANT = 'Spring constant'
    SPRING_LENGTH = 'Spring rest length'
    CHARGE_STRENGTH = 'Charge strength'
    FRICTION = 'Friction'
    RANDOM_AMOUNT = 'Random amount'
    ARROW_CURVATURE = 'Arrow curvature'
    SPLINE_ARROWS = 'Spline arrows'
    STICKY_BOUNDARY = 'Sticky boundary'
    INFO0 = 'Info0'
    INFO1 = 'Info1'
    INFO2 = 'Info2'
    INFO3 = 'Info3'
    INFO4 = 'Info4'

    def __init__(self, atom3i ):
        """
        :param atom3i: AToM3 application instance; its UMLmodel is the drawing canvas.
        """
        self.atom3i = atom3i
        self.dc = atom3i.UMLmodel

        # Instantiate the Option Database module
        self.__optionsDatabase = OptionDatabase(self.atom3i.parent,
              'Options_SpringLayout.py', 'Spring Layout Configuration')

        # Local methods/variables with short names to make things more readable :D
        newOp = self.__optionsDatabase.createNewOption
        IE = OptionDialog.INT_ENTRY
        FE = OptionDialog.FLOAT_ENTRY
        BE = OptionDialog.BOOLEAN_ENTRY
        L = OptionDialog.LABEL

        # Create New Options
        # Format: OptionKey, defaultValue, optionTuple, promptString, helpString
        newOp( self.INFO0, None, [L,"Times 12","black", "center" ],
               "This spring-electrical algorithm:", "" )
        newOp( self.INFO1, None, [L,"Times 12","black", "left" ],
               "Has O(n^2) complexity", "" )
        newOp( self.INFO3, None, [L,"Times 12","black", "left" ],
               "Does not work with hyper-edges", "" )
        newOp( self.INFO2, None, [L,"Times 12","black", "left" ],
               "Is applied only on selected nodes & edges", "" )
        newOp( self.INFO4, None, [L,"Times 12","black", "left" ],"", "" )
        newOp( self.MAXIMUM_ITERATIONS, 100, IE, "Maximum Iterations",
               "Duration of the spring simulation, longer generally gives better results." )
        newOp( self.ANIMATION_UPDATES, 5, IE, "Animation updates",
               "Force update of the canvas every X simulation frames." )
        newOp( self.SPRING_CONSTANT, 0.1, FE, "Spring Constant",
               "The restoring force of the spring, larger values make the spring \"stiffer\"")
        newOp( self.SPRING_LENGTH, 100, IE, "Spring rest length",
               "This is the minimum distance between the 2 nodes")
        newOp( self.CHARGE_STRENGTH, 1000.00, FE, "Charge strength",
               "A multiplier on the repulsive force between each and every node." )
        newOp( self.FRICTION, 0.01, FE, "Friction",
               "Limits the ability of the repulsive force to affect another node." )
        newOp( self.RANDOM_AMOUNT, 0.0, FE, "Initial randomization",
               "Randomizes the initial position of linked nodes as a percentage of spring length." )
        newOp( self.ARROW_CURVATURE, 10, IE, "Arrow curvature",
               "Adds a curve of magnitude X to the arrows, set to 0 for a straight arrow." )
        newOp( self.SPLINE_ARROWS, True, BE, "Spline arrows",
               "Arrows are set to smooth/spline mode and given additional control points." )
        newOp( self.STICKY_BOUNDARY, True, BE, "Sticky boundary",
               "Prevents nodes from escaping the canvas boundaries." )

        # Load the options from the file, on failure the defaults above are used.
        self.__optionsDatabase.loadOptionsDatabase()
        self.__processLoadedOptions()

    def __processLoadedOptions(self):
        """ After loading the database, have to get & store each option value """
        self.__maxIterations = self.__optionsDatabase.get(self.MAXIMUM_ITERATIONS)
        self.__animationUpdates = self.__optionsDatabase.get(self.ANIMATION_UPDATES)
        self.__springConstant = self.__optionsDatabase.get(self.SPRING_CONSTANT)
        self.__springLength = self.__optionsDatabase.get(self.SPRING_LENGTH)
        self.__chargeStrength = self.__optionsDatabase.get(self.CHARGE_STRENGTH)
        self.__friction = self.__optionsDatabase.get(self.FRICTION)
        self.__stickyBoundary = self.__optionsDatabase.get(self.STICKY_BOUNDARY)
        self.__splineArrows = self.__optionsDatabase.get(self.SPLINE_ARROWS)
        self.__arrowCurvature = self.__optionsDatabase.get(self.ARROW_CURVATURE)
        self.__randomness = self.__optionsDatabase.get(self.RANDOM_AMOUNT)

    def updateATOM3instance( self, atom3i ):
        """ Point this (singleton) layout engine at the current AToM3 instance """
        self.atom3i = atom3i

    def settings(self, selection):
        """
        Dialog to interactively change the spring's behavior
        Automatically applies spring layout if not canceled
        """
        if( self.__optionsDatabase.showOptionsDatabase() ):
            self.__processLoadedOptions()
            self.main(selection)

    def main(self, selection):
        """ Build node/edge proxies from the selection and run the simulation """
        if( not selection ): return
        atom3i = self.atom3i
        # reset the shared per-run registries on the proxy classes
        nodeObject.nodeList = []
        edgeObject.edgeList = []
        edgeObject.dc = self.dc

        #------------------------- INFORMATION GATHERING -------------------------
        # Generate a datastructure for the Nodes and Edges in the diagram, containing
        # only the information needed by this algorithm.
        edgeList = []
        nodeDict = dict()
        self.sourceTargetDict = dict()
        for obj in selection:
            if( isConnectionLink( obj ) ):
                # Edge!
                edgeList.append( obj.getSemanticObject() )
            else:
                # Node
                pos = obj.getCenterCoord()
                boundBox = obj.getbbox()
                if( self.__stickyBoundary ):
                    boundary = self.atom3i.CANVAS_SIZE_TUPLE
                else:
                    boundary = None
                n = nodeObject( obj, pos, boundBox, self.__chargeStrength, boundary )
                nodeDict.update( { obj : n } )

        # Now lets go through the "node" edges...
        for node in edgeList:
            # Source object
            key = node.in_connections_[0].graphObject_
            if( not nodeDict.has_key( key ) ): continue
            source = nodeDict[ key ]
            # Target object
            key = node.out_connections_[0].graphObject_
            if( not nodeDict.has_key( key ) ): continue
            target = nodeDict[ key ]
            # Make the edge object with the info...
            edgeObject(node, source, target)
            self.sourceTargetDict[ source ] = target
            # These nodes have edges...
            source.setHasEdgeTrue()
            target.setHasEdgeTrue()

        # Count the beans...
        self.__totalNodes = len( nodeObject.nodeList )
        if( self.__totalNodes <= 1 ): return

        #-------------------------- MAIN SIMULATION LOOP -------------------------
        # Initial card shuffling :D
        if( self.__randomness ):
            self.__shakeThingsUp( self.__randomness )

        i = 0
        while( i < self.__maxIterations ):
            # Calculate the powers that be
            self.__calculateRepulsiveForces()
            self.__calculateAttractiveForces()
            # Move move move!
            for node in nodeObject.nodeList:
                node.commitMove()
            # Force a screen update every x calculation
            if( i % self.__animationUpdates == 0 ):
                self.dc.update_idletasks()
            i+=1

        #--------------------------- FINAL OPTIMIZATIONS -------------------------
        # Optimize the arrows to use the nearest connectors
        optimizeLinks( self.atom3i.cb, self.__splineArrows, self.__arrowCurvature )
        # Make sure the canvas is updated
        self.dc.update_idletasks()

    def __shakeThingsUp( self, randomness ):
        """ Randomizes positions of the nodes forming a link """
        amount = int( randomness * self.__springLength )
        for edgeObj in edgeObject.edgeList:
            source = edgeObj.getSource()
            target = edgeObj.getTarget()
            dx = randint( -amount, amount )
            dy = randint( -amount, amount )
            source.incrementDisplacement( [dx,dy] )
            source.commitMove()
            dx = randint( -amount, amount )
            dy = randint( -amount, amount )
            target.incrementDisplacement( [dx,dy] )
            target.commitMove()

    def __calculateRepulsiveForces(self):
        """ Every node exerts a force on every other node, prevent overlap """
        # If two nodes overlap, set the distance to this value to separate them
        # Hint: the closer two nodes are, the more powerful the repulsion
        overlapDistance = 1
        i = 0
        while( i < self.__totalNodes ):
            j = 0
            ax = ay = 0
            source = nodeObject.nodeList[i]
            iPos = source.getCoords()
            sourceCharge = source.getCharge()
            sourceHasEdge = source.hasEdge()
            while( j < self.__totalNodes ):
                if( i == j ):
                    j += 1
                    continue
                target = nodeObject.nodeList[j]
                j += 1
                # This prevents an unattached node from affecting attached nodes
                if( sourceHasEdge and not target.hasEdge() ):
                    continue
                dx, dy, distance = self.__getDistancesTuple( source, target, overlapDistance )
                # Reduce repulsion of nodes that are tied together by springs
                if( self.sourceTargetDict.has_key( source ) and self.sourceTargetDict[ source ] == target ):
                    distance *= 2
                elif( self.sourceTargetDict.has_key( target ) and self.sourceTargetDict[ target ] == source ):
                    distance *= 2
                charge = max( sourceCharge, target.getCharge() )
                # inverse-square falloff of the repulsive force
                electricForce = charge / ( distance * distance )
                # The cutoff prevents unnecessary movement
                if( electricForce < self.__friction ):
                    continue
                # Accumulate displacement factor
                ax += dx * electricForce
                ay += dy * electricForce
            # Store the accumulated displacement
            source.incrementDisplacement( [ax,ay] )
            #print vDisp, "<-- Repulsion displacement"
            i+=1

    def __calculateAttractiveForces(self):
        """ Every edge exerts forces to draw its nodes close """
        # If two nodes overlap, set distance to this value to separate them
        # Hint: the smaller the distance is relative to the spring rest length, the
        # greater the separating force will be.
        overlapDistance = self.__springLength * 0.75
        for edge in edgeObject.edgeList:
            source = edge.getSource()
            target = edge.getTarget()
            # No fake edges permitted!
            if( source == target or source == None or target == None):
                continue
            dx, dy, distance = self.__getDistancesTuple( source, target, overlapDistance )
            '''
        Basic Spring Equation: F = - k * x

        Replacing x with (d-l)/d, you get no force at the resting spring length,
        and lots of force the farther away from the spring length you are.
        The neat thing here: the spring will contract when the distance exceeds
        the length and expand when the distance is less than the length.

        Spring Equation: F = k * ( distance - length ) / distance
        '''
            attractForce = self.__springConstant * (distance - self.__springLength )
            if( abs(distance) > 0 ):
                attractForce /= distance
            disp = [ attractForce * dx, attractForce * dy ]
            #print attractForce, disp, "<---- attract, disp"
            # Accumulate the displacement factor at the source & target nodes
            target.incrementDisplacement( disp )
            source.incrementDisplacement( [-disp[0], -disp[1]] )

    def __vectorLength2D(self, v ):
        """ Calculates the length of the 2D vector v """
        return math.sqrt( v[0] * v[0] + v[1] * v[1] )

    def __getDistancesTuple( self, sourceNode, targetNode, overlapDistance ):
        """
        Finds the distance between two nodes and handles overlapping.
        Returns normalized dx,dy components as well as the magnitude of the distance
        in a tuple.
        """

        def normalizeAndFixDistance( dx, dy, distance, overlapDistance, overlap=False ):
            """
            Normalizes the dx & dy variables
            If distance is too small, then it modifies dx & dy arbitrarily
            and sets the distance so that the spring will expand violently or
            the electrical charge will blast away...
            """
            if( distance < 1 or overlap ):
                if( abs( dx ) < 1 ):
                    if( dx < 0 ): dx = -1
                    else: dx = 1
                if( abs( dy ) < 1 ):
                    if( dy < 0 ): dy = -1
                    else: dy = 1
                return (dx,dy, overlapDistance )
            else:
                return ( dx / distance, dy / distance, distance )

        sx, sy, sw, sh = sourceNode.getCoordsAndSize()
        tx, ty, tw, th = targetNode.getCoordsAndSize()

        # Position Delta
        dx = sx - tx
        dy = sy - ty

        # Overlap area
        ox = ( sw + tw ) / 2.0
        oy = ( sh + th ) / 2.0
        if( dx < 0 ): ox = -ox
        if( dy < 0 ): oy = -oy

        # Total Node overlap
        if( abs( dx ) < abs( ox ) and abs( dy ) < abs( oy ) ):
            distance = self.__vectorLength2D( [dx,dy] )
            return normalizeAndFixDistance( dx, dy, distance, overlapDistance, overlap=True)
        # No Node Overlap (but maybe overlap along an axis)
        else:
            # If the distance exceeds the size of the nodes, subtract the size
            # otherwise, set the distance to zero.
            if( abs( dx ) > abs( ox ) ): dx -= ox
            else: dx = 0
            if( abs( dy ) > abs( oy ) ): dy -= oy
            else: dy = 0
            distance = self.__vectorLength2D( [dx,dy] )
            return normalizeAndFixDistance( dx, dy, distance, overlapDistance)
class nodeObject:
    """A point mass in the force-directed layout.

    Wraps a graphical object with a position, a bounding box, an electric
    charge (scaled by the node's diagonal) and an accumulated displacement
    vector that is applied in a single step by commitMove().
    """

    # Class-wide registry of every node created so far.
    nodeList = []

    def __init__(self, graphObject, pos, boundBox, chargeStrength, boundary):
        self.graphObject = graphObject
        self.__hasEdge = False
        self.__boundBox = boundBox
        self.__pos = pos
        self.__displacement = [0, 0]
        self.__boundary = boundary
        # Minimum gap (pixels) kept between a node and the canvas border.
        self.__boundaryPad = 50
        width = abs(boundBox[0] - boundBox[2])
        height = abs(boundBox[1] - boundBox[3])
        self.__width = width
        self.__height = height
        # Charge grows with the node's diagonal, so larger nodes repel harder.
        self.__charge = chargeStrength * math.sqrt(width * width + height * height)
        nodeObject.nodeList.append(self)

    def setHasEdgeTrue(self):
        self.__hasEdge = True

    def hasEdge(self):
        return self.__hasEdge

    def getBoundaryBox(self):
        return self.__boundBox

    def getCoordsAndSize(self):
        x, y = self.__pos
        return (x, y, self.__width, self.__height)

    def getCoords(self):
        return self.__pos

    def getCharge(self):
        return self.__charge

    def incrementDisplacement(self, vDisp):
        dx, dy = self.__displacement
        self.__displacement = [dx + vDisp[0], dy + vDisp[1]]

    def commitMove(self):
        """Apply the accumulated displacement, undoing it when the node
        would end up too close to the canvas boundary."""
        dx, dy = self.__displacement
        self.__displacement = [0, 0]
        self.movePositionAndBoundary(dx, dy)
        # Prevent objects from leaving the canvas area.
        if self.__boundary:
            x0, y0, x1, y1 = self.__boundary
            pad = self.__boundaryPad
            left, top, right, bottom = self.__boundBox
            if (left < x0 + pad or top < y0 + pad or
                    right + pad > x1 or bottom + pad > y1):
                # Revert the move; the graphical object is left untouched.
                return self.movePositionAndBoundary(-dx, -dy)
        self.graphObject.Move(dx, dy)

    def movePositionAndBoundary(self, dx, dy):
        self.__pos = [self.__pos[0] + dx, self.__pos[1] + dy]
        left, top, right, bottom = self.__boundBox
        self.__boundBox = [left + dx, top + dy, right + dx, bottom + dy]
class edgeObject:
    """A directed connection between two nodeObject endpoints.

    Instances register themselves in the class-wide edgeList so the layout
    algorithm can iterate over every spring in the graph.
    """

    # All edges created so far (shared, class-level registry).
    edgeList = []
    # Device context shared by all edges; assigned externally.
    dc = None

    def __init__(self, ASGnode, source, target):
        self.ASGnode = ASGnode
        # Endpoints are nodeObject instances.
        self.__source = source
        self.__target = target
        edgeObject.edgeList.append(self)

    def getSource(self):
        return self.__source

    def getTarget(self):
        return self.__target

    def getGrpahicalObject(self):
        # NOTE(review): method name typo ("Grpahical") preserved because
        # external callers may rely on it.
        return self.ASGnode.graphObject_
|
Balannen/LSMASOMM
|
atom3/Kernel/Layout/SpringLayout.py
|
Python
|
gpl-3.0
| 20,022
|
[
"BLAST"
] |
a0df4fec22ee2cc2eff233307a768e59188adbca004e70e550e242d714f31c57
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 C Sommer, C Straehle, U Koethe, FA Hamprecht. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE ABOVE COPYRIGHT HOLDERS ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE ABOVE COPYRIGHT HOLDERS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those of the
# authors and should not be interpreted as representing official policies, either expressed
# or implied, of their employers.
from ilastik.modules.classification.core.features.featureBase import *
import vigra
#*******************************************************************************
# H e s s i a n O f G a u s s i a n *
#*******************************************************************************
class HessianOfGaussian(FeatureBase):
    """Full Hessian matrix of a Gaussian-smoothed image.

    Emits the unique matrix entries per input channel: 3 in 2-D (xx, xy, yy)
    and 6 in 3-D.
    """
    name = "Hessian matrix of Gaussian"
    groups = ['Orientation']
    # Unique Hessian entries per input channel.
    numOutputChannels2d = 3
    numOutputChannels3d = 6
    def __init__(self, sigma):
        FeatureBase.__init__(self,sigma)
        # Border context required for a valid response (~3.5 sigma support).
        self.minContext = int(numpy.ceil(sigma * 3.5))
    def compute2d(self, data):
        if hasattr(vigra.filters, 'hessianOfGaussian2D'):
            # Vigra with axistags
            def hessianOfGaussianEigenvalues(data, sigma):
                return vigra.filters.hessianOfGaussian2D(data, sigma)
            func = hessianOfGaussianEigenvalues
        elif hasattr(vigra.filters, 'hessianOfGaussianEigenvalues'):
            # Vigra without axistags
            # NOTE(review): the attribute probed ('hessianOfGaussianEigenvalues')
            # differs from the one used ('hessianOfGaussian'); this looks
            # copy-pasted from HessianOfGaussianEigenvalues below -- confirm
            # against the vigra versions this code must support.
            func = vigra.filters.hessianOfGaussian
        else:
            raise RuntimeError('Vigra version does not have hessianOfGausian')
        result = self.applyToAllChannels(data, func, self.sigma)
        return result
    def compute3d(self, data):
        if hasattr(vigra.filters, 'hessianOfGaussian3D'):
            # Vigra with axistags
            def hessianOfGaussianEigenvalues(data, sigma):
                return vigra.filters.hessianOfGaussian3D(data, sigma)
            func = hessianOfGaussianEigenvalues
        elif hasattr(vigra.filters, 'hessianOfGaussianEigenvalues'):
            # Vigra without axistags
            # NOTE(review): same probed/used attribute mismatch as compute2d.
            func = vigra.filters.hessianOfGaussian
        else:
            raise RuntimeError('Vigra version does not have hessianOfGausian')
        result = self.applyToAllChannels(data, func, self.sigma)
        return result
#*******************************************************************************
# H e s s i a n O f G a u s s i a n E i g e n v a l u e s *
#*******************************************************************************
class HessianOfGaussianEigenvalues(FeatureBase):
    """Eigenvalues of the Hessian matrix of a Gaussian-smoothed image.

    Emits one channel per eigenvalue: 2 in 2-D, 3 in 3-D.
    """
    name = "Eigenvalues of Hessian matrix of Gaussian"
    groups = ['Texture']
    # One output channel per eigenvalue.
    numOutputChannels2d = 2
    numOutputChannels3d = 3
    def __init__(self, sigma):
        FeatureBase.__init__(self,sigma)
        # Border context required for a valid response (~3.5 sigma support).
        self.minContext = int(numpy.ceil(sigma * 3.5))
    def compute2d(self, data):
        if hasattr(vigra.filters, 'hessianOfGaussian2D'):
            # Vigra with axistags: compose Hessian + tensorEigenvalues manually.
            def hessianOfGaussianEigenvalues(data, sigma):
                return vigra.filters.tensorEigenvalues(vigra.filters.hessianOfGaussian2D(data, sigma))
            func = hessianOfGaussianEigenvalues
        elif hasattr(vigra.filters, 'hessianOfGaussianEigenvalues'):
            # Vigra without axistags: combined convenience function exists.
            func = vigra.filters.hessianOfGaussianEigenvalues
        else:
            raise RuntimeError('Vigra version does not have hessianOfGausian')
        result = self.applyToAllChannels(data, func, self.sigma)
        return result
    def compute3d(self, data):
        if hasattr(vigra.filters, 'hessianOfGaussian3D'):
            # Vigra with axistags: compose Hessian + tensorEigenvalues manually.
            def hessianOfGaussianEigenvalues(data, sigma):
                return vigra.filters.tensorEigenvalues(vigra.filters.hessianOfGaussian3D(data, sigma))
            func = hessianOfGaussianEigenvalues
        elif hasattr(vigra.filters, 'hessianOfGaussianEigenvalues'):
            # Vigra without axistags: combined convenience function exists.
            func = vigra.filters.hessianOfGaussianEigenvalues
        else:
            raise RuntimeError('Vigra version does not have hessianOfGausian')
        result = self.applyToAllChannels(data, func, self.sigma)
        return result
#*******************************************************************************
# S t r u c t u r e T e n s o r E i g e n v a l u e s *
#*******************************************************************************
class StructureTensorEigenvalues(FeatureBase):
    """Eigenvalues of the structure tensor (texture descriptor).

    The integration scale is sigma and the inner (gradient) scale sigma/2.
    """
    name = "Eigenvalues of structure tensor"
    groups = ['Texture']
    numOutputChannels2d = 2
    numOutputChannels3d = 3

    def __init__(self, sigma):
        FeatureBase.__init__(self, sigma)
        # Border context required for a valid response (~3.5 sigma support).
        self.minContext = int(numpy.ceil(sigma * 3.5))

    def compute2d(self, data):
        # Outer (integration) scale = sigma, inner (gradient) scale = sigma/2.
        return self.applyToAllChannels(
            data, vigra.filters.structureTensorEigenvalues,
            self.sigma, self.sigma / 2.0)

    def compute3d(self, data):
        return self.applyToAllChannels(
            data, vigra.filters.structureTensorEigenvalues,
            self.sigma, self.sigma / 2.0)
#*******************************************************************************
# G a u s s i a n G r a d i e n t M a g n i t u d e *
#*******************************************************************************
class GaussianGradientMagnitude(FeatureBase):
    """Magnitude of the Gaussian-smoothed gradient (edge strength)."""
    name = "Gradient Magnitude of Gaussian"
    groups = ['Edge']
    numOutputChannels2d = 1
    numOutputChannels3d = 1

    def __init__(self, sigma):
        FeatureBase.__init__(self, sigma)
        # Border context required for a valid response (~3.5 sigma support).
        self.minContext = int(numpy.ceil(sigma * 3.5))

    def compute2d(self, data):
        return self.applyToAllChannels(
            data, vigra.filters.gaussianGradientMagnitude, self.sigma)

    def compute3d(self, data):
        return self.applyToAllChannels(
            data, vigra.filters.gaussianGradientMagnitude, self.sigma)
#*******************************************************************************
# G a u s s i a n S m o o t h i n g *
#*******************************************************************************
class GaussianSmoothing(FeatureBase):
    """Gaussian-smoothed intensity (color/brightness at scale sigma)."""
    name = "Gaussian Smoothing"
    groups = ['Color']
    numOutputChannels2d = 1
    numOutputChannels3d = 1

    def __init__(self, sigma):
        FeatureBase.__init__(self, sigma)
        # Border context required for a valid response (~3.5 sigma support).
        self.minContext = int(numpy.ceil(sigma * 3.5))

    def compute2d(self, data):
        return self.applyToAllChannels(
            data, vigra.filters.gaussianSmoothing, self.sigma)

    def compute3d(self, data):
        return self.applyToAllChannels(
            data, vigra.filters.gaussianSmoothing, self.sigma)
#*******************************************************************************
# S t r u c t u r e T e n s o r *
#*******************************************************************************
class StructureTensor(FeatureBase):
    """Full structure tensor (orientation descriptor).

    Emits the unique tensor entries per channel: 3 in 2-D, 6 in 3-D.
    """
    name = "Structure Tensor"
    groups = ['Orientation']
    numOutputChannels2d = 3
    numOutputChannels3d = 6

    def __init__(self, sigma):
        FeatureBase.__init__(self, sigma)
        # Border context required for a valid response (~3.5 sigma support).
        self.minContext = int(numpy.ceil(sigma * 3.5))

    def compute2d(self, data):
        # Outer (integration) scale = sigma, inner (gradient) scale = sigma/2.
        return self.applyToAllChannels(
            data, vigra.filters.structureTensor,
            self.sigma, self.sigma / 2.0)

    def compute3d(self, data):
        return self.applyToAllChannels(
            data, vigra.filters.structureTensor,
            self.sigma, self.sigma / 2.0)
#*******************************************************************************
# L a p l a c i a n O f G a u s s i a n *
#*******************************************************************************
class LaplacianOfGaussian(FeatureBase):
    """Laplacian of Gaussian (blob/edge detector) at scale sigma."""
    name = "Laplacian of Gaussian"
    groups = ['Edge']
    numOutputChannels2d = 1
    numOutputChannels3d = 1

    def __init__(self, sigma):
        FeatureBase.__init__(self, sigma)
        # Border context required for a valid response (~3.5 sigma support).
        self.minContext = int(numpy.ceil(sigma * 3.5))

    def compute2d(self, data):
        return self.applyToAllChannels(
            data, vigra.filters.laplacianOfGaussian, self.sigma)

    def compute3d(self, data):
        return self.applyToAllChannels(
            data, vigra.filters.laplacianOfGaussian, self.sigma)
#*******************************************************************************
# D i f f e r e n c e O f G a u s s i a n s *
#*******************************************************************************
class DifferenceOfGaussians(FeatureBase):
    """Difference of two Gaussian smoothings (band-pass edge detector).

    Subtracts a narrower smoothing (0.66 * sigma) from the smoothing at
    sigma, approximating a Laplacian of Gaussian.
    """
    name = "Difference of Gaussians"
    groups = ['Edge']
    numOutputChannels2d = 1
    numOutputChannels3d = 1

    def __init__(self, sigma):
        FeatureBase.__init__(self, sigma)
        # Border context required for a valid response (~3.5 sigma support).
        self.minContext = int(numpy.ceil(sigma * 3.5))

    def compute2d(self, data):
        def dog(img, s):
            wide = vigra.filters.gaussianSmoothing(img, s)
            narrow = vigra.filters.gaussianSmoothing(img, s * 0.66)
            return wide - narrow
        return self.applyToAllChannels(data, dog, self.sigma)

    def compute3d(self, data):
        def dog(img, s):
            wide = vigra.filters.gaussianSmoothing(img, s)
            narrow = vigra.filters.gaussianSmoothing(img, s * 0.66)
            return wide - narrow
        return self.applyToAllChannels(data, dog, self.sigma)
#LocalFeature('Canny', ['Sigma' ], (1, 1), lambda x, s: vigra.analysis.cannyEdgeImage(x, s, 0, 1))
#morphologicalOpening = LocalFeature('Morph Opening', ['Sigma' ], (1, 1), lambda x, s: vigra.morphology.discOpening(x.astype(numpy.uint8), int(s * 1.5 + 1)))
#morphologicalClosing = LocalFeature('Morph Colosing', ['Sigma' ], (1, 1), lambda x, s: vigra.morphology.discClosing(x.astype(numpy.uint8), int(s * 1.5 + 1)))
#svenSpecialWaveFrontDistance = LocalFeature('SvenSpecial 1', [], (1, 1), lambda x: svenSpecial(x))
#svenSpecialWaveFrontDistance = LocalFeature('SvenSpecial 2', [], (1, 1), lambda x: svenSpecialSpecial(x))
#def svenSpecial(x):
# res = vigra.analysis.cannyEdgeImage(x, 2.0, 0.39, 1)
# if numpy.max(res) == 0:
# res[:, :] = 3000
# return res
# else:
# return vigra.filters.distanceTransform2D(res)
#
#def svenSpecialSpecial(x):
# temp = numpy.zeros(x.shape + (4,))
#
# res = vigra.analysis.cannyEdgeImage(x, 2.0, 0.39, 1)
# if numpy.max(res) == 0:
# res[:, :] = 3000
# else:
# res = vigra.filters.distanceTransform2D(res)
# temp[:, :, 0] = res
#
# res = vigra.analysis.cannyEdgeImage(x, 2.2, 0.42, 1)
# if numpy.max(res) == 0:
# res[:, :] = 3000
# else:
# res = vigra.filters.distanceTransform2D(res)
# temp[:, :, 1] = res
#
# res = vigra.analysis.cannyEdgeImage(x, 1.9, 0.38, 1)
# if numpy.max(res) == 0:
# res[:, :] = 3000
# else:
# res = vigra.filters.distanceTransform2D(res)
# temp[:, :, 2] = res
#
# res = vigra.analysis.cannyEdgeImage(x, 1.8, 0.38, 1)
# if numpy.max(res) == 0:
# res[:, :] = 3000
# else:
# res = vigra.filters.distanceTransform2D(res)
# temp[:, :, 3] = res
#
#
# return temp
|
ilastik/ilastik-0.5
|
ilastik/modules/classification/core/features/standardFeatures.py
|
Python
|
bsd-2-clause
| 12,940
|
[
"Gaussian"
] |
62ac36d84de6106c12875c30d3840dac121ab043f12a324e3bb0c7721445e15d
|
import warnings
import sys
import scipy

try:
    import mayavi.mlab as mlab
    import tvtk.api
    from tvtk.util.ctf import PiecewiseFunction
except ImportError:
    # BUGFIX: adjacent string literals concatenate, so without a separator the
    # original warning read "...could not be loadedmayavi plotting...".
    warnings.warn("mayavi and tvtk modules could not be loaded; "
                  "mayavi plotting is unavailable")
def plotLine(vector,val=1.0, close=False, tube_radius=None, index=None, **kwargs):
    """
    PlotLine creates a single plot object from a singular vector or from a
    n-dimensional tuple or list.

    *vector* is either a single object exposing .x() (three coordinate
    arrays) or an iterable of such objects; in the latter case the function
    recurses once per element, threading *index* through the recursion so
    that all segments share one global point numbering.

    Args:
        vector: vector-like object, or iterable of them.
        val (float): scalar attached to every point (drives the coloring).
        close (bool): connect the last point back to the first (surfaces).
        tube_radius: forwarded to mlab.plot3d in the single-vector case.
        index (int): internal recursion accumulator -- do not pass manually.
        **kwargs: forwarded to the mayavi plotting calls.
    """
    plot = False
    try:
        # Vector-like input: pull the three coordinate arrays.
        x = vector.x()
        temp0 = x[0]
        temp1 = x[1]
        temp2 = x[2]
        s = val*scipy.ones(temp0.shape)
        # For surface objects, this keyword allows for the last corner to connect with the first
        if close:
            temp0 = scipy.concatenate((temp0,scipy.atleast_1d(temp0[0])))
            temp1 = scipy.concatenate((temp1,scipy.atleast_1d(temp1[0])))
            temp2 = scipy.concatenate((temp2,scipy.atleast_1d(temp2[0])))
            s = scipy.concatenate((s,scipy.atleast_1d(s[0])))
        if not index is None:
            # Recursive call: build segment connectivity as pairs of
            # consecutive point indices, offset by the running *index*.
            # The -1.5/-.5 bounds make arange stop at N-2 / N-1 for integer N.
            N = len(temp0)
            connect = scipy.vstack([scipy.arange(index, index + N - 1.5),
                                    scipy.arange(index + 1, index + N - .5)]
                                   ).T  # I want to rewrite this...
            index += N
    except AttributeError:
        # Not vector-like: iterate and recurse over the elements.
        temp0 = []
        temp1 = []
        temp2 = []
        s = []
        connect = []
        # if it is not some sort of vector or vector-derived class, iterate through and make a surface object
        if index is None:
            # Top-level call on a collection: remember to plot at the end.
            index = 0
            plot = True
        for i in vector:
            output = plotLine(i, close=close, index=index, **kwargs)
            temp0 += [output[0]]
            temp1 += [output[1]]
            temp2 += [output[2]]
            s += [output[3]]
            connect += [output[4]]
            index = output[5]
        #turn to arrays here so I don't accidentally nest lists or tuples
        temp0 = scipy.hstack(temp0)
        temp1 = scipy.hstack(temp1)
        temp2 = scipy.hstack(temp2)
        s = scipy.hstack(s)
        connect = scipy.vstack(connect)
    if index is None:
        # Top-level call with a single vector: draw it directly.
        try:
            mlab.plot3d(temp0,
                        temp1,
                        temp2,
                        s,
                        vmin=0.,
                        vmax=1.,
                        tube_radius=tube_radius,
                        **kwargs)
        except ValueError:
            # Some inputs arrive with extra dimensions; retry flattened.
            mlab.plot3d(temp0.flatten(),
                        temp1.flatten(),
                        temp2.flatten(),
                        s.flatten(),
                        vmin=0.,
                        vmax=1.,
                        tube_radius=tube_radius,
                        **kwargs)
    else:
        if plot:
            # Top-level call with a collection: one dataset with many lines.
            # follows http://docs.enthought.com/mayavi/mayavi/auto/example_plotting_many_lines.html#example-plotting-many-lines
            src = mlab.pipeline.scalar_scatter(temp0, temp1, temp2, s)
            src.mlab_source.dataset.lines = connect
            lines = mlab.pipeline.stripper(src)
            mlab.pipeline.surface(lines, **kwargs)
        else:
            # Mid-recursion: return the pieces to the calling plotLine.
            return (temp0,temp1,temp2,s,connect,index)
def plotTokamak(tokamak, angle=[0,scipy.pi*2], pts=250, section=None, **kwargs):
    """Plot the vessel structures of a tokamak with mayavi.

    Args:
        tokamak: machine object exposing norm.s, getVessel(i) and
            getMachineCrossSection().
        angle: [start, stop] toroidal angle range of the plotted edges.
            NOTE: mutable default argument; not mutated here, but callers
            should not rely on its identity.
        pts (int): samples per vessel edge.
        section (int): if given, additionally draw this many poloidal
            cross-section outlines, evenly spaced over *angle*; the first
            vessel edge is then included (temp[0:]) rather than skipped.
        **kwargs: forwarded to plotLine.

    NOTE(review): uses xrange, i.e. Python 2 only.
    """
    temp = []
    for i in xrange(tokamak.norm.s.size):
        temp += [tokamak.getVessel(i).edge(angle=angle, pts=pts)]
    if not section is None:
        outline = tokamak.getMachineCrossSection()
        # Spin copies of the poloidal outline around the torus.
        for i in scipy.linspace(angle[0],angle[1],section+1):
            temp += [outline.copy()]
            temp[-1].spin(i)
        plotLine(temp,**kwargs)
    else:
        # Skip the first vessel edge when no cross-sections are drawn.
        plotLine(temp[1:],**kwargs)
def plotSurf(surf):
    """Render *surf* as a 2x2 quadrilateral mesh from its four edge points."""
    pts = surf.edge().x()
    mlab.mesh(pts[0].reshape((2, 2)),
              pts[1].reshape((2, 2)),
              pts[2].reshape((2, 2)))
def plotView(rays,pts=None, **kwargs):
    """Render a bundle of sight-line rays as one closed surface mesh.

    Args:
        rays: sequence of ray objects exposing .x(), .norm.s, ._origin.flag
            and .c().
        pts (int): if given, each ray is resampled in place to this many
            points along its parameter s.  NOTE: this mutates the caller's
            ray objects.
        **kwargs: forwarded to mlab.mesh.

    The first ray is appended again as the last mesh row so the surface
    closes on itself.  NOTE(review): uses xrange, i.e. Python 2 only.
    """
    if not pts is None:
        x = scipy.zeros((len(rays)+1,pts))
        y = scipy.zeros(x.shape)
        z = scipy.zeros(x.shape)
        # Resample every ray to a uniform parameterization (in place).
        for i in rays:
            i.norm.s = scipy.linspace(i.norm.s[0],i.norm.s[-1],pts)
    else:
        x = scipy.zeros((len(rays)+1,len(rays[0].norm.s)))
        y = scipy.zeros(x.shape)
        z = scipy.zeros(x.shape)
    for i in xrange(len(rays)):
        # Convert rays flagged in their origin to the complementary frame.
        if rays[i]._origin.flag:
            rays[i] = rays[i].c()
        x[i] = rays[i].x()[0]
        y[i] = rays[i].x()[1]
        z[i] = rays[i].x()[2]
    # Close the surface by repeating the first ray as the last row.
    x[-1] = rays[0].x()[0]
    y[-1] = rays[0].x()[1]
    z[-1] = rays[0].x()[2]
    mlab.mesh(x,y,z,**kwargs)
def plotVol(volume,pts=15,**kwargs):
    """Volume-render an axisymmetric flux grid on an unstructured hex grid.

    Args:
        volume: equilibrium-like object; assumed to expose getFluxGrid(),
            getRGrid(), getZGrid(), getFluxAxis() and getFluxLCFS().
        pts (int): number of toroidal samples used to revolve the 2-D grid.
        **kwargs: accepted for interface symmetry; currently unused.
    """
    fluxGrid = scipy.squeeze(volume.getFluxGrid()).T
    datain = scipy.zeros((fluxGrid.shape[0],
                          pts,
                          fluxGrid.shape[1]))
    # Revolve the poloidal flux grid about the axis of symmetry.
    for idx in range(pts):
        datain[:,idx,:] = fluxGrid
    # BUGFIX: the original referenced an undefined global 'plasma' below
    # (NameError at runtime); the 'volume' parameter is assumed to expose
    # the same accessors -- TODO confirm against callers.
    temp = genCylGrid(volume.getRGrid(),
                      scipy.linspace(0,2*scipy.pi,pts),
                      volume.getZGrid())
    verticies = genVertsFromPixel(temp)
    hex_type = tvtk.api.tvtk.Hexahedron().cell_type
    # NOTE: '/' here is integer division under Python 2 (this module uses
    # xrange elsewhere, so it targets Python 2).
    temp = temp.reshape((temp.size/3,3))
    verticies = verticies.reshape((verticies.size/8,8))
    sg = tvtk.api.tvtk.UnstructuredGrid(points=temp)
    sg.set_cells(hex_type,verticies)
    sg.point_data.scalars = datain.flatten()
    sg.point_data.scalars.name = 'temp'
    psi_0 = volume.getFluxAxis()
    psi_LCFS = volume.getFluxLCFS()
    psi_min = fluxGrid.min()
    psi_max = fluxGrid.max()
    # NOTE(review): v1/v2 are computed but never used -- presumably intended
    # as normalized vmin/vmax for the volume rendering. Preserved as-is.
    v1 = (psi_0 - psi_min)/(psi_max - psi_min)
    v2 = (psi_LCFS - psi_min)/(psi_max - psi_min)
    mlab.pipeline.volume(sg)
def plotSymIso(plasma,pts=15,**kwargs):
    """Iso-surface plot of the axisymmetric poloidal flux.

    Args:
        plasma: object whose .eq equilibrium exposes getFluxGrid(),
            getRGrid() and getZGrid().
        pts (int): number of toroidal samples used to revolve the 2-D grid.
        **kwargs: forwarded to mlab.pipeline.iso_surface.
    """
    fluxGrid = scipy.squeeze(plasma.eq.getFluxGrid()).T
    # NOTE(review): axes here are (shape[1], pts, shape[0]) while plotVol
    # uses (shape[0], pts, shape[1]); the broadcast below only works when
    # the flux grid is square -- confirm intended ordering.
    datain = scipy.zeros((fluxGrid.shape[1],
                          pts,
                          fluxGrid.shape[0]))
    # Revolve the poloidal flux grid about the axis of symmetry.
    for idx in range(pts):
        datain[:,idx,:] = fluxGrid
    temp = genCylGrid(plasma.eq.getRGrid(),
                      scipy.linspace(0,2*scipy.pi,pts),
                      plasma.eq.getZGrid())
    # NOTE: '/' is integer division under Python 2 (module targets Python 2).
    temp = temp.reshape((temp.size/3,3))
    sgrid = tvtk.api.tvtk.StructuredGrid(dimensions=(plasma.eq.getRGrid().size,
                                                     pts,
                                                     plasma.eq.getZGrid().size))
    sgrid.points = temp
    sgrid.point_data.scalars = datain.ravel()
    sgrid.point_data.scalars.name = 'scalars'
    mlab.pipeline.iso_surface(sgrid,**kwargs)
def plotVol2(plasma,pts=None,lim=False,**kwargs):
    """Volume-render psi on a cartesian grid spanning the machine cross-section.

    Args:
        plasma: object whose .eq equilibrium exposes getRGrid(), rz2psi(),
            getMachineCrossSection(), getFluxLCFS() and getFluxGrid().
        pts (int): samples per axis; defaults to the R-grid size.
        lim (bool): if True, clip the rendered data range at the normalized
            LCFS flux value.
        **kwargs: forwarded to mlab.pipeline.volume.
    """
    if pts is None:
        pts = plasma.eq.getRGrid().size
    zmin = plasma.eq.getMachineCrossSection()[1].min()
    zmax = plasma.eq.getMachineCrossSection()[1].max()
    rmax = plasma.eq.getMachineCrossSection()[0].max()
    # Cartesian sampling grid covering the full torus footprint.
    x,y,z = scipy.mgrid[-rmax:rmax:(2*rmax)/pts,-rmax:rmax:(2*rmax)/pts,zmin:zmax:(zmax-zmin)/pts]
    r = (x**2 + y**2)**.5
    psi = plasma.eq.rz2psi(r,z)
    if lim:
        # BUGFIX: removed the dead store "vmin = plasma.eq.getFluxAxis()*-1"
        # that the original overwrote with 0 before any use.
        vmax = plasma.eq.getFluxLCFS()
        mmax = plasma.eq.getFluxGrid().max()
        vmin = 0
        # Normalize the LCFS flux into the rendered data range.
        vmax = (vmax - vmin)/(mmax-vmin)
        print(vmax)
        vol = mlab.pipeline.volume(mlab.pipeline.scalar_field(x,y,z,psi),vmin=0,vmax=vmax,**kwargs)
    else:
        mlab.pipeline.volume(mlab.pipeline.scalar_field(x,y,z,psi),**kwargs)
def genCartGrid(x0, x1, x2, edges = False):
    """Generate a dense cartesian grid of (x, y, z) points.

    Args:
        x0, x1, x2 (1-D ndarrays): coordinate samples along each axis.
        edges (bool): if True, treat the inputs as cell centers and build the
            grid from the midpoints between samples, extrapolating one extra
            sample at each end (output grows by one along every axis).

    Returns:
        ndarray of shape (x0.size, x1.size, x2.size, 3).
    """
    # scipy's numpy aliases (scipy.empty, scipy.insert, ...) used by the
    # original were deprecated and removed from modern scipy; use numpy.
    import numpy

    if edges:
        # BUGFIX: the original "for i in (...)" loop only rebound its loop
        # variable, so the edge adjustment never affected x0/x1/x2.
        def _edge_centers(a):
            # NOTE(review): left extrapolation uses a[1]/a[2]; 2*a[0]-a[1]
            # may have been intended -- formula preserved from the original.
            a = numpy.insert(a, 0, 2 * a[1] - a[2])
            a = numpy.append(a, 2 * a[-1] - a[-2])
            return (a[1:] + a[:-1]) / 2
        x0 = _edge_centers(numpy.asarray(x0))
        x1 = _edge_centers(numpy.asarray(x1))
        x2 = _edge_centers(numpy.asarray(x2))
    pnts = numpy.empty((x0.size, x1.size, x2.size, 3))
    x0in, x1in, x2in = numpy.meshgrid(x0, x1, x2, indexing='ij')
    pnts[:, :, :, 0] = x0in
    pnts[:, :, :, 1] = x1in
    pnts[:, :, :, 2] = x2in
    return pnts
def genCylGrid(x0,x1,x2,edges=False):
    """Generate cartesian points for a cylindrical grid (r, theta, z).

    Args:
        x0 (1-D ndarray): radii.
        x1 (1-D ndarray): angles in radians.
        x2 (1-D ndarray): heights.
        edges (bool): if True, treat the inputs as cell centers and build the
            grid from the midpoints between samples, extrapolating one extra
            sample at each end (output grows by one along every axis).

    Returns:
        ndarray of shape (x0.size, x1.size, x2.size, 3) holding (x, y, z).
    """
    # scipy's numpy aliases used by the original were removed from modern
    # scipy; use numpy directly.
    import numpy

    if edges:
        # BUGFIX: the original "for i in (...)" loop only rebound its loop
        # variable, so the edge adjustment never affected x0/x1/x2.
        def _edge_centers(a):
            # NOTE(review): extrapolation formula preserved from the original.
            a = numpy.insert(a, 0, 2 * a[1] - a[2])
            a = numpy.append(a, 2 * a[-1] - a[-2])
            return (a[1:] + a[:-1]) / 2
        x0 = _edge_centers(numpy.asarray(x0))
        x1 = _edge_centers(numpy.asarray(x1))
        x2 = _edge_centers(numpy.asarray(x2))
    pnts = numpy.empty((x0.size, x1.size, x2.size, 3))
    # Outer products give x = r*cos(theta), y = r*sin(theta) on the (r, theta) plane.
    xin = numpy.dot(numpy.atleast_2d(x0).T, numpy.atleast_2d(numpy.cos(x1)))
    yin = numpy.dot(numpy.atleast_2d(x0).T, numpy.atleast_2d(numpy.sin(x1)))
    zee = numpy.ones(yin.shape)
    for i in range(x2.size):
        pnts[:, :, i, 0] = xin
        pnts[:, :, i, 1] = yin
        pnts[:, :, i, 2] = x2[i] * zee
    return pnts
def genVertsFromPixel(grid):
    """Build hexahedron vertex indices for every cell of a point grid.

    Reduces the lengths of the three spatial dimensions by 1: each cell is
    spanned by 8 neighbouring grid points, listed in VTK hexahedron order.

    Args:
        grid (ndarray): point grid of shape (n0, n1, n2, 3).

    Returns:
        int ndarray of shape (n0-1, n1-1, n2-1, 8) of flat point indices,
        using Fortran-style flattening (i + n0*(j + n1*k)).
    """
    # scipy's numpy aliases used by the original were removed from modern
    # scipy; use numpy directly.
    import numpy

    output = numpy.empty(numpy.append(numpy.array(grid.shape[:-1]) - 1, 8),
                         dtype=int)
    shape = grid.shape
    # INDEXING DEPENDENT: flat index = i + shape[0]*(j + shape[1]*k).
    idx, jdx, kdx = numpy.mgrid[0:shape[0] - 1,
                                0:shape[1] - 1,
                                0:shape[2] - 1]
    # Bottom face (k), counter-clockwise, then top face (k+1).
    output[..., 0] = idx + shape[0]*(jdx + shape[1]*kdx)
    output[..., 1] = idx + 1 + shape[0]*(jdx + shape[1]*kdx)
    output[..., 2] = idx + 1 + shape[0]*(jdx + 1 + shape[1]*kdx)
    output[..., 3] = idx + shape[0]*(jdx + 1 + shape[1]*kdx)
    output[..., 4] = idx + shape[0]*(jdx + shape[1]*(kdx + 1))
    output[..., 5] = idx + 1 + shape[0]*(jdx + shape[1]*(kdx + 1))
    output[..., 6] = idx + 1 + shape[0]*(jdx + 1 + shape[1]*(kdx + 1))
    output[..., 7] = idx + shape[0]*(jdx + 1 + shape[1]*(kdx + 1))
    return output
def plotTangency(plasma, beam):
    """Placeholder for a nonlinear minimization of r^2 between the plasma
    center (in R, Z) and the defined beam s vector.

    Currently only announces that the feature is missing.
    """
    print('not implemented yet')
|
icfaust/TRIPPy
|
TRIPPy/plot/mayaplot.py
|
Python
|
mit
| 9,720
|
[
"Mayavi"
] |
3e7aef0ceb78ebea98f571543ccc2945cd532b24d65e0d624841c633c63e1a9d
|
"""Classes and methods for segmentation of spherical objects within cells."""
# IMPORT DEPENDENCIES
import matplotlib
import os
import sys
import pickle
import time
from operator import itemgetter
import numpy as np
import pandas as pd
from skimage import io
from skimage.morphology import watershed
from skimage.feature import canny
from scipy.ndimage.filters import gaussian_filter, maximum_filter
from scipy.ndimage.morphology import generate_binary_structure, binary_closing
from scipy.ndimage.morphology import distance_transform_edt
from scipy.ndimage.morphology import binary_erosion, binary_dilation
from scipy.ndimage.morphology import binary_fill_holes, binary_opening
from scipy.ndimage import generic_gradient_magnitude, sobel
import matplotlib.pyplot as plt
matplotlib.use('Agg')
class PexSegmentObj:
'''A container class for objects generated by PexSegmenter.segment().
Objects of class PexSegmentObj contain a raw multipage TIFF image in a
numpy ndarray format, a similarly formatted image with the segmentation
output from PexSegmenter.segment(), a number of segmentation intermediates
that may be useful for diagnosis of segmentation problems, and relevant
metadata that may be useful for analysis. Class methods for output of image
data in TIFF format, for saving of object data in csv format, and for
pickling the entire object are included.
IMPORTANT: Do not call this class on its own. Objects of this class are
generated as output from PexSegmenter.segment(), which provides all of the
parameters described in __init__.
Args !!!IMPORTANT: DO NOT CALL! Passed by PexSegmenter.segment()!!!
f_directory (str): Path to the raw image used for segmentation.
filename (str): raw image filename.
raw_img (np.ndarray of ints): pixel intensity values of raw input image
used as a starting point for segmentation. Each array position
represents a single pixel.
gaussian_img (np.ndarray of ints): pixel intensity values of gaussian
filter output from PexSegmenter.segment().
seg_method (str): Segmentation method provided to
PexSegmenter.__init__().
mode (str): Segmentation mode provided to PexSegmenter.__init__().
threshold_img (binary np.ndarray): Binary array of pixels corresponding
to a segmented object.
dist_map (np.ndarray of ints): ndarray of the Euclidean distance of
each pixel marked as 1 in threshold_img to a background pixel (a
0). Generated by PexSegmenter.segment().
smooth_dist_map (np.ndarray of ints): Smoothed distance map generated
by PexSegmenter.segment().
maxima (binary np.ndarray): Local maxima from the smooth_dist_map,
generated by PexSegmenter.segment().
labs (np.ndarray of ints): Labeled starting points for watershed
segmentation. Generated by PexSegmenter.segment().
watershed_output (np.ndarray of ints): Segmented objects, the final output
from PexSegmenter.segment(). Each array position represents a
single pixel, and all indices with a given integer value make up
one segmented object. 0 represents background.
obj_nums (list of ints): The list of numerical values assigned to
segmented objects.
volumes (dict): A dictionary with obj_num:volume pairs, with volume
being the size of the segmented object in pixels.
to_pdout (list of strings): The names of the attributes to be passed to
to_csv() and to_pandas() for output. Assigned by
PexSegmenter.segment().
mode_params (dict): Dict of additional variables from
PexSegmenter.segment() passed to PexSegmentObj for inclusion in
attributes. Varies depending upon segmentation method. See
PexSegmenter.segment() code for details.
Attributes:
f_directory (str): Path to the raw image used for segmentation.
filename (str): raw image filename.
raw_img (np.ndarray of ints): pixel intensity values of raw input image
used as a starting point for segmentation. Each array position
represents a single pixel.
gaussian_img (np.ndarray of ints): pixel intensity values of gaussian
filter output from PexSegmenter.segment().
seg_method (str): Segmentation method provided to
PexSegmenter.__init__().
mode (str): Segmentation mode provided to PexSegmenter.__init__().
threshold_img (binary np.ndarray): Binary array of pixels corresponding
to a segmented object.
dist_map (np.ndarray of ints): ndarray of the Euclidean distance of
each pixel marked as 1 in threshold_img to a background pixel (a
0). Generated by PexSegmenter.segment().
smooth_dist_map (np.ndarray of ints): Smoothed distance map generated
by PexSegmenter.segment().
maxima (binary np.ndarray): Local maxima from the smooth_dist_map,
generated by PexSegmenter.segment().
labs (np.ndarray of ints): Labeled starting points for watershed
segmentation. Generated by PexSegmenter.segment().
peroxisomes (np.ndarray of ints): Segmented objects, the final output
from PexSegmenter.segment(). Each array position represents a
single pixel, and all indices with a given integer value make up
one segmented object. 0 represents background.
slices (int): The number of Z-slices in the image.
height (int): The Y-direction image size in pixels.
width (int): The X-direction image size in pixels.
obj_nums (list of ints): The list of numerical values assigned to
segmented objects.
npexs (int): The number of segmented objects.
volumes (dict): A dictionary with obj_num:volume pairs, with volume
being the size of the segmented object in pixels.
volumes_flag (str): The units for volume measurement. Always assigned
as 'pixels'.
pdout (list of strings): The names of the attributes to be passed to
to_csv() and to_pandas() for output. Assigned by
PexSegmenter.segment().
border_rm_flag (bool): Indication of whether or not objects on the edge
of the image are removed. Defaults to False.
'''
def __init__(self, f_directory, filename, raw_img, gaussian_img,
seg_method, mode, threshold_img, dist_map,
smooth_dist_map, maxima, labs, watershed_output,
obj_nums, volumes, to_pdout = [],
mode_params = {}):
'''Initialize the PexSegmentObj with segmentation data.'''
print('creating PexSegmentObj...')
self.f_directory = f_directory
self.filename = os.path.basename(filename).lower()
self.raw_img = raw_img.astype('uint16')
self.gaussian_img = gaussian_img.astype('uint16')
self.seg_method = seg_method
self.mode = mode
self.threshold_img = threshold_img.astype('uint16')
self.dist_map = dist_map.astype('uint16')
self.smooth_dist_map = smooth_dist_map.astype('uint16')
self.maxima = maxima.astype('uint16')
self.labs = labs.astype('uint16')
self.peroxisomes = watershed_output.astype('uint16')
self.slices = self.raw_img.shape[0]
self.height = self.raw_img.shape[1]
self.width = self.raw_img.shape[2]
self.obj_nums = obj_nums
self.npexs = len(self.obj_nums)
self.volumes = volumes
self.volumes_flag = 'pixels'
self.pdout = []
self.border_rm_flag = False
for key in mode_params:
if hasattr(self, key):
# raise an error if an attribute is somehow passed twice
raise AttributeError('Two copies of the attribute ' + key +
'were provided to PexSegmentObj.__init__()')
setattr(self, key, mode_params[key])
if to_pdout != []:
for x in to_pdout:
self.pdout.append(x)
def __repr__(self):
return 'PexSegmentObj '+ self.filename
def plot_raw_img(self, display = False):
'''Plot the raw image using matplotlib.'''
self.plot_stack(self.raw_img, colormap = 'gray')
if display == True:
plt.show()
def plot_gaussian_img(self, display = False):
'''Plot the gaussian image using matplotlib.'''
self.plot_stack(self.gaussian_img, colormap = 'gray')
if display == True:
plt.show()
def plot_threshold_img(self, display = False):
'''Plot the threshold image using matplotlib.'''
self.plot_stack(self.threshold_img, colormap = 'gray')
if display == True:
plt.show()
def plot_dist_map(self, display = False):
'''Plot the distance map image using matplotlib.'''
self.plot_stack(self.dist_map)
if display == True:
plt.show()
def plot_smooth_dist_map(self, display = False):
'''Plot the smoothed distance map image using matplotlib.'''
self.plot_stack(self.smooth_dist_map)
if display == True:
plt.show()
def plot_maxima(self, display = False):
'''Plot the maxima image using matplotlib.'''
# expand maxima to make them more easily visible in the output.
vis_maxima = binary_dilation(self.maxima,
structure = np.ones(shape = (1,5,5)))
masked_maxima = np.ma.masked_where(vis_maxima == 0, vis_maxima)
self.plot_maxima_stack(masked_maxima, self.smooth_dist_map)
if display == True:
plt.show()
def plot_watershed(self, display = False):
'''Plot the segmented objects image using matplotlib.'''
self.plot_stack(self.peroxisomes)
if display == True:
plt.show()
def output_all_images(self, output_dir = None):
'''Write all images to a new directory.
Write all images associated with the PexSegmentObj to a new directory.
Name that directory according to the filename of the initial image that
the object was derived from, unless an output directory is provided.
Args:
output_dir (str, optional): The directory to output image files to.
If not provided, the images will be a subdirectory to the
directory containing the initial raw image, which is named based
on the filename of the raw image.
'''
if output_dir == None:
# name as described in args
output_dir = self.f_directory + '/' + self.filename[0:self.filename.index('.tif')]
# make the output directory if it doesn't already exist
if not os.path.isdir(output_dir):
print('creating output directory...')
os.mkdir(output_dir)
os.chdir(output_dir)
print('writing images...')
# save numpy ndarrays as tif images using skimage.io.imsave
io.imsave('raw_'+self.filename, self.raw_img)
io.imsave('gaussian_'+self.filename, self.gaussian_img)
io.imsave('threshold_'+self.filename, self.threshold_img)
io.imsave('dist_'+self.filename, self.dist_map)
io.imsave('smooth_dist_'+self.filename,self.smooth_dist_map)
io.imsave('maxima_'+self.filename,self.maxima)
io.imsave('wshed_'+self.filename,self.peroxisomes)
if hasattr(self,'edges'):
io.imsave('edges_'+self.filename,self.edges)
def output_image(self, imageattr, output_dir = None):
'''Write one specific image attribute to a new directory.
Write an image associated with the PexSegmentObj to a new directory.
Name that directory according to the filename of the initial image that
the object was derived from, unless an output directory is provided.
Args:
imageattr (str): The name of the image attribute to be saved.
output_dir (str, optional): The directory to output image files to.
If not provided, the images will be a subdirectory to the
directory containing the initial raw image, which is named based
on the filename of the raw image.
'''
if output_dir == None:
output_dir = self.f_directory + '/' + self.filename[0:self.filename.index('.tif')]
# if the output directory doesn't already exist, make it.
if not os.path.isdir(output_dir):
print('creating output directory...')
os.mkdir(output_dir)
os.chdir(output_dir)
print('writing image' + str(imageattr))
# save the ndarray as a tif image using skimage.io.imsave
io.imsave(str(imageattr)+self.filename, getattr(self,str(imageattr)))
def output_plots(self):
'''Write PDFs of slice-by-slice plots.
Output: PDF plots of each image within PexSegmentObj in a directory
named for the original filename they were generated from. Plots are
generated using the plot_stack method and plotting methods defined
here.
'''
os.chdir(self.f_directory)
if not os.path.isdir(self.f_directory + '/' +
self.filename[0:self.filename.index('.tif')]):
print('creating output directory...')
os.mkdir(self.f_directory + '/' +
self.filename[0:self.filename.index('.tif')])
os.chdir(self.f_directory + '/' +
self.filename[0:self.filename.index('.tif')])
print('saving plots...')
self.plot_raw_img()
plt.savefig('praw_'+self.filename[0:self.filename.index('.tif')]+'.pdf')
self.plot_gaussian_img()
plt.savefig('pgaussian_' +
self.filename[0:self.filename.index('.tif')]+'.pdf')
self.plot_threshold_img()
plt.savefig('pthreshold_' +
self.filename[0:self.filename.index('.tif')]+'.pdf')
plt.savefig('pdist_' +
self.filename[0:self.filename.index('.tif')]+'.pdf')
self.plot_smooth_dist_map()
plt.savefig('psmooth_dist_' +
self.filename[0:self.filename.index('.tif')]+'.pdf')
self.plot_maxima()
plt.savefig('pmaxima_' +
self.filename[0:self.filename.index('.tif')]+'.pdf')
self.plot_watershed()
plt.savefig('pwshed_' +
self.filename[0:self.filename.index('.tif')]+'.pdf')
def pickle(self, output_dir = None, filename = None):
'''pickle the PexSegmentObj for later loading.'''
if output_dir == None:
output_dir = self.f_directory + '/' + self.filename[0:self.filename.index('.tif')]
if filename == None:
filename = self.filename[0:self.filename.index('.tif')] + '.pickle'
if not os.path.isdir(output_dir):
print('creating output directory...')
os.mkdir(output_dir)
os.chdir(output_dir)
with open(filename, 'wb') as f:
pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)
f.close()
def output_all(self):
'''Output images as tifs and plots as pdfs, and pickle object.'''
os.chdir(self.f_directory)
# make output directories if they don't exist
if not os.path.isdir(self.f_directory + '/' +
self.filename[0:self.filename.index('.tif')]):
os.mkdir(self.f_directory + '/' +
self.filename[0:self.filename.index('.tif')])
os.chdir(self.f_directory + '/' +
self.filename[0:self.filename.index('.tif')])
print('outputting all data...')
self.output_plots()
self.output_all_images()
self.pickle()
# TODO: UPDATE THIS METHOD TO INCLUDE PANDAS OUTPUT
def to_csv(self, output_dir = None):
'''Output attributes designated by self.pdout in csv format.
Args:
output_dir (str, optional): Directory to output csv to. If not
provided, a new subdirectory is created within the directory
containing the raw image named based on the raw image name, and the
csv is saved there.
'''
os.chdir(self.f_directory)
if output_dir == None:
output_dir = self.f_directory + '/' + self.filename[0:self.filename.index('.tif')]
if not os.path.isdir(output_dir):
print('creating output directory...')
os.mkdir(output_dir)
os.chdir(output_dir)
for_csv = self.to_pandas()
for_csv.to_csv(path_or_buf = output_dir + '/' +
self.filename[0:self.filename.index('.tif')]+ '.csv',
index = True, header = True)
def rm_border_objs(self, border = 1, z = True):
'''remove all objects that contact the edge of the 3D stack.
Args:
border (int, optional): the size of the border around the edge which a pixel from
the object must contact to be removed. Defaults to 1.
z (bool, optional): should objects that contact the z-axis edge be eliminated? if
True, any object with a pixel in the top or bottom image of the
stack is removed. Defaults to True.
Output: alters the objects within the PexSegmentObj. removes objects
from the peroxisomes image, the obj_nums, and all other variables with
elements of obj_nums as keys in a dict (parents, volumes, etc)
'''
border_mask = np.full(shape = self.peroxisomes.shape, fill_value = True,
dtype = bool)
if z == True:
border_mask[border:-border,border:-border,border:-border] = False
elif z == False:
border_mask[:,border:-border,border:-border] = False
objs_to_rm = np.unique(self.peroxisomes[border_mask])
objs_to_rm = objs_to_rm[objs_to_rm != 0]
for x in objs_to_rm:
self.peroxisomes[self.peroxisomes == x] = 0
self.obj_nums.remove(x)
self.volumes.pop(x, None)
if hasattr(self, "parent"):
self.parent.pop(x, None)
self.npexs = len(self.obj_nums)
self.border_rm_flag = True
# HELPER METHODS #
def to_pandas(self):
'''create a pandas DataFrame of tabulated numeric data.
the pdout attribute indicates which variables to include in the
DataFrame.
Helper method for self.to_csv().
'''
df_dict = {}
for attr in self.pdout:
df_dict[str(attr)] = pd.Series(getattr(self, attr))
if 'volumes' in self.pdout:
vflag_out = dict(zip(self.obj_nums,
[self.volumes_flag]*len(self.obj_nums)))
df_dict['volumes_flag'] = pd.Series(vflag_out)
return pd.DataFrame(df_dict)
def convert_volumes(self, z = 0.2, x = 0.0675):
'''convert volumes from units of pixels to metric units.
The default values provided correspond to the appropriate values for
imaging using the Murray spinning disk confocal microscope with 0.2 um
spacing in between slices.
Args:
z (float, optional): the distance between slices in the z-stack in
units of microns.
x (float, optional): the linear distance between adjacent pixels in
each slice in units of microns. x is also used for y.
Output: converts self.volumes to units of femtoliters, and changes the
self.volumes_flag to 'femtoliters'.
'''
conv_factor = z*x*x
for key, val in self.volumes:
self.volumes[key] = float(self.volumes[key])*conv_factor
self.volumes_flag = 'femtoliters'
@staticmethod
def plot_stack(stack_arr, colormap='jet'):
''' Create a matplotlib plot with each subplot containing a slice.
Args:
stack_arr (ndarray of ints): A numpy ndarray containing pixel
intensity values.
colormap: The colormap to be used when displaying pixel intensities.
Defaults to jet.
Yield: a pyplot object in which each slice from the image array
is represented in a subplot. subplots are 4 columns
across (when 4 or more slices are present) with rows to
accommodate all slices.
'''
nimgs = stack_arr.shape[0] # z axis of array dictates number of slices
# plot with 4 imgs across
# determine how many rows and columns of images there are
if nimgs < 5:
f, axarr = plt.subplots(1,nimgs)
for i in range(0,nimgs):
axarr[i].imshow(stack_arr[i,:,:], cmap=colormap)
axarr[i].xaxis.set_visible(False)
axarr[i].yaxis.set_visible(False)
f.set_figwidth(16)
f.set_figheight(4)
else:
f, axarr = plt.subplots(int(np.ceil(nimgs/4)),4)
for i in range(0,nimgs):
r = int(np.floor(i/4))
c = int(i % 4)
axarr[r,c].imshow(stack_arr[i,:,:], cmap=colormap)
axarr[r,c].xaxis.set_visible(False)
axarr[r,c].yaxis.set_visible(False)
# manage remainder images
if nimgs%4 > 0:
r = int(np.floor(nimgs/4))
for c in range(nimgs%4,4):
axarr[r,c].axis('off')
f.set_figwidth(16)
f.set_figheight(4*np.ceil(nimgs/4))
@staticmethod
def plot_maxima_stack(masked_max, smooth_dist):
''' Creates a matplotlib plot object in which each slice from the image
is displayed as a single subplot, in a 4-by-n matrix (n depends upon
the number of slices in the image)'''
nimgs = masked_max.shape[0] # z axis of array dictates number of slices
# plot with 4 imgs across
# determine how many rows and columns of images there are
if nimgs < 5:
f, axarr = plt.subplots(1,nimgs)
for i in range(0,nimgs):
axarr[i].imshow(smooth_dist[i,:,:], cmap='gray')
axarr[i].imshow(masked_max[i,:,:], cmap='autumn')
axarr[i].xaxis.set_visible(False)
axarr[i].yaxis.set_visible(False)
f.set_figwidth(16)
f.set_figheight(4)
else:
f, axarr = plt.subplots(int(np.ceil(nimgs/4)),4)
for i in range(0,nimgs):
r = int(np.floor(i/4))
c = int(i%4)
axarr[r,c].imshow(smooth_dist[i,:,:], cmap='gray')
axarr[r,c].imshow(masked_max[i,:,:], cmap='autumn')
axarr[r,c].xaxis.set_visible(False)
axarr[r,c].yaxis.set_visible(False)
if nimgs%4 > 0:
r = int(np.floor(nimgs/4))
for c in range(nimgs%4, 4):
axarr[r,c].axis('off')
f.set_figwidth(16)
f.set_figheight(4*np.ceil(nimgs/4))
class PexSegmenter:
    '''Class and methods for segmenting foci from multipage TIFF images.
    When called, generates an object of class PexSegmenter with the
    segmentation parameters defined as per Args for the image indicated. The
    segment() method can then be called to perform segmentation, returning a
    PexSegmentObj.
    Args:
        filename (str): The filename of the multipage TIFF-format image to be
            segmented. This is defined with respect to the current working
            directory.
        seg_method (str, optional): The segmentation approach to use in a string
            format. This can be one of the following:
            threshold (default): Segmentation by setting an absolute cutoff for
                minimum pixel intensity of an object. Requires kwargs dependent
                upon the segmentation mode (see mode below).
            canny: Segmentation by Canny edge detection. Requires kwargs
                high_threshold and low_threshold to be set, or the default
                values 1000 and 500 will be used.
        mode (str, optional): The segmentation mode to be used if seg_method ==
            threshold. This can be one of the following:
            threshold (default): Uses a user-provided pixel intensity value as
                the segmentation threshold. Requires kwarg threshold.
            bg_scaled: Use the median value from all pixels that correspond to
                cells within a CellSegmentObj (see CellSegment.py in this
                module) segmented from another fluorescence channel as the
                background. Requires kwargs cells and bg_diff.
        g_xy(int, optional): Standard deviation in units of pixels (xy direction
            only) for the 3D gaussian smoothing prior to segmentation. Defaults
            to 1 pixel. May need empirical optimization depending upon the size
            of desired particles and background noise.
        g_z(int, optional): Standard deviation in units of Z-slices (z direction
            only) for the 3D gaussian smoothing prior to segmentation. Defaults
            to 1 slice. May need empirical optimization depending upon the
            spacing between slices. Setting to 0 results in only 2D smoothing.
    Additional kwargs:
        threshold (int): Only relevant if using seg_method threshold and mode
            threshold. The pixel intensity value to be used as a cutoff for
            segmented objects.
        cells (str): Only relevant if using seg_method threshold and mode
            bg_scaled. The name of a CellSegmentObj already present in the
            environment which corresponds to cells segmented from the same
            field and in a separate channel. See CellSegment.py for details.
        bg_diff (int): Only relevant if using seg_method threshold and mode
            bg_scaled. The desired pixel intensity units above the cells'
            median pixel intensity to set the threshold.
        high_threshold (int, optional): Only relevant if using seg_method
            "canny". The high threshold to be passed to skimage's canny edge
            detector. Default value is 1000. See skimage.feature.canny
            documentation for more details.
        low_threshold (int, optional): Only relevant if using seg_method
            "canny". The low threshold to be passed to skimage's canny edge
            detector. Default value is 500. See skimage.feature.canny
            documentation for more details.
    Attributes:
        filename (str): The filename provided in args.
        seg_method (str): The segmentation method provided in args, defaults to
            'threshold'.
        mode (str): The segmentation mode provided in args, defaults to
            'threshold'.
        g_xy (int): The gaussian filter xy standard deviation in pixels.
            Defaults to 1.
        g_z (int): The gaussian filter z standard deviation in slices.
            Defaults to 1.
        Additional attributes provided by **kwargs (see Args, Additional kwargs
        above).
    '''
    def __init__(self, filename='', src_data=None, seg_method='threshold',
                 mode='threshold', g_xy=1, g_z=1, **kwargs):
        # set attributes
        if src_data is None and filename == '':
            raise ValueError('no input image provided.')
        self.filename = filename
        self.src_data = src_data
        self.seg_method = seg_method
        self.mode = mode
        self.g_xy = g_xy
        self.g_z = g_z
        for key, value in kwargs.items():
            setattr(self, key, value)
        if self.seg_method == 'canny':
            # BUG FIX: the original tested
            # `not self.mode == 'absolute' or self.mode == 'scaled'`, which
            # parses as `(not a) or b` and therefore clobbered an explicit
            # mode='scaled' back to 'absolute'. Use a membership test.
            if self.mode not in ('absolute', 'scaled'):
                self.mode = 'absolute' # if it wasn't set, use absolute
            self.high_threshold = kwargs.get('high_threshold',1000)
            self.low_threshold = kwargs.get('low_threshold',500)
        if self.seg_method == 'threshold':
            if mode == 'threshold':
                self.threshold = kwargs.get('threshold',float('nan'))
                if np.isnan(self.threshold):
                    raise ValueError('A threshold argument must be provided to segment with a constant threshold.')
            if mode == 'bg_scaled':
                self.cells = kwargs.get('cells', '')
                self.bg_diff = float(kwargs.get('bg_diff',float('nan')))
                if self.cells == '':
                    raise ValueError('A CellSegmentObj containing segmented cells is required if mode == bg_scaled.')
                if np.isnan(self.bg_diff):
                    raise ValueError('a bg_diff argument is needed if mode == bg_scaled.')
    def segment(self, fill_holes=False, edt_sampling=(3,1,1),
                edt_smooth=(1,3,3)):
        """Segment objects within the image according to attributes provided.

        Args:
            fill_holes (bool, optional): fill holes in thresholded objects
                slice-by-slice before the distance transform. Defaults False.
            edt_sampling (tuple, optional): per-axis sampling passed to
                distance_transform_edt. Defaults to (3,1,1).
            edt_smooth (tuple, optional): per-axis gaussian sigma for
                smoothing the distance map. Defaults to (1,3,3).
                (NOTE: originally a mutable list default; converted to a
                tuple — it is never mutated, so behavior is unchanged.)
        Yields: a PexSegmentObj containing segmented objects as well as all
                images generated during segmentation (for post-hoc analysis) as
                well as relevant values, e.g. numbers and names of segmented
                particles. See PexSegmentObj documentation for more details.
        """
        starttime = time.time() # begin timing
        f_directory = os.getcwd()
        pdout = [] # list of PexSegmentObj attributes to pass to pandas for csv
        # data import
        if self.filename != '':
            print('reading' + self.filename)
            raw_img = io.imread(self.filename)
        elif self.src_data is not None:
            raw_img = self.src_data
        print('raw image imported.')
        if self.seg_method == 'pre-thresholded':
            gaussian_img = raw_img
        else:
            # gaussian filter
            print('performing gaussian filtering...')
            gaussian_img = gaussian_filter(raw_img,
                                           [self.g_z, self.g_xy, self.g_xy])
            print('Image smoothed.')
        print('preprocessing complete.')
        ## SEGMENTATION BY THRESHOLDING THE GAUSSIAN ##
        if self.seg_method == 'threshold':
            # binary thresholding and cleanup
            print('thresholding...')
            threshold_img = np.copy(gaussian_img)
            if self.mode == 'threshold':
                print('mode = threshold.')
                # make binary image
                threshold_img[threshold_img < self.threshold] = 0
                threshold_img[threshold_img > 0] = 1
                print('thresholding complete.')
                if fill_holes:
                    print('filling holes in objects.')
                    for i in range(0,threshold_img.shape[0]):
                        threshold_img[i, :, :] = binary_fill_holes(
                            threshold_img[i, :, :])
            elif self.mode == 'bg_scaled':
                print('mode = background-scaled.')
                self.thresholds = {}
                threshold_img = np.zeros(shape = raw_img.shape)
                for i in self.cells.obj_nums:
                    if i == 0:
                        pass
                    else:
                        print('thresholding cell ' + str(i))
                        # get median for the cell
                        cell_median = np.median(gaussian_img[self.cells.final_cells == i])
                        # generate the thresholded binary mask for each cell
                        threshold_img[np.logical_and(self.cells.final_cells == i,
                                                     gaussian_img > cell_median + self.bg_diff)] = 1
                        self.thresholds[i] = cell_median + self.bg_diff #store val
                print('thresholding complete.')
            else:
                raise ValueError('mode parameter must be bg_scaled or threshold.')
            # distance and maxima transformation to find objects
            # next two steps assume 100x objective and 0.2 um slices
            print('generating distance map...')
            dist_map = distance_transform_edt(threshold_img, sampling = edt_sampling)
            print('distance map complete.')
            print('smoothing distance map...')
            # smooth the distance map
            smooth_dist = gaussian_filter(dist_map, edt_smooth)
            print('distance map smoothed.')
            print('identifying maxima...')
            # find local maxima in the smoothed distance map
            # these will be the watershed seeds
            max_strel = generate_binary_structure(3,2)
            maxima = maximum_filter(smooth_dist,
                                    footprint = max_strel) == smooth_dist
            # clean up background and edges
            bgrd_3d = smooth_dist == 0
            eroded_bgrd = binary_erosion(bgrd_3d, structure = max_strel,
                                         border_value = 1)
            maxima = np.logical_xor(maxima, eroded_bgrd)
            print('maxima identified.')
            # watershed segmentation
            labs = self.watershed_labels(maxima)
            print('watershedding...')
            peroxisomes = watershed(-smooth_dist, labs, mask = threshold_img)
            print('watershedding complete.')
            if self.mode == 'bg_scaled':
                # find cell boundaries and define objects that are on the
                # edges, then assign segmented objects to parent cells
                edge_struct = generate_binary_structure(3,1)
                self.c_edges = {}
                print('finding edges of cells...')
                for i in self.cells.obj_nums:
                    self.c_edges[i] = np.logical_xor(self.cells.final_cells == i,
                                                     binary_erosion(self.cells.final_cells== i,
                                                                    edge_struct))
                print('cell edges found.')
                self.primary_objs = [x for x in np.unique(peroxisomes) if x != 0]
                self.parent = {}
                self.obj_edges = {}
                self.on_edge = {}
                pex_mask = peroxisomes != 0
                for obj in self.primary_objs:
                    self.parent[obj] = self.cells.final_cells[labs == obj][0]
                    obj_mask = peroxisomes == obj
                    obj_edge = np.logical_xor(obj_mask,
                                              binary_erosion(obj_mask,
                                                             edge_struct))
                    self.obj_edges[obj] = obj_edge
                    # test if the object's edge and its cell's edge overlap
                    if np.any(np.logical_and(obj_edge,
                                             self.c_edges[self.parent[obj]])):
                        self.on_edge[obj] = True
                        print('object on the edge: ' + str(obj))
                        print('parent cell: ' + str(self.parent[obj]))
                        new_obj = obj_mask
                        search_obj = obj_mask
                        tester = 0
                        iteration = 1
                        while tester == 0:
                            # TODO: FIX THIS BLOCK OF CODE! GETTING STUCK WITHIN
                            # IT! NOT SURE HOW MANY ITERATIONS ITS DOING, OR FOR
                            # HOW MANY DIFFERENT PEROXISOMES.
                            new_px = binary_dilation(search_obj, edge_struct)
                            new_px[np.logical_or(new_obj, pex_mask)] = False
                            print('iteration: ' + str(iteration))
                            if np.any(gaussian_img[new_px] >
                                      self.thresholds[self.parent[obj]]):
                                to_add = np.logical_and(new_px, gaussian_img >
                                                        self.thresholds[self.parent[obj]])
                                new_obj = np.logical_or(new_obj, to_add)
                                search_obj = to_add # only search from new pixels
                            else:
                                peroxisomes[new_obj] = obj
                                tester = 1
                            iteration = iteration + 1
                    else:
                        self.on_edge[obj] = False
        elif self.seg_method == 'canny':
            ## EDGE-DETECTION BASED SEGMENTATION ##
            threshold_img = np.empty_like(gaussian_img)
            edge_img = np.empty_like(gaussian_img)
            c_strel = generate_binary_structure(2,1)
            # perform canny edge detection on each slice s
            for s in range(0,gaussian_img.shape[0]):
                if self.mode == 'absolute':
                    c = canny(gaussian_img[s, :, :],
                              sigma=0,
                              low_threshold=self.low_threshold,
                              high_threshold=self.high_threshold)
                elif self.mode == 'scaled':
                    c = canny(gaussian_img[s, :, :],
                              sigma=0,
                              low_threshold=self.low_threshold,
                              high_threshold=self.high_threshold,
                              use_quantiles=True)
                # clean up object edges that have gaps
                c = binary_closing(c,c_strel)
                edge_img[s,:,:] = np.copy(c)
                # fill holes to generate binary mask of objects
                c = binary_fill_holes(c)
                c = binary_opening(c, c_strel) # eliminate incomplete lines
                threshold_img[s,:,:] = c
            print('generating distance map...')
            dist_map = distance_transform_edt(threshold_img, sampling = (3,1,1))
            print('distance map complete.')
            print('smoothing distance map...')
            smooth_dist = gaussian_filter(dist_map, [1,2,2])
            print('distance map smoothed.')
            print('identifying maxima...')
            max_strel = generate_binary_structure(3,2)
            # identify local maxima (these will be the seed points for
            # watershed segmentation)
            maxima = maximum_filter(smooth_dist,
                                    footprint = max_strel) == smooth_dist
            # clean up background and edges
            bgrd_3d = smooth_dist == 0
            eroded_bgrd = binary_erosion(bgrd_3d, structure = max_strel,
                                         border_value = 1)
            maxima = np.logical_xor(maxima, eroded_bgrd)
            print('maxima identified.')
            # watershed segmentation
            labs = self.watershed_labels(maxima)
            print('watershedding...')
            peroxisomes = watershed(-smooth_dist, labs, mask = threshold_img)
            print('watershedding complete.')
            if hasattr(self, 'cells'):
                # assign segmented objects to cells if a CellSegmentObj was
                # included
                self.primary_objs = [x for x in np.unique(peroxisomes) \
                                     if x != 0]
                self.parent = {}
                for obj in self.primary_objs:
                    o_parent = self.cells.final_cells[labs == obj][0]
                    if o_parent == 0:
                        self.primary_objs.remove(obj)
                    else:
                        self.parent[obj] = o_parent
        elif self.seg_method == 'pre-thresholded':
            threshold_img = np.copy(gaussian_img)
            if fill_holes:
                print('filling holes in objects.')
                for i in range(0, threshold_img.shape[0]):
                    threshold_img[i, :, :] = binary_fill_holes(
                        threshold_img[i, :, :])
                print('holes filled.')
            dist_map = distance_transform_edt(threshold_img,
                                              sampling=edt_sampling)
            print('distance map complete.')
            print('smoothing distance map...')
            # smooth the distance map
            smooth_dist = gaussian_filter(dist_map, edt_smooth)
            print('distance map smoothed.')
            print('identifying maxima...')
            # find local maxima in the smoothed distance map
            # these will be the watershed seeds
            max_strel = generate_binary_structure(3, 2)
            maxima = maximum_filter(smooth_dist,
                                    footprint=max_strel) == smooth_dist
            # clean up background and edges
            bgrd_3d = smooth_dist == 0
            eroded_bgrd = binary_erosion(bgrd_3d, structure= max_strel,
                                         border_value=1)
            maxima = np.logical_xor(maxima, eroded_bgrd)
            print('maxima identified.')
            # watershed segmentation
            labs = self.watershed_labels(maxima)
            print('watershedding...')
            peroxisomes = watershed(-smooth_dist, labs, mask=threshold_img)
            print('watershedding complete.')
        # Sometimes the watershedding algorithm inaccurately separates objects
        # on different Z-slices. The next section merges objects with
        # significant overlap
        for s in range(1,peroxisomes.shape[0]):
            cslice = peroxisomes[s,:,:]
            lslice = peroxisomes[s-1,:,:]
            for obj in np.unique(cslice)[np.unique(cslice)!= 0]:
                lslice_vals, cts = np.unique(lslice[cslice == obj],
                                             return_counts = True)
                lslice_vals = lslice_vals.tolist()
                cts = cts.tolist()
                ordered_by_ct = sorted(zip(lslice_vals, cts),
                                       key = itemgetter(1))
                if ordered_by_ct[-1][0] == 0 or ordered_by_ct[-1][0] == obj:
                    continue
                else:
                    # if >50% of pixels in the slice below obj are from another
                    # object, change obj to that object #
                    if float(ordered_by_ct[-1][1])/cslice[cslice == obj].size>0.5:
                        peroxisomes[s,:,:][cslice == obj] = ordered_by_ct[-1][0]
        obj_nums, volumes = np.unique(peroxisomes, return_counts=True)
        volumes = dict(zip(obj_nums.astype('uint16'), volumes))
        # remove the background
        del volumes[0]
        obj_nums = obj_nums.astype('uint16').tolist()
        obj_nums.remove(0)
        # generate dict of relevant parameters to pass to PexSegmentObj
        mode_params = {}
        if hasattr(self, 'parent'):
            pdout.append('parent')
            mode_params['parent'] = self.parent
        if self.seg_method == 'canny':
            mode_params['high_threshold'] = self.high_threshold
            mode_params['low_threshold'] = self.low_threshold
            mode_params['edges'] = edge_img
            pdout.append('volumes')
        if self.seg_method == 'threshold':
            if self.mode == 'threshold':
                mode_params['threshold'] = self.threshold
                pdout.append('volumes')
            elif self.mode == 'bg_scaled':
                mode_params['thresholds'] = self.thresholds
                mode_params['bg_diff'] = self.bg_diff
                mode_params['cells'] = self.cells
                mode_params['cell_edges'] = self.c_edges
                mode_params['cell_nums'] = self.cells.obj_nums
                mode_params['obj_edges'] = self.obj_edges
                mode_params['on_edge'] = self.on_edge
                for x in ['thresholds','on_edge','parent', 'volumes']:
                    pdout.append(x)
        return PexSegmentObj(f_directory, self.filename, raw_img,
                             gaussian_img, self.seg_method, self.mode,
                             threshold_img, dist_map, smooth_dist, maxima,
                             labs, peroxisomes, obj_nums, volumes,
                             to_pdout=pdout, mode_params=mode_params)
    ## HELPER METHODS ##
    @staticmethod
    def watershed_labels(maxima_img):
        '''Number local maxima in order for use in watershedding.
        Args:
            maxima_img (np.ndarray): A boolean array with local maxima labeled
                as true pixels.
        Yields:
            A numpy ndarray with maxima numbered sequentially.
        '''
        max_z, max_y, max_x = np.nonzero(maxima_img)
        label_output = np.zeros(maxima_img.shape)
        for i in range(0,len(max_y)):
            label_output[max_z[i],max_y[i],max_x[i]] = i+1
        return(label_output)
|
nrweir/pyto_segmenter
|
PexSegment.py
|
Python
|
gpl-3.0
| 45,265
|
[
"Gaussian"
] |
f919a0d16ad36086ea1c30a970e970cf1a9c530813f694993389c9683d6e23e4
|
"""
This module and its submodules contains utilities for running external
processes and interfacing with job managers. This module should contain
functionality shared between Galaxy and the Pulsar.
"""
from galaxy.util.bunch import Bunch
from .kill import kill_pid
__all__ = ('kill_pid', 'Bunch')
|
galaxyproject/pulsar
|
pulsar/managers/util/__init__.py
|
Python
|
apache-2.0
| 299
|
[
"Galaxy"
] |
532710d3c186285b2c2892d12064c45cd7463dec15f6459a34b572cfa7f86b50
|
"""
Creates all figures given directory of AIS-MCMC result csvs
"""
import matplotlib
import matplotlib.pyplot as plt; plt.ion()
import seaborn as sns
import pandas as pd
import numpy as np
import os, argparse, pyprind
# set global default font sizes (tick labels slightly smaller than body text)
fontsize = 16
matplotlib.rcParams['xtick.labelsize'] = fontsize-3
matplotlib.rcParams['ytick.labelsize'] = fontsize-3
matplotlib.rcParams['font.size'] = fontsize
# set up color scheme: first two entries of the default seaborn palette,
# used consistently for VB (variational) vs MCMC results below
vb_color = sns.color_palette()[0]
mc_color = sns.color_palette()[1]
# parameter names: continuous source parameters compared across methods
continuous_params = ['position', 'flux_r_nmgy', #'flux_r_mag',
                     'color_ug', 'color_gr', 'color_ri', 'color_iz',
                     'gal_frac_dev', 'gal_axis_ratio',
                     'gal_radius_px', 'gal_angle_deg']
#####################
# Uncertainty table #
#####################
def make_calibration_tables(results_dir):
    """Write the calibration ("within x sds") tables out as LaTeX files.

    Reads uscore_vb.csv and uscore_mc.csv from results_dir, renames the
    columns/index to presentation-friendly labels, and saves each as a
    .tex table next to its source csv.
    """
    col_labels = {'field': 'parameter',
                  'within_half_sd': "within 1/2 sd",
                  'within_1_sd': "1 sd",
                  'within_2_sd': "2 sd",
                  'within_3_sd': "3 sd"}
    idx_labels = {'log_flux_r_nmgy': 'log r-flux',
                  'color_ug': 'color ug',
                  'color_gr': 'color gr',
                  'color_ri': 'color ri',
                  'color_iz': 'color iz'}
    def _write_one(stub):
        table = pd.read_csv(os.path.join(results_dir, stub))
        table.rename(columns=col_labels, inplace=True)
        table.set_index("parameter", inplace=True)
        table.rename(index=idx_labels, inplace=True)
        print(table.head())
        # format every numeric column to three decimal places
        fmts = [lambda v: "%2.3f" % v for _ in range(4)]
        tex_path = os.path.join(results_dir,
                                os.path.splitext(stub)[0] + ".tex")
        table.to_latex(tex_path, formatters=fmts)
    for stub in ("uscore_vb.csv", "uscore_mc.csv"):
        _write_one(stub)
##################
# figure methods #
##################
def make_est_vs_error_plots(results_dir):
    """Render truth-vs-prediction scatter plots for VB and MCMC estimates.

    Reads matched_truth/vb/mc csvs from results_dir, restricts to sources
    whose star/galaxy classification agrees across all three, and saves one
    error-scatter-<param>-<type>.png per parameter and source type.
    """
    print(".... making Error Plots --- Takes a few minutes to render")
    import pyprind
    # load in matched dataframes, remove NANs
    truedf = pd.read_csv(os.path.join(results_dir, "matched_truth.csv"))
    vbdf = pd.read_csv(os.path.join(results_dir, "matched_vb.csv"))
    mcdf = pd.read_csv(os.path.join(results_dir, "matched_mc.csv"))
    vbdf['log_flux_r'] = np.log(vbdf.flux_r_nmgy)
    # fix gal angles
    # BUG FIX: the original used chained indexing
    # (mcdf['gal_angle_deg'][mask] += 180.), which pandas may apply to a
    # temporary copy, silently leaving the frame unchanged. Use .loc.
    mcdf.loc[mcdf['gal_angle_deg'] < 0., 'gal_angle_deg'] += 180.
    # only compare inferences when all truth, VB and MC agree
    star_idxs = (truedf.is_star) & (vbdf.is_star > .5) & (mcdf.is_star > .5)
    gal_idxs = (~truedf.is_star) & (vbdf.is_star <= .5) & (mcdf.is_star <= .5)
    # remove nan obs
    bad_idx = (np.isnan(vbdf.flux_r_nmgy) | np.isnan(mcdf.flux_r_nmgy)).values
    def scatter_error(x, y, yerr, marker, label, c, alpha=.5, ax=None):
        # scatter with optional 2-sd error bars on a shared axis
        if ax is None:
            fig, ax = plt.figure(figsize=(8,6)), plt.gca()
        ax.errorbar(x, y, yerr=yerr, ecolor=c, fmt="none")
        ax.scatter(x, y, marker=marker, label=label, c=c, s=3)
        return ax
    def plot_param_source(param_name, source_type="star"):
        # one truth-vs-prediction figure (VB panel + MCMC panel)
        if source_type == "star":
            idxs = star_idxs & (~bad_idx)
        elif source_type == "gal":
            idxs = gal_idxs & (~bad_idx)
        else:
            raise Exception("star|gal")
        # values to compare
        true_vals = truedf[param_name][idxs].values
        mc_means = mcdf[param_name][idxs].values
        vb_means = vbdf[param_name][idxs].values
        # wrap gal angles: pick whichever of {a, a-180, a+180} is closest
        # to the true angle, since orientation is periodic mod 180 degrees
        if param_name == 'gal_angle_deg':
            mc_wrapped = np.column_stack([ mc_means, mc_means-180, mc_means+180 ])
            mc_dist = np.abs(mc_wrapped-true_vals[:,None])
            mc_idx = np.argmin(mc_dist, axis=1)
            mc_means = np.array([ mc[i] for i,mc in zip(mc_idx, mc_wrapped) ])
            vb_wrapped = np.column_stack([ vb_means, vb_means-180, vb_means+180])
            vb_dist = np.abs(vb_wrapped-true_vals[:,None])
            vb_idx = np.argmin(vb_dist, axis=1)
            vb_means = np.array([ vb[i] for i,vb in zip(vb_idx, vb_wrapped) ])
        print("--- param %s, (source %s) ---- "%(param_name, source_type))
        print(" true vals have %d " % np.sum(pd.isnull(true_vals)))
        print(" vb preds have %d " % np.sum(pd.isnull(vb_means)))
        print(" mc preds have %d " % np.sum(pd.isnull(mc_means)))
        # 2-sd error bars only for params with reported stderr columns
        if param_name in qq_params:
            mc_errs = 2*mcdf[param_name+"_stderr"][idxs].values
            vb_errs = 2*vbdf[param_name+"_stderr"][idxs].values
        else:
            mc_errs = None
            vb_errs = None
        # higlight stars and gals
        fig, axarr = plt.subplots(1, 2, figsize=(9, 3.75))
        lo = min(np.nanmin(true_vals), np.nanmin(mc_means), np.nanmin(vb_means))
        hi = max(np.nanmax(true_vals), np.nanmax(mc_means), np.nanmax(vb_means))
        print("lo, hi", lo, hi)
        for ax in axarr.flatten():
            ax.plot([lo, hi], [lo, hi], "--", c='grey', linewidth=2)
        scatter_error(true_vals, vb_means, vb_errs,
                      marker='o', label="VB (%s)"%source_type, c=vb_color, ax=axarr[0])
        scatter_error(true_vals, mc_means, mc_errs,
                      marker='o', label="MCMC (%s)"%source_type, c=mc_color, ax=axarr[1])
        if False: #compare_to_photo:
            axarr[0].set_xlabel("coadd value", fontsize=fontsize)
            axarr[1].set_xlabel("coadd value", fontsize=fontsize)
        else:
            axarr[0].set_xlabel("ground truth", fontsize=fontsize)
            axarr[1].set_xlabel("ground truth", fontsize=fontsize)
        axarr[0].set_ylabel("VI predicted", fontsize=fontsize)
        axarr[1].set_ylabel("MCMC predicted", fontsize=fontsize)
        fig.tight_layout()
        fig.savefig(os.path.join(results_dir, "error-scatter-%s-%s.png"%(param_name, source_type)), bbox_inches='tight', dpi=200)
        if 'objid' in truedf.columns:
            # Print out some bad object ids for outiers
            rmses = np.abs(true_vals - mc_means)
            worst_idx = np.argsort(rmses)[::-1][:10]
            from collections import OrderedDict
            print("Objects with highest error for %s-%s params"%(source_type, param_name))
            print(pd.DataFrame(OrderedDict([
                ('objid', truedf[idxs].objid.iloc[worst_idx]),
                ('rmses', rmses[worst_idx]),
                ('true', true_vals[worst_idx]),
                ('mc' , mc_means[worst_idx]),
                ('vb' , vb_means[worst_idx])])))
    # qq_params is referenced inside plot_param_source via closure; it is
    # bound here before any call, so lookup succeeds at call time.
    qq_params = ["log_flux_r", "color_ug", "color_gr", "color_ri", "color_iz"]
    for source_type in ["star", "gal"]:
        for param_name in qq_params:
            plot_param_source(param_name, source_type=source_type)
    gal_params = [ 'gal_frac_dev', 'gal_axis_ratio',
                   'gal_radius_px', 'gal_angle_deg']
    for gp in gal_params:
        plot_param_source(gp, source_type="gal")
def make_error_comparison_figs(results_dir, source_type="star",
                               error_type="abs", compare_to_photo=False):
    """Make violin-plot error-comparison figures and LaTeX error tables.

    Reads matched_{truth,vb,mc[,photo]}.csv from ``results_dir``, computes
    per-parameter errors for each inference method (VI, MCMC, and
    optionally the SDSS photo pipeline), draws one violin plot per
    parameter, and writes summary tables as LaTeX.

    Args:
        results_dir: directory holding the matched catalog CSV files.
        source_type: "star" or "gal"; only used in output filenames.
        error_type: "abs" => |error|, "diff" => signed error.
        compare_to_photo: include the photo pipeline as a third method.
    """
    def param_df(param_name, truedf, vbdf, mcdf, photodf=None, error_type="abs"):
        """Build a long-format frame with columns (method, error) for one parameter."""
        if param_name == 'position':
            # position error in pixels, via great-circle separation
            def dist(df):
                return position_error(truedf.ra.values, truedf.dec.values,
                                      df.ra.values, df.dec.values)
            dvb, dmc = dist(vbdf), dist(mcdf)
            method_list = np.concatenate([['vb']*len(dvb), ['mc']*len(dmc)])
            error_list = np.concatenate([dvb, dmc])
            if photodf is not None:
                dphoto = dist(photodf)
                method_list = np.concatenate([method_list, ['photo']*len(dphoto)])
                error_list = np.concatenate([error_list, dphoto])
        elif param_name == "flux_r_nmgy":
            # brightness is compared on the log scale
            dmc = np.log(truedf[param_name].values) - np.log(mcdf[param_name].values)
            dvb = np.log(truedf[param_name].values) - np.log(vbdf[param_name].values)
            method_list = np.concatenate([['vb']*len(dvb), ['mc']*len(dmc)])
            error_list = np.concatenate([dvb, dmc])
            if photodf is not None:
                dphoto = np.log(truedf[param_name].values) - np.log(photodf[param_name].values)
                method_list = np.concatenate([method_list, ['photo']*len(dphoto)])
                error_list = np.concatenate([error_list, dphoto])
        elif param_name == "gal_angle_deg":
            def angle_error(true, pred):
                # galaxy angles are periodic mod 180 degrees: take the
                # smallest (in magnitude) of the three candidate differences
                diffs = np.column_stack([true - pred,
                                         true - (pred - 180.),
                                         true - (pred + 180.)])
                mini = np.argmin(np.abs(diffs), axis=1)
                return diffs[np.arange(len(mini)), mini]
            dmc = angle_error(truedf[param_name].values, mcdf[param_name].values)
            dvb = angle_error(truedf[param_name].values, vbdf[param_name].values)
            method_list = np.concatenate([['vb']*len(dvb), ['mc']*len(dmc)])
            error_list = np.concatenate([dvb, dmc])
            if photodf is not None:
                dphoto = angle_error(truedf[param_name].values, photodf[param_name].values)
                method_list = np.concatenate([method_list, ['photo']*len(dphoto)])
                error_list = np.concatenate([error_list, dphoto])
        else:
            # all other parameters: plain difference
            dmc = truedf[param_name].values - mcdf[param_name].values
            dvb = truedf[param_name].values - vbdf[param_name].values
            method_list = np.concatenate([['vb']*len(dvb), ['mc']*len(dmc)])
            error_list = np.concatenate([dvb, dmc])
            if photodf is not None:
                dphoto = truedf[param_name].values - photodf[param_name].values
                method_list = np.concatenate([method_list, ['photo']*len(dphoto)])
                error_list = np.concatenate([error_list, dphoto])
        if error_type == "abs":
            error_list = np.abs(error_list)
        # consistent naming; use .loc rather than chained indexing so the
        # assignment is guaranteed to write back (avoids SettingWithCopy)
        outdf = pd.DataFrame({'method': method_list, 'error': error_list})
        outdf.loc[outdf.method == 'mc', 'method'] = 'MCMC'
        outdf.loc[outdf.method == 'vb', 'method'] = 'VI'
        return outdf

    ##################
    # start function #
    ##################
    # load in matched dataframes, remove NANs
    truedf = pd.read_csv(os.path.join(results_dir, "matched_truth.csv"))
    vbdf = pd.read_csv(os.path.join(results_dir, "matched_vb.csv"))
    mcdf = pd.read_csv(os.path.join(results_dir, "matched_mc.csv"))
    vbdf['log_flux_r'] = np.log(vbdf.flux_r_nmgy)
    # fix gal angles: wrap negatives into [0, 180).  Written with .loc so the
    # in-place update cannot be lost to pandas chained assignment.
    mcdf.loc[mcdf['gal_angle_deg'] < 0., 'gal_angle_deg'] += 180.
    # only compare inferences when all truth, VB and MC agree
    star_idxs = (truedf.is_star) & (vbdf.is_star > .5) & (mcdf.is_star > .5)
    gal_idxs = (~truedf.is_star) & (vbdf.is_star <= .5) & (mcdf.is_star <= .5)
    # remove nan obs
    bad_idx = (np.isnan(vbdf.flux_r_nmgy) | np.isnan(mcdf.flux_r_nmgy)).values
    # match up stars to gals
    truedf = pd.concat([truedf[star_idxs & (~bad_idx)], truedf[gal_idxs & (~bad_idx)]])
    vbdf = pd.concat([vbdf[star_idxs & (~bad_idx)], vbdf[gal_idxs & (~bad_idx)]])
    mcdf = pd.concat([mcdf[star_idxs & (~bad_idx)], mcdf[gal_idxs & (~bad_idx)]])
    if compare_to_photo:
        photodf = pd.read_csv(os.path.join(results_dir, "matched_photo.csv"))
        photodf = pd.concat([photodf[star_idxs & (~bad_idx)], photodf[gal_idxs & (~bad_idx)]])
    # short axis labels for each parameter
    pretty_labels = {'flux_r_nmgy'   : "brightness",
                     'color_ug'      : "color u-g",
                     'color_gr'      : "color g-r",
                     'color_ri'      : "color r-i",
                     'color_iz'      : "color i-z",
                     'gal_frac_dev'  : "profile",
                     'gal_axis_ratio': "axis",
                     'gal_radius_px' : "radius",
                     'gal_angle_deg' : "angle",
                     'position'      : "position"}
    # one violin plot per parameter (continuous_params is module level)
    fig, axarr = plt.subplots(2, int(len(continuous_params)/2), figsize=(12, 6))
    for ax, cp in zip(axarr.flatten(), continuous_params):
        if compare_to_photo:
            pdf = param_df(cp, truedf, vbdf, mcdf, photodf=photodf, error_type=error_type)
        else:
            pdf = param_df(cp, truedf, vbdf, mcdf, photodf=None, error_type=error_type)
        print(pdf.min())
        vp = sns.violinplot(x="method", y="error", data=pdf, ax=ax, bw=.2)
        ax.set_xlabel(pretty_labels[cp])
        vp.tick_params(labelsize=11)
        if cp == "position":
            # make sure we use sci notation for small numbers
            ax.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
        else:
            # was `cp is not "position"`: identity comparison against a string
            # literal is fragile/undefined; equality is what was intended.
            # trim the y axis so extreme outliers don't dominate the plot
            ylo, yhi = ax.get_ylim()
            if error_type == "abs":
                ylim = np.nanpercentile(pdf['error'], [0, 97.5])
            elif error_type == "diff":
                ylim = np.nanpercentile(pdf['error'], [2.5, 97.5])
            ax.set_ylim(ylim)
    fig.tight_layout()
    fname = os.path.join(results_dir, "error_vb_mc_comparison_%s-%s.png"%(source_type, error_type))
    fig.savefig(fname, bbox_inches='tight', dpi=200)
    ############################################################
    #  also save error table (with significance highlighting?) #
    ############################################################
    edfs, zdfs = [], []
    sample_sizes = []
    for cp in continuous_params:
        if compare_to_photo:
            pdf = param_df(cp, truedf, vbdf, mcdf, photodf=photodf, error_type=error_type)
        else:
            pdf = param_df(cp, truedf, vbdf, mcdf, photodf=None, error_type=error_type)
        pdf['param'] = [pretty_labels[cp]]*len(pdf)
        edfs.append(pdf)
        # compute z score errors
        zdf = pdf.copy()
        zdf['error'] /= zdf.error.std()
        zdfs.append(zdf)
        sample_sizes.append(np.sum(~zdf.error.isnull()))
    edf = pd.concat(edfs, axis=0)  # explicit axis (positional form is deprecated)
    # mean error and its standard error for each (param, method) cell
    meandf = edf.groupby(["param", "method"], sort=False).mean()
    countdf = edf.groupby(["param", "method"], sort=False).count()
    meandf['error stdev'] = edf.groupby(["param", "method"]).std() / np.sqrt(countdf)
    meandf.reset_index(inplace=True)
    print(meandf)
    # construct tex table
    methods = ["MCMC", "VI"]
    if compare_to_photo:
        methods += ["photo"]

    def create_pairs(zdf):
        """Tabulate paired per-source error differences (e.g. VI-MCMC) with std errors."""
        dfs = {m: zdf[zdf.method == m].reset_index(drop=True) for m in methods}
        names = []
        df_list = []
        if compare_to_photo:
            pv_delta = dfs["photo"].copy()
            pv_delta.error -= dfs["VI"].error
            pm_delta = dfs["photo"].copy()
            pm_delta.error -= dfs["MCMC"].error
            names += ["photo-VI", "photo-MCMC"]
            df_list += [pv_delta, pm_delta]
        vm_delta = dfs["VI"].copy()
        vm_delta.error -= dfs["MCMC"].error
        names += ["VI-MCMC"]
        df_list += [vm_delta]
        str_cols = []
        for ddf, name in zip(df_list, names):
            emean = ddf.groupby(["param"], sort=False).mean().error
            ecnt = ddf.groupby(["param"], sort=False).count().error
            estd = ddf.groupby(["param"], sort=False).std().error / np.sqrt(ecnt)
            # raw string: \pm is LaTeX markup, not a Python escape sequence
            str_cols.append([r"%2.4f ($\pm$ %2.3f)" % (e, s)
                             for e, s in zip(emean, estd)])
        from collections import OrderedDict
        odf = pd.DataFrame(OrderedDict(zip(names, str_cols)), index=emean.index)
        return odf

    str_cols = []
    for m in methods:
        mdf = meandf[meandf.method == m]
        mstr = ["%2.3f" % e for e, s in zip(mdf.error, mdf['error stdev'])]
        str_cols.append(mstr)
    outdf = pd.DataFrame({m: s for m, s in zip(methods, str_cols)}, index=mdf.param)
    pairdf = create_pairs(edf)
    totaldf = pd.concat([outdf, pairdf], axis=1)
    print("Totaldf: ", totaldf)
    outdf.to_latex(os.path.join(results_dir, "error_vb_mc_comparison.tex"), escape=False)
    totaldf.to_latex(os.path.join(results_dir, "error_vb_mc_comparison-pair.tex"), escape=False)
#################
# ROC Curves #
#################
def make_star_gal_roc_curves(results_dir, compare_to_photo=False):
    """Plot star/galaxy classification ROC curves and a bootstrap AUC comparison.

    Reads pstardf.csv (true labels plus MCMC/VI star probabilities) from
    ``results_dir``, plots ROC curves (optionally with the photo pipeline's
    single operating point), prints bootstrap AUC confidence intervals, and
    saves both figures into ``results_dir``.
    """
    from sklearn.metrics import roc_curve, roc_auc_score
    pstardf = pd.read_csv(os.path.join(results_dir, "pstardf.csv"))
    Y = pstardf['true_star'].values
    pmc = pstardf['pstar_mc'].values
    pvb = pstardf['pstar_vb'].values
    # ROC curves for the two inference methods
    fig, ax = plt.figure(figsize=(6,4)), plt.gca()
    mc_fpr, mc_tpr, thresh = roc_curve(Y, pmc)
    vb_fpr, vb_tpr, thresh = roc_curve(Y, pvb)
    ax.plot(mc_fpr, mc_tpr, label="MCMC", linewidth=3)
    ax.plot(vb_fpr, vb_tpr, label="VI", linewidth=3)
    if compare_to_photo:
        # BUGFIX: the original referenced `photodf`/`truedf` here without
        # ever defining them (NameError when compare_to_photo=True); load
        # the matched catalogs explicitly.  The photo pipeline makes hard
        # classifications, so it contributes a single (FPR, TPR) point.
        truedf = pd.read_csv(os.path.join(results_dir, "matched_truth.csv"))
        photodf = pd.read_csv(os.path.join(results_dir, "matched_photo.csv"))
        photo_fpr = np.sum((photodf.is_star) & (~truedf.is_star)) / np.sum(~truedf.is_star)
        photo_tpr = np.sum((photodf.is_star) & (truedf.is_star)) / np.sum(truedf.is_star)
        ax.scatter(photo_fpr, photo_tpr, marker="x", color='red', linewidth=2, s=35, label="Photo")
    ax.legend(fontsize=fontsize)
    ax.set_xlim(0, .4)
    ax.set_ylim(.6, 1.)
    ax.set_xlabel("False Positive Rate", fontsize=fontsize)
    ax.set_ylabel("True Positive Rate", fontsize=fontsize)
    fig.savefig(os.path.join(results_dir, "pstar_roc_comparison.png"), bbox_inches='tight', dpi=200)

    # compute AUC for MC and VB
    def make_mc_vb_auc_df():
        """Bootstrap (5000 resamples) the AUC score for both methods."""
        mc_aucs, vb_aucs = [], []
        for i in range(5000):
            idx = np.random.choice(len(Y), size=len(Y))
            mc_aucs.append(roc_auc_score(Y[idx], pmc[idx]))
            vb_aucs.append(roc_auc_score(Y[idx], pvb[idx]))
        return pd.DataFrame({'inference method': ['MCMC']*len(mc_aucs) + ['VI']*len(vb_aucs),
                             'AUC': np.concatenate([mc_aucs, vb_aucs])})

    aucdf = make_mc_vb_auc_df()
    mc_auc = roc_auc_score(Y, pmc)
    vb_auc = roc_auc_score(Y, pvb)
    mc_aucs = aucdf['AUC'][aucdf['inference method'] == 'MCMC']
    vb_aucs = aucdf['AUC'][aucdf['inference method'] == 'VI']
    print("======= Star vs. Galaxy AUC Scores ==========")
    print(" MC : %2.4f [%2.4f, %2.4f] "%(mc_auc, np.percentile(mc_aucs, 2.5), np.percentile(mc_aucs, 97.5)))
    print(" VB : %2.4f [%2.4f, %2.4f] "%(vb_auc, np.percentile(vb_aucs, 2.5), np.percentile(vb_aucs, 97.5)))
    # violin plot of the bootstrap AUC distributions
    fig, ax = plt.figure(figsize=(6,4)), plt.gca()
    sns.violinplot(x='inference method', y='AUC', data=aucdf)
    fig.tight_layout()
    fig.savefig(os.path.join(results_dir, "pstar_auc_comparison.png"), bbox_inches='tight', dpi=200)
def make_mcmc_vb_uncertainty_comparison_plots(results_dir, source_type="star", param_name="log_flux_r"):
    """ Compare MCMC and VB posterior uncertainty on examples where the
    prediction is way off.
      - These plots show that MCMC have better uncertainty properties

    For the ten sources with the largest VI error in ``param_name``, plots
    the VI and MCMC Gaussian posterior approximations next to the true
    value and saves one PNG per source into ``results_dir``.
    """
    from scipy.stats import norm  # hoisted out of the loop (loop-invariant)
    # load in matched dataframes, remove NANs
    truedf = pd.read_csv(os.path.join(results_dir, "matched_truth.csv"))
    vbdf = pd.read_csv(os.path.join(results_dir, "matched_vb.csv"))
    mcdf = pd.read_csv(os.path.join(results_dir, "matched_mc.csv"))
    vbdf['log_flux_r'] = np.log(vbdf.flux_r_nmgy)
    # wrap negative galaxy angles into [0, 180).  Use .loc: the original
    # chained assignment (`df[col][mask] += ...`) can silently write to a copy.
    mcdf.loc[mcdf['gal_angle_deg'] < 0., 'gal_angle_deg'] += 180.
    # only compare inferences when all truth, VB and MC agree
    star_idxs = (truedf.is_star) & (vbdf.is_star > .5) & (mcdf.is_star > .5)
    gal_idxs = (~truedf.is_star) & (vbdf.is_star <= .5) & (mcdf.is_star <= .5)
    # remove nan obs
    nan_idx = (np.isnan(vbdf.flux_r_nmgy) | np.isnan(mcdf.flux_r_nmgy)).values
    if source_type == "star":
        idxs = star_idxs & (~nan_idx)
    elif source_type == "gal":
        idxs = gal_idxs & (~nan_idx)
    else:
        # ValueError is more precise than bare Exception and remains
        # backward-compatible with callers catching Exception
        raise ValueError("sourcetype = star|gal")
    # posterior means and standard errors for the selected sources
    true_rmag = truedf[idxs][param_name].values
    vb_rmag = vbdf[idxs][param_name].values
    mc_rmag = mcdf[idxs][param_name].values
    vb_stderr = vbdf[idxs][param_name+"_stderr"].values
    mc_stderr = mcdf[idxs][param_name+"_stderr"].values
    # find locations where VB is way off
    vberr = np.abs(true_rmag - vb_rmag)
    mcerr = np.abs(true_rmag - mc_rmag)
    # renamed from `bad_idx` (which already meant the NaN mask above)
    worst_idx = np.argsort(vberr)[::-1]
    for idx in worst_idx[:10]:
        print("two errs: ", vberr[idx], mcerr[idx])
        vbmu, vbscale = vb_rmag[idx], vb_stderr[idx]
        mcmu, mcscale = mc_rmag[idx], mc_stderr[idx]
        true_val = true_rmag[idx]
        print(vbmu, vbscale)
        print(mcmu, mcscale)
        cvb, cmc, ctrue = sns.color_palette()[0], sns.color_palette()[1], sns.color_palette()[3]
        fig, ax = plt.figure(figsize=(6,3)), plt.gca()
        # grid wide enough to cover both posteriors and the true value
        lo, hi = min(mcmu - 2.75*mcscale, vbmu-2.75*vbscale, true_val), \
                 max(mcmu + 2.75*mcscale, vbmu+2.75*vbscale, true_val)
        xgrid = np.linspace(lo, hi, 1000)
        ax.plot(xgrid, norm.pdf(xgrid, vbmu, vbscale), label="VI", linewidth=2, c=cvb)
        ax.fill_between(xgrid, norm.pdf(xgrid, vbmu, vbscale), alpha=.5, color=cvb)
        ax.plot(xgrid, norm.pdf(xgrid, mcmu, mcscale), "--", label="MCMC", linewidth=2, c=cmc)
        ax.fill_between(xgrid, norm.pdf(xgrid, mcmu, mcscale), alpha=.5, color=cmc)
        ax.scatter(true_val, 0., s=200, linewidth=3, marker='x', label="true", c=ctrue)
        ax.set_xlabel("log brightness")
        ax.legend(fontsize=fontsize)
        fig.savefig(os.path.join(results_dir, "posterior-comparison-%s-%s-src-%d.png"%(source_type, param_name, idx)), bbox_inches='tight')
def make_timing_figures():
    """Plot wall-clock timing comparisons between VI and MCMC.

    Reads timing-output/timedf.csv and saves two figures: number of MCMC
    transitions vs. time, and effective sample size vs. time, each with a
    vertical line marking the (fixed) VI run time.
    """
    timing = pd.read_csv('timing-output/timedf.csv')
    vb_time = timing[timing.method == 'vb'].time.iloc[0]
    mc_rows = timing[timing.method == 'mc']
    # prepend zeros so every MCMC curve starts at the origin
    mc_time = np.concatenate([[0], mc_rows.time])
    ess_star = np.concatenate([[0], mc_rows.ess_star])
    ess_gal = np.concatenate([[0], mc_rows.ess_gal])
    mc_nsamps = np.concatenate([[0], mc_rows.nsamps])

    # figure 1: MCMC transitions vs. wall-clock time
    fig, ax = plt.figure(figsize=(8,4)), plt.gca()
    ax.plot(mc_time, mc_nsamps, label="mcmc-transitions", c=mc_color)
    ylim = ax.get_ylim()
    ax.plot([vb_time, vb_time], ylim, label="VI", c=vb_color)
    ax.set_ylim(ylim)
    ax.legend()
    ax.set_xlabel("Time (seconds)", fontsize=14)
    ax.set_ylabel("Number of MCMC Transitions")
    fig.savefig('timing-output/time-nsamps-fig.png', bbox_inches='tight', dpi=200)

    # figure 2: number of effective independent samples vs. wall-clock time
    fig, ax = plt.figure(figsize=(8,4)), plt.gca()
    ax.plot(mc_time, ess_star, "--o", label="star ess", c=mc_color)
    ax.plot(mc_time, ess_gal, "-->", label="gal ess", c=mc_color)
    ylim = ax.get_ylim()
    ax.plot([vb_time, vb_time], ylim, label="VI", linewidth=2)
    ax.set_ylim(ylim)
    ax.legend()
    ax.set_xlabel("Time (seconds)", fontsize=14)
    ax.set_ylabel("Eff. Ind. Samples", fontsize=14)
    fig.tight_layout()
    fig.savefig('timing-output/time-ess-fig.png', bbox_inches='tight', dpi=200)
##################
# helper error #
##################
def angular_separation(lam1, phi1, lam2, phi2):
    """Great-circle angular separation, in degrees, between two points.

    Inputs are longitudes (lam) and latitudes (phi) in degrees; accepts
    scalars or broadcastable NumPy arrays.  Uses the Vincenty/atan2 form,
    which is numerically stable for both small and antipodal separations.
    """
    lam1, phi1, lam2, phi2 = (np.deg2rad(a) for a in (lam1, phi1, lam2, phi2))
    dlam = lam2 - lam1
    numerator = np.hypot(np.cos(phi2) * np.sin(dlam),
                         np.cos(phi1) * np.sin(phi2)
                         - np.sin(phi1) * np.cos(phi2) * np.cos(dlam))
    denominator = (np.sin(phi1) * np.sin(phi2)
                   + np.cos(phi1) * np.cos(phi2) * np.cos(dlam))
    return np.rad2deg(np.arctan2(numerator, denominator))
def position_error(true_ra, true_dec, pred_ra, pred_dec):
    """Position error in SDSS pixels between true and predicted (ra, dec).

    The angular separation (degrees) is converted to arcseconds and then
    to pixels at the SDSS plate scale of 0.396 arcsec/pixel.
    """
    ARCSEC_PER_DEGREE = 3600
    SDSS_ARCSEC_PER_PIXEL = 0.396
    sep_deg = angular_separation(true_ra, true_dec, pred_ra, pred_dec)
    # degrees -> arcsec -> pixels
    return sep_deg * (ARCSEC_PER_DEGREE / SDSS_ARCSEC_PER_PIXEL)
##############
# run script #
##############
if __name__=="__main__":
    main()  # entry point; `main` is presumably defined earlier in this file (not visible in this chunk)
|
jeff-regier/Celeste.jl
|
experiments/mcmc_scripts/make_mcmc_results_figures.py
|
Python
|
mit
| 25,522
|
[
"Galaxy"
] |
a909ee712a15fe522c355e409ec517764db49d4e7fcfc00571b98cd444c79646
|
import Scientific

# Jacapo requires ScientificPython >= 2.8.  Check the version explicitly
# rather than with `assert`, which is silently stripped when Python runs
# with optimizations enabled (-O), turning a hard requirement into a no-op.
if [int(x) for x in Scientific.__version__.split('.')] < [2, 8]:
    raise ImportError('ScientificPython >= 2.8 is required, found %s'
                      % Scientific.__version__)

from ase.calculators.jacapo.jacapo import *
|
grhawk/ASE
|
tools/ase/calculators/jacapo/__init__.py
|
Python
|
gpl-2.0
| 131
|
[
"ASE"
] |
1641e1989e073c5d28747fdd9f40f0acf22423eb94962ac67d8f25932750a90b
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
# Authors: Gael Varoquaux
#          Jaques Grobler
#          Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
import numpy as np
from scipy import stats
import vtk
import os
import argparse
import timeit
import pickle
import matplotlib.pyplot as plt
# #############################################################################
# Create the data
# Command-line interface: reads a directory of .vtk shapes, writes the mean
# shape and a pickled PCA model.
parser = argparse.ArgumentParser(description='Multivariate Functional Shape Data Analysis (MFSDA)')
parser.add_argument('--shapeDir', type=str, help='Directory with vtk files .vtk', required=True)
# NOTE(review): the help text says 'output directory' but the value is used
# as a filename (default 'mean.vtk') -- confirm before changing the string.
parser.add_argument('--outputMean', help='output directory', default='mean.vtk')
parser.add_argument('--outputModel', help='output filename for model', default='model.pickle')
parser.add_argument('--plot', type=int, help='plot PCA explained variance', default=0)
parser.add_argument('--min_explained', type=float, help='min described by pca components', default=0.98)
def readData(shapedir):
    """Read every .vtk polydata file in `shapedir` into a flat point matrix.

    Returns:
        tuple: (y_design, firstshapedata) where y_design is an
        (nshapes, npoints*3) array of point coordinates and
        firstshapedata is the first polydata object read (kept as a
        template for writing results back out).
    """
    print("loading data ......")
    print("+++++++Read the surface shape data+++++++")
    y_design = []
    numpoints = -1
    firstshapedata = 0
    for vtkfilename in os.listdir(shapedir):
        if not vtkfilename.endswith(".vtk"):
            continue
        print("Reading", vtkfilename)
        reader = vtk.vtkPolyDataReader()
        reader.SetFileName(os.path.join(shapedir, vtkfilename))
        reader.Update()
        shapedata = reader.GetOutput()
        shapepoints = shapedata.GetPoints()
        # remember the first polydata so the caller can reuse its topology
        if firstshapedata == 0:
            firstshapedata = shapedata
        # all shapes are expected to share the same point count
        if numpoints == -1:
            numpoints = shapepoints.GetNumberOfPoints()
        if numpoints != shapepoints.GetNumberOfPoints():
            print("WARNING! The number of points is not the same for the shape:", vtkfilename)
        y_design.append([shapepoints.GetPoint(i)
                         for i in range(shapepoints.GetNumberOfPoints())])
    y_design = np.array(y_design)
    return y_design.reshape(y_design.shape[0], -1), firstshapedata
if __name__ == '__main__':
    args = parser.parse_args()
    start_all = timeit.default_timer()
    # load all shapes as rows of X; X_ is the mean shape (1, npoints*3)
    X, shapedata = readData(args.shapeDir)
    X_ = np.mean(X, axis=0, keepdims=True)
    print(X.shape, X_.shape)
    # write the mean shape back into the template polydata, point by point
    pointdata = X_.reshape(-1).reshape(-1, 3)
    polydatapoints = shapedata.GetPoints()
    ipoint = 0
    for point in pointdata:
        polydatapoints.SetPoint(ipoint, point[0], point[1], point[2])
        ipoint += 1
    writer = vtk.vtkPolyDataWriter()
    meanshapeoutputfilename = args.outputMean
    writer.SetFileName(meanshapeoutputfilename)
    writer.SetInputData(shapedata)
    writer.SetFileTypeToASCII()
    writer.Update()
    # first PCA pass: find how many components reach the requested
    # cumulative explained-variance ratio
    pca = PCA()
    pca.fit(X - X_)
    #min_explained = 0.98
    sum_explained = 0.0
    num_components = 0
    for evr in pca.explained_variance_ratio_:
        sum_explained += evr
        num_components += 1
        if sum_explained >= args.min_explained:
            break
    print("num_components=",num_components)
    if args.plot != 0:
        # optional diagnostic plot of the explained-variance spectrum
        plt.figure(1, figsize=(4, 3))
        plt.clf()
        plt.axes([.2, .2, .7, .7])
        plt.plot(pca.explained_variance_, linewidth=2)
        plt.axis('tight')
        plt.xlabel('n_components')
        plt.ylabel('explained_variance_')
        # # Prediction
        # n_components = [20, 40, 64]
        # Cs = np.logspace(-4, 4, 3)
        # # Parameters of pipelines can be set using ‘__’ separated parameter names:
        # estimator = GridSearchCV(pipe,
        #                          dict(pca__n_components=n_components,
        #                               logistic__C=Cs))
        # estimator.fit(X_digits, y_digits)
        plt.axvline(num_components,
                    linestyle=':', label='n_components=' + str(num_components))
        plt.legend(prop=dict(size=12))
        plt.show()
    # second PCA pass: refit with exactly the chosen number of components
    pca = PCA(n_components=num_components)
    X_pca = pca.fit_transform(X - X_)
    print(pca.explained_variance_ratio_)
    print(X_pca.shape)
    # per-component mean and spread of the training projections
    X_pca_ = np.mean(X_pca, axis=0, keepdims=True)
    X_pca_var = np.std(X_pca, axis=0, keepdims=True)
    # persist everything needed to project/reconstruct new shapes
    pca_model = {}
    pca_model["pca"] = pca
    pca_model["X_"] = X_
    pca_model["X_pca_"] = X_pca_
    pca_model["X_pca_var"] = X_pca_var
    with open(args.outputModel, "wb") as outputfile:
        pickle.dump(pca_model, outputfile)
    stop_all = timeit.default_timer()
    delta_time_all = str(stop_all - start_all)
    print("The total elapsed time is " + delta_time_all)
|
pdedumast/ShapeVariationAnalyzer
|
src/py/fitlib/condyle_pca_decomposition.py
|
Python
|
apache-2.0
| 5,219
|
[
"VTK"
] |
1eca12b2e6c9a541029dcbd8477635bf0266e44f0b4238d8da16528ceb7e012e
|
# -*- coding: utf-8 -*-
# pylama:skip=1
r"""
Some information and hints about logs and log formats.
Logs analysis: https://www.wikiwand.com/en/Log_analysis
Make use of database models to avoid parsing logs again and again.
Also think to log rotation and archiving.
Real-time/past analysis:
- Number of visits and number of unique visitors
- Visit duration and last visits
- Authenticated users, and last authenticated visits
- Days of week and rush hours
- Domains/countries of host's visitors.
- Hosts list
- Number of page views
- Most viewed, entry, and exit pages
- File types
- OS used
- Browsers used
- Robots used
- HTTP referrer
- Search engines, key phrases and keywords
used to find the analyzed web site
- HTTP errors
- Some of the log analyzers also report on who is on the site,
conversion tracking, visit time and page navigation.
NCSA Common log format (https://www.wikiwand.com/en/Common_Log_Format):
127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326
A "-" in a field indicates missing data.
127.0.0.1 is the IP address of the client (remote host) which made the request to the server.
user-identifier is the RFC 1413 identity of the client.
frank is the userid of the person requesting the document.
[10/Oct/2000:13:55:36 -0700] is the date, time, and time zone that the request was received, by default in strftime format %d/%b/%Y:%H:%M:%S %z.
"GET /apache_pb.gif HTTP/1.0" is the request line from the client. The method GET, /apache_pb.gif the resource requested, and HTTP/1.0 the HTTP protocol.
200 is the HTTP status code returned to the client. 2xx is a successful response, 3xx a redirection, 4xx a client error, and 5xx a server error.
2326 is the size of the object returned to the client, measured in bytes.
Apache log examples:
https://ossec-docs.readthedocs.io/en/latest/log_samples/apache/apache.html
Apache error-log variables:
%% The percent sign
%a Client IP address and port of the request
%{c}a Underlying peer IP address and port of the connection (see the mod_remoteip module)
%A Local IP-address and port
%{name}e Request environment variable name
%E APR/OS error status code and string
%F Source file name and line number of the log call
%{name}i Request header name
%k Number of keep-alive requests on this connection
%l Loglevel of the message
%L Log ID of the request
%{c}L Log ID of the connection
%{C}L Log ID of the connection if used in connection scope, empty otherwise
%m Name of the module logging the message
%M The actual log message
%{name}n Request note name
%P Process ID of current process
%T Thread ID of current thread
%{g}T System unique thread ID of current thread (the same ID as displayed by e.g. top; currently Linux only)
%t The current time
%{u}t The current time including micro-seconds
%{cu}t The current time in compact ISO 8601 format, including micro-seconds
%v The canonical ServerName of the current server.
%V The server name of the server serving the request according to the UseCanonicalName setting.
\ (backslash space) Non-field delimiting space
% (percent space) Field delimiter (no output)
Gunicorn log variables:
%(h)s remote address
%(l)s '-'
%(u)s user name
%(t)s date of the request
%(r)s status line (e.g. GET / HTTP/1.1)
%(m)s request method
%(U)s URL path without query string
%(q)s query string
%(H)s protocol
%(s)s status
%(B)s response length
%(b)s response length or '-' (CLF format)
%(f)s referrer
%(a)s user agent
%(T)s request time in seconds
%(D)s request time in microseconds
%(L)s request time in decimal seconds
%(p)s process ID
%(i)s request header
%(o)s response header
uWSGI log variables:
%(uri) REQUEST_URI
%(method) REQUEST_METHOD
%(user) REMOTE_USER
%(addr) REMOTE_ADDR
%(host) HTTP_HOST
%(proto) SERVER_PROTOCOL
%(uagent) HTTP_USER_AGENT (starting from 1.4.5)
%(referer) HTTP_REFERER (starting from 1.4.5)
%(status) HTTP response status code
%(micros) response time in microseconds
%(msecs) response time in milliseconds
%(time) timestamp of the start of the request
%(ctime) ctime of the start of the request
%(epoch) the current time in Unix format
%(size) response body size + response headers size (since 1.4.5)
%(ltime) human-formatted (Apache style) request time (since 1.4.5)
%(hsize) response headers size (since 1.4.5)
%(rsize) response body size (since 1.4.5)
%(cl) request content body size (since 1.4.5)
%(pid) pid of the worker handling the request (since 1.4.6)
%(wid) id of the worker handling the request (since 1.4.6)
%(switches) number of async switches (since 1.4.6)
%(vars) number of CGI vars in the request (since 1.4.6)
%(headers) number of generated response headers (since 1.4.6)
%(core) the core running the request (since 1.4.6)
%(vsz) address space/virtual memory usage (in bytes) (since 1.4.6)
%(rss) RSS memory usage (in bytes) (since 1.4.6)
%(vszM) address space/virtual memory usage (in megabytes) (since 1.4.6)
%(rssM) RSS memory usage (in megabytes) (since 1.4.6)
%(pktsize) size of the internal request uwsgi packet (since 1.4.6)
%(modifier1) modifier1 of the request (since 1.4.6)
%(modifier2) modifier2 of the request (since 1.4.6)
%(metric.XXX) access the XXX metric value (see The Metrics subsystem)
%(rerr) number of read errors for the request (since 1.9.21)
%(werr) number of write errors for the request (since 1.9.21)
%(ioerr) number of write and read errors for the request (since 1.9.21)
%(tmsecs) timestamp of the start of the request in milliseconds since the epoch (since 1.9.21)
%(tmicros) timestamp of the start of the request in microseconds since the epoch (since 1.9.21)
%(var.XXX) the content of request variable XXX (like var.PATH_INFO, available from 1.9.21)
NginX log format:
$ancient_browser equals the value set by the ancient_browser_value directive, if a browser was identified as ancient
$arg_ argument in the request line
$args arguments in the request line
$binary_remote_addr client address in a binary form, value’s length is always 4 bytes for IPv4 addresses or 16 bytes for IPv6 addresses
$body_bytes_sent number of bytes sent to a client, not counting the response header; this variable is compatible with the “%B” parameter of the mod_log_config Apache module
$bytes_sent number of bytes sent to a client (1.3.8, 1.2.5)
$connection connection serial number (1.3.8, 1.2.5)
$connection_requests current number of requests made through a connection (1.3.8, 1.2.5)
$connections_active same as the Active connections value
$connections_reading same as the Reading value
$connections_waiting same as the Waiting value
$connections_writing same as the Writing value
$content_length “Content-Length” request header field
$content_type “Content-Type” request header field
$cookie_ the named cookie
$date_gmt current time in GMT. The format is set by the config command with the timefmt parameter
$date_local current time in the local time zone. The format is set by the config command with the timefmt parameter
$document_root root or alias directive’s value for the current request
$document_uri same as $uri
$fastcgi_path_info the value of the second capture set by the fastcgi_split_path_info directive. This variable can be used to set the PATH_INFO parameter
$fastcgi_script_name request URI or, if a URI ends with a slash, request URI with an index file name configured by the fastcgi_index directive appended to it. This variable can be used to set the SCRIPT_FILENAME and PATH_TRANSLATED parameters that determine the script name in PHP. For example, for the “/info/” request with the following directives
fastcgi_index index.php;
fastcgi_param SCRIPT_FILENAME /home/www/scripts/php$fastcgi_script_name;
the SCRIPT_FILENAME parameter will be equal to “/home/www/scripts/php/info/index.php”
$geoip_area_code telephone area code (US only)
$geoip_city city name, for example, “Moscow”, “Washington”
$geoip_city_continent_code two-letter continent code, for example, “EU”, “NA”
$geoip_city_country_code two-letter country code, for example, “RU”, “US”
$geoip_city_country_code3 three-letter country code, for example, “RUS”, “USA”
$geoip_city_country_name country name, for example, “Russian Federation”, “United States”
$geoip_country_code two-letter country code, for example, “RU”, “US”
$geoip_country_code3 three-letter country code, for example, “RUS”, “USA”
$geoip_country_name country name, for example, “Russian Federation”, “United States”
$geoip_dma_code DMA region code in US (also known as “metro code”), according to the geotargeting in Google AdWords API
$geoip_latitude latitude
$geoip_longitude longitude
$geoip_org organization name, for example, “The University of Melbourne”
$geoip_postal_code postal code
$geoip_region two-symbol country region code (region, territory, state, province, federal land and the like), for example, “48”, “DC”
$geoip_region_name country region name (region, territory, state, province, federal land and the like), for example, “Moscow City”, “District of Columbia”
$gzip_ratio achieved compression ratio, computed as the ratio between the original and compressed response sizes
$host in this order of precedence: host name from the request line, or host name from the “Host” request header field, or the server name matching a request
$hostname host name
$http2 negotiated protocol identifier: “h2” for HTTP/2 over TLS, “h2c” for HTTP/2 over cleartext TCP, or an empty string otherwise
$http_ arbitrary request header field; the last part of the variable name is the field name converted to lower case with dashes replaced by underscores. Examples: $http_referer, $http_user_agent
$https “on” if connection operates in SSL mode, or an empty string otherwise
$invalid_referer Empty string, if the “Referer” request header field value is considered valid, otherwise “1”
$is_args “?” if a request line has arguments, or an empty string otherwise
$limit_rate setting this variable enables response rate limiting; see limit_rate
$memcached_key Defines a key for obtaining response from a memcached server
$modern_browser equals the value set by the modern_browser_value directive, if a browser was identified as modern
$msec current time in seconds with the milliseconds resolution (1.3.9, 1.2.6)
$msie equals “1” if a browser was identified as MSIE of any version
$nginx_version nginx version
$pid PID of the worker process
$pipe “p” if request was pipelined, “.” otherwise (1.3.12, 1.2.7)
$proxy_add_x_forwarded_for the “X-Forwarded-For” client request header field with the $remote_addr variable appended to it, separated by a comma. If the “X-Forwarded-For” field is not present in the client request header, the $proxy_add_x_forwarded_for variable is equal to the $remote_addr variable
$proxy_host name and port of a proxied server as specified in the proxy_pass directive
$proxy_port port of a proxied server as specified in the proxy_pass directive, or the protocol’s default port
$proxy_protocol_addr client address from the PROXY protocol header, or an empty string otherwise (1.5.12). the PROXY protocol must be previously enabled by setting the proxy_protocol parameter in the listen directive.
$proxy_protocol_port client port from the PROXY protocol header, or an empty string otherwise (1.11.0). the PROXY protocol must be previously enabled by setting the proxy_protocol parameter in the listen directive.
$query_string same as $args
$realip_remote_addr keeps the original client address (1.9.7)
$realip_remote_port keeps the original client port (1.11.0)
$realpath_root an absolute pathname corresponding to the root or alias directive’s value for the current request, with all symbolic links resolved to real paths
$remote_addr client address
$remote_port client port
$remote_user user name supplied with the Basic authentication
$request full original request line
$request_body request body. The variable’s value is made available in locations processed by the proxy_pass, fastcgi_pass, uwsgi_pass, and scgi_pass directives.
$request_body_file name of a temporary file with the request body. At the end of processing, the file needs to be removed. To always write the request body to a file, client_body_in_file_only needs to be enabled. When the name of a temporary file is passed in a proxied request or in a request to a FastCGI/uwsgi/SCGI server, passing the request body should be disabled by the proxy_pass_request_body off, fastcgi_pass_request_body off, uwsgi_pass_request_body off, or scgi_pass_request_body off directives, respectively.
$request_completion “OK” if a request has completed, or an empty string otherwise
$request_filename file path for the current request, based on the root or alias directives, and the request URI
$request_id unique request identifier generated from 16 random bytes, in hexadecimal (1.11.0)
$request_length request length (including request line, header, and request body) (1.3.12, 1.2.7)
$request_method request method, usually “GET” or “POST”
$request_time request processing time in seconds with a milliseconds resolution (1.3.9, 1.2.6); time elapsed since the first bytes were read from the client
$request_uri full original request URI (with arguments)
$scheme request scheme, “http” or “https”
$secure_link The status of a link check. The specific value depends on the selected operation mode
$secure_link_expires The lifetime of a link passed in a request; intended to be used only in the secure_link_md5 directive
$sent_http_ arbitrary response header field; the last part of the variable name is the field name converted to lower case with dashes replaced by underscores
$server_addr an address of the server which accepted a request. Computing a value of this variable usually requires one system call. To avoid a system call, the listen directives must specify addresses and use the bind parameter.
$server_name name of the server which accepted a request
$server_port port of the server which accepted a request
$server_protocol request protocol, usually “HTTP/1.0”, “HTTP/1.1”, or “HTTP/2.0”
$session_log_binary_id current session ID in binary form (16 bytes)
$session_log_id current session ID
$slice_range the current slice range in HTTP byte range format, for example, bytes=0-1048575
$spdy SPDY protocol version for SPDY connections, or an empty string otherwise
$spdy_request_priority request priority for SPDY connections, or an empty string otherwise
$ssl_cipher returns the string of ciphers used for an established SSL connection
$ssl_client_cert returns the client certificate in the PEM format for an established SSL connection, with each line except the first prepended with the tab character; this is intended for the use in the proxy_set_header directive
$ssl_client_fingerprint returns the SHA1 fingerprint of the client certificate for an established SSL connection (1.7.1)
$ssl_client_i_dn returns the “issuer DN” string of the client certificate for an established SSL connection
$ssl_client_raw_cert returns the client certificate in the PEM format for an established SSL connection
$ssl_client_s_dn returns the “subject DN” string of the client certificate for an established SSL connection
$ssl_client_serial returns the serial number of the client certificate for an established SSL connection
$ssl_client_verify returns the result of client certificate verification: “SUCCESS”, “FAILED”, and “NONE” if a certificate was not present
$ssl_protocol returns the protocol of an established SSL connection
$ssl_server_name returns the server name requested through SNI (1.7.0)
$ssl_session_id returns the session identifier of an established SSL connection
$ssl_session_reused returns “r” if an SSL session was reused, or “.” otherwise (1.5.11)
$status response status (1.3.2, 1.2.2)
$tcpinfo_rtt,
$tcpinfo_rttvar,
$tcpinfo_snd_cwnd,
$tcpinfo_rcv_space information about the client TCP connection; available on systems that support the TCP_INFO socket option
$time_iso8601 local time in the ISO 8601 standard format (1.3.12, 1.2.7)
$time_local local time in the Common Log Format (1.3.12, 1.2.7)
$uid_got The cookie name and received client identifier
$uid_reset If the variable is set to a non-empty string that is not “0”, the client identifiers are reset. The special value “log” additionally leads to the output of messages about the reset identifiers to the error_log
$uid_set The cookie name and sent client identifier
$upstream_addr keeps the IP address and port, or the path to the UNIX-domain socket of the upstream server. If several servers were contacted during request processing, their addresses are separated by commas, e.g. “192.168.1.1:80, 192.168.1.2:80, unix:/tmp/sock”. If an internal redirect from one server group to another happens, initiated by “X-Accel-Redirect” or error_page, then the server addresses from different groups are separated by colons, e.g. “192.168.1.1:80, 192.168.1.2:80, unix:/tmp/sock : 192.168.10.1:80, 192.168.10.2:80”
$upstream_cache_status keeps the status of accessing a response cache (0.8.3). The status can be either “MISS”, “BYPASS”, “EXPIRED”, “STALE”, “UPDATING”, “REVALIDATED”, or “HIT”
$upstream_connect_time time spent on establishing a connection with an upstream server
$upstream_cookie_ cookie with the specified name sent by the upstream server in the “Set-Cookie” response header field (1.7.1). Only the cookies from the response of the last server are saved
$upstream_header_time time between establishing a connection and receiving the first byte of the response header from the upstream server
$upstream_http_ keep server response header fields. For example, the “Server” response header field is available through the $upstream_http_server variable. The rules of converting header field names to variable names are the same as for the variables that start with the “$http_” prefix. Only the header fields from the response of the last server are saved
$upstream_response_length keeps the length of the response obtained from the upstream server (0.7.27); the length is kept in bytes. Lengths of several responses are separated by commas and colons like addresses in the $upstream_addr variable
$upstream_response_time time between establishing a connection and receiving the last byte of the response body from the upstream server
$upstream_status keeps status code of the response obtained from the upstream server. Status codes of several responses are separated by commas and colons like addresses in the $upstream_addr variable
$uri current URI in request, normalized. The value of $uri may change during request processing, e.g. when doing internal redirects, or when using index files.
NginX error log format:
YYYY/MM/DD HH:MM:SS [LEVEL] PID#TID: *CID MESSAGE
With PID and TID being the logging process and thread id
and CID a number identifying a (probably proxied) connection,
probably a counter. The *CID part is optional.
"""
|
Pawamoy/django-meerkat
|
src/meerkat/logs/__init__.py
|
Python
|
isc
| 22,069
|
[
"VisIt"
] |
57f134dcf831e5560388d52ac8b88977e9746712ded6317f5484193296dc0838
|
import os, sys
# Prepend this script's own directory to sys.path so sibling modules can be
# imported (and relative paths opened) from any working directory.
sys.path.append(os.path.join(os.path.dirname(__file__)))
'''
added head source directory in path for import from any location and relative testing and pwd for open() relative files
'''
import time
import socket
from datetime import datetime as dtdt
from datetime import date as dtd
from dateutil.relativedelta import relativedelta
from elasticsearch import Elasticsearch
import subprocess
class Tools:
    """Utility helpers for the AWS billing ELK pipeline.

    Responsibilities: wait for the ELK containers to accept connections,
    install the Elasticsearch index template, discover which monthly AWS
    billing directories in S3 still need indexing, download and unpack the
    report CSVs, and drive the external Go/Kibana indexing scripts through
    subprocesses.
    """

    def __init__(self, s3=None):
        """Capture S3 configuration from the environment.

        :param s3: optional boto3 S3 client.  When omitted, only the
                   ELK-connection helpers are usable.
        """
        if s3:
            self.bucketname = os.environ.get('S3_BUCKET_NAME', '')
            self.path_name_s3_billing = os.environ.get('S3_REPORT_PATH', '')
            # A bare '/' means "bucket root"; normalise it so S3 keys built
            # later do not start with a slash.
            if self.path_name_s3_billing == '/':
                self.path_name_s3_billing = ''
            self.s3_report_name = os.environ.get('S3_REPORT_NAME', '')
            self.s3 = s3

    @staticmethod
    def _wait_for_service(name, port, attempts=15, delay=4):
        """Poll localhost:<port> until it accepts a TCP connection.

        :param name: human-readable service name used in log messages.
        :param port: TCP port to probe on localhost.
        :param attempts: maximum number of connection attempts.
        :param delay: seconds to sleep between failed attempts.
        :return: True if the service answered, False after all attempts fail.
        """
        probe = socket.socket()
        try:
            for _ in range(attempts):
                try:
                    print('Checking if %s container has started to listen to %s' % (name, port))
                    probe.connect(('localhost', port))
                    print('Great %s is listening on %s :)' % (name, port))
                    return True
                except Exception as e:
                    print("Something's wrong with %s. Exception is %s" % (name, e))
                    print('I will retry after %s seconds' % delay)
                    time.sleep(delay)
            return False
        finally:
            probe.close()

    def check_elk_connection(self):
        """Wait for Elasticsearch (9200), Logstash (6379) and Kibana (5601).

        BUGFIX: the original set ``connection_ok = True`` inside every
        ``except`` handler, so the method reported success even when a
        service never came up.  True is now returned only when all three
        services accepted a connection.

        :return: True when the full ELK stack is reachable.
        """
        elasticsearch_ok = self._wait_for_service('Elasticsearch', 9200)
        logstash_ok = self._wait_for_service('Logstash', 6379)
        kibana_ok = self._wait_for_service('Kibana', 5601)
        return elasticsearch_ok and logstash_ok and kibana_ok

    def index_template(self):
        """Install the ``aws_billing`` index template if it is not present.

        Exits the process with status 1 when template creation fails.
        """
        out = subprocess.check_output(['curl -XHEAD -i "localhost:9200/_template/aws_billing"'], shell=True,
                                      stderr=subprocess.PIPE)
        if '200 OK' not in out.decode():
            status = subprocess.Popen(
                [
                    'curl -XPUT localhost:9200/_template/aws_billing -d "`curl https://raw.githubusercontent.com/toadkicker/elk-stack/master/extra/billing/aws-billing-es-template.json`"'],
                shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            if status.wait() != 0:
                print('Something went wrong while creating mapping index')
                sys.exit(1)
            else:
                print('ES mapping created :)')
        else:
            print('Template already exists')

    def get_s3_bucket_dir_to_index(self):
        """Return the monthly S3 directory names that still need indexing.

        Lists the top-level ``YYYYMM01-YYYYMM01`` directories under the
        billing prefix and compares them with the newest @timestamp already
        present in the ``aws-billing*`` indices, so that only the months
        from the last indexed one up to the current month are reprocessed.

        :return: list of directory names to index, or the int 1 when the
                 bucket listing contains no CommonPrefixes (kept for
                 backward compatibility with existing callers).
        """
        key_names = self.s3.list_objects(
            Bucket=self.bucketname,
            Delimiter='/',  # If this is not sent we don't get CommonPrefixes in the response
            Prefix=self.path_name_s3_billing)
        if 'CommonPrefixes' not in key_names:
            return 1
        s3_dir_names = sorted(entry['Prefix'].split('/')[-2] for entry in key_names['CommonPrefixes'])
        client = Elasticsearch()
        index_list = client.indices.get_alias('aws-billing*').keys()
        index_time = []
        for index_name in index_list:
            if index_name:
                index_time.append(client.search(index=index_name, size=1, query={"query": {"match_all": {}}})[
                    'hits']['hits'][0]['_source']['@timestamp'])
        index_time.sort(reverse=True)
        dir_start = 0
        dir_end = None
        if index_time:
            # Current month's directory name, e.g. '20240101-20240201'.
            current_dir = dtd.today().strftime('%Y%m01') + '-' + (dtd.today() + relativedelta(months=1)).strftime('%Y%m01')
            # Directory corresponding to the newest already-indexed document.
            last_ind_dir = index_time[0].split('T')[0].replace('-', '')
            last_ind_dir = dtdt.strptime(last_ind_dir, '%Y%m%d').strftime('%Y%m01') + '-' + (
                dtdt.strptime(last_ind_dir, '%Y%m%d') + relativedelta(months=1)).strftime('%Y%m01')
            dir_start = s3_dir_names.index(last_ind_dir)
            dir_end = s3_dir_names.index(current_dir) + 1
        s3_dir_to_index = s3_dir_names[dir_start:dir_end]
        print('Months to be indexed: {}'.format(', '.join(s3_dir_to_index)))
        # returning only the dirnames which are to be indexed
        return s3_dir_to_index

    def get_latest_zip_filename(self, monthly_dir_name):
        """Download the month's manifest and return the latest report key.

        BUGFIX: the manifest is JSON, so it is now parsed with ``json``
        instead of ``eval`` (which executed arbitrary file content).

        :param monthly_dir_name: S3 directory name of the billing month.
        :return: S3 key of the newest gzipped report CSV.
        """
        import json
        latest_json_file_name = '/'.join([self.path_name_s3_billing, monthly_dir_name, '']) + self.s3_report_name + '-Manifest.json'
        print('Downloading {}...'.format(latest_json_file_name))
        self.s3.download_file(
            self.bucketname,
            latest_json_file_name,
            'getfile.json')
        # read the json file to get the latest updated version of csv
        with open('getfile.json', 'r') as manifest:
            content = json.load(manifest)
        return content['reportKeys'][0]

    def get_req_csv_from_s3(self, monthly_dir_name, latest_gzip_filename):
        """Download the month's gzipped report from S3 and extract it.

        BUGFIX: the original returned before ``gunzip`` finished, so callers
        could race against the extraction; the subprocess is now waited on.

        :param monthly_dir_name: S3 directory name of the billing month.
        :param latest_gzip_filename: S3 key of the gzipped report.
        :return: local CSV filename (billing_report_YYYY-MM.csv).
        """
        # Local filename formatted for the Go code: billing_report_yyyy-mm.csv
        local_gz_filename = 'billing_report_' + \
            dtdt.strptime(monthly_dir_name.split('-')[0], '%Y%m%d').strftime('%Y-%m') + '.csv.gz'
        local_csv_filename = local_gz_filename[:-3]
        self.s3.download_file(self.bucketname, latest_gzip_filename, local_gz_filename)
        print("Extracting latest csv file")
        process_gunzip = subprocess.Popen(['gunzip -v ' + local_gz_filename], shell=True, stdout=subprocess.PIPE,
                                          stderr=subprocess.PIPE)
        if process_gunzip.wait() != 0:
            print('Something went wrong while extracting ' + local_gz_filename)
            sys.exit(1)
        return local_csv_filename

    def index_csv(self, filename, dir_name):
        """Delete the month's old index and reindex the CSV via the Go code.

        :param filename: local CSV filename under /aws-elk-billing.
        :param dir_name: S3 monthly directory name, used to derive the
                         index suffix (YYYY.MM).
        """
        # Current month index suffix, e.g. '2024.01'; exported so the
        # logstash config can name the index accordingly.
        index_format = dtdt.strptime(
            dir_name.split('-')[0],
            '%Y%m%d').strftime('%Y.%m')
        os.environ['file_y_m'] = index_format
        status = subprocess.Popen(
            ['curl -XDELETE localhost:9200/aws-billing-' + index_format], shell=True, stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        if status.wait() != 0:
            print(
                'I think there are no aws-billing* indice or it is outdated, its OK main golang code will create a new one for you :)')
        else:
            print('aws-billing* indice deleted or Not found, its OK main golang code will create a new one for you :)')
        # Run the main golang code to parse the billing file and send it to
        # Elasticsearch over Logstash.
        status = subprocess.Popen(
            ['go run /aws-elk-billing/main.go --file /aws-elk-billing/' + filename], shell=True, stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        print(status.stdout.read())
        print(status.stderr.read())
        if status.wait() != 0:
            print('Something went wrong while getting the file reference or while talking with logstash')
            sys.exit(1)
        else:
            print('AWS Billing report sucessfully parsed and indexed in Elasticsearch via Logstash :)')

    def _run_kibana_script(self, script_name, failure_msg, success_msg):
        """Run one Kibana orchestration shell script; exit(1) on failure."""
        status = subprocess.Popen(
            ['(cd /aws-elk-billing/kibana; bash ' + script_name + ')'],
            shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if status.wait() != 0:
            print(failure_msg)
            sys.exit(1)
        else:
            print(success_msg)

    def index_kibana(self):
        """Index the Kibana search mapping, dashboard and visualizations."""
        self._run_kibana_script(
            'orchestrate_search_mapping.sh',
            'The Discover Search mapping failed to be indexed to .kibana index in Elasticsearch',
            'The Discover Search mapping sucessfully indexed to .kibana index in Elasticsearch, Kept intact if user already used it :)')
        self._run_kibana_script(
            'orchestrate_dashboard.sh',
            'AWS-Billing-DashBoard default dashboard failed to indexed to .kibana index in Elasticsearch',
            'AWS-Billing-DashBoard default dashboard sucessfully indexed to .kibana index in Elasticsearch, Kept intact if user already used it :)')
        self._run_kibana_script(
            'orchestrate_visualisation.sh',
            'Kibana default visualizations failed to indexed to .kibana index in Elasticsearch',
            'Kibana default visualizations sucessfully indexed to .kibana index in Elasticsearch, Kept intact if user have already used it :)')

    def delete_csv_json_files(self):
        """Remove downloaded report, manifest and partial files after indexing.

        BUGFIX: the cleanup subprocesses are now waited on, so the method no
        longer returns (and the program no longer exits) before deletion has
        actually completed.
        """
        for pattern in ('billing_report_*', 'getfile*'):
            cleanup = subprocess.Popen(
                ["find /aws-elk-billing -name '" + pattern + "' -exec rm -f {} \\;"], shell=True,
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            cleanup.wait()
|
toadkicker/elk-stack
|
extra/billing/tools/tools.py
|
Python
|
apache-2.0
| 10,856
|
[
"Elk"
] |
bb8d5087d25b691d36f255be9f3e689b3665a870ae30dee012ac2aec125f9e93
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import abstractmethod
from zoo.orca.learn.base_estimator import BaseEstimator
class Estimator(BaseEstimator):
    """Abstract Orca estimator interface.

    Defines the contract (fit / predict / evaluate, model persistence,
    TensorBoard summaries, gradient clipping and Orca checkpointing) that
    backend-specific Spark estimators must implement.  All abstract methods
    here simply raise NotImplementedError.
    """
    @abstractmethod
    def fit(self, data, epochs, batch_size=32, feature_cols=None, label_cols=None,
            validation_data=None, checkpoint_trigger=None):
        """
        Train the model with train data.
        :param data: train data.
        :param epochs: number of epochs to train.
        :param batch_size: total batch size for each iteration. Default: 32.
        :param feature_cols: feature column names if train data is Spark DataFrame.
        :param label_cols: label column names if train data is Spark DataFrame.
        :param validation_data: validation data. Validation data type should be the same
        as train data.
        :param checkpoint_trigger: when to trigger checkpoint during training.
        Should be a zoo.orca.learn.trigger, like EveryEpoch(), SeveralIteration(num_iterations), etc.
        """
        raise NotImplementedError
    @abstractmethod
    def predict(self, data, batch_size=4, feature_cols=None):
        """
        Predict input data.
        :param data: data to be predicted.
        :param batch_size: batch size per thread. Default: 4.
        :param feature_cols: list of feature column names if input data is Spark DataFrame.
        :return: predicted result.
        If input data is XShards or tf.data.Dataset, the predict result is an XShards,
        and the schema for each result is: {'prediction': predicted numpy array or
        list of predicted numpy arrays}.
        """
        raise NotImplementedError
    @abstractmethod
    def evaluate(self, data, batch_size=32, feature_cols=None, label_cols=None):
        """
        Evaluate the model.
        :param data: evaluation data.
        :param batch_size: batch size per thread. Default: 32.
        :param feature_cols: feature column names if evaluation data is Spark DataFrame.
        :param label_cols: label column names if evaluation data is Spark DataFrame.
        :return: evaluation result as a dictionary of {'metric name': metric value}
        """
        raise NotImplementedError
    @abstractmethod
    def get_model(self):
        """
        Get the trained model.
        :return: Trained model
        """
        raise NotImplementedError
    @abstractmethod
    def save(self, model_path):
        """
        Save the model to model_path.
        :param model_path: path to save the trained model.
        :return:
        """
        raise NotImplementedError
    @abstractmethod
    def load(self, model_path):
        """
        Load an existing model from model_path.
        :param model_path: Path to the existing model.
        :return:
        """
        raise NotImplementedError
    def set_tensorboard(self, log_dir, app_name):
        """
        Set summary information during the training process for visualization purposes.
        Saved summary can be viewed via TensorBoard.
        In order to take effect, it needs to be called before fit.
        Training summary will be saved to 'log_dir/app_name/train'
        and validation summary (if any) will be saved to 'log_dir/app_name/validation'.
        :param log_dir: The base directory path to store training and validation logs.
        :param app_name: The name of the application.
        """
        # Concrete (non-abstract) convenience setter: subclasses read these
        # attributes when configuring their summary writers during fit.
        self.log_dir = log_dir
        self.app_name = app_name
    @abstractmethod
    def clear_gradient_clipping(self):
        """
        Clear gradient clipping parameters. In this case, gradient clipping will not be applied.
        In order to take effect, it needs to be called before fit.
        :return:
        """
        raise NotImplementedError
    @abstractmethod
    def set_constant_gradient_clipping(self, min, max):
        """
        Set constant gradient clipping during the training process.
        In order to take effect, it needs to be called before fit.
        :param min: The minimum value to clip by.
        :param max: The maximum value to clip by.
        :return:
        """
        raise NotImplementedError
    @abstractmethod
    def set_l2_norm_gradient_clipping(self, clip_norm):
        """
        Clip gradient to a maximum L2-Norm during the training process.
        In order to take effect, it needs to be called before fit.
        :param clip_norm: Gradient L2-Norm threshold.
        :return:
        """
        raise NotImplementedError
    @abstractmethod
    def get_train_summary(self, tag=None):
        """
        Get the scalar from the model train summary.
        Return a list of summary data of [iteration_number, scalar_value, timestamp].
        :param tag: The string variable represents the scalar wanted
        """
        raise NotImplementedError
    @abstractmethod
    def get_validation_summary(self, tag=None):
        """
        Get the scalar from the model validation summary.
        Return a list of summary data of [iteration_number, scalar_value, timestamp].
        Note: The metric and tag may not be consistent.
        Please look up the following table to pass the tag parameter.
        Left side is your metric during compile;
        right side is the tag you should pass.
        'Accuracy' | 'Top1Accuracy'
        'BinaryAccuracy' | 'Top1Accuracy'
        'CategoricalAccuracy' | 'Top1Accuracy'
        'SparseCategoricalAccuracy' | 'Top1Accuracy'
        'AUC' | 'AucScore'
        'HitRatio' | 'HitRate@k' (k is Top-k)
        'Loss' | 'Loss'
        'MAE' | 'MAE'
        'NDCG' | 'NDCG'
        'TFValidationMethod' | '${name + " " + valMethod.toString()}'
        'Top5Accuracy' | 'Top5Accuracy'
        'TreeNNAccuracy' | 'TreeNNAccuracy()'
        'MeanAveragePrecision' | 'MAP@k' (k is Top-k) (BigDL)
        'MeanAveragePrecision' | 'PascalMeanAveragePrecision' (Zoo)
        'StatelessMetric' | '${name}'
        :param tag: The string variable represents the scalar wanted
        """
        raise NotImplementedError
    @abstractmethod
    def load_orca_checkpoint(self, path, version):
        """
        Load the specified Orca checkpoint.
        :param path: checkpoint directory which contains model.* and
        optimMethod-TFParkTraining.* files.
        :param version: checkpoint version, which is the suffix of the model.* file,
        i.e., for the model.4 file, the version is 4.
        """
        raise NotImplementedError
    def shutdown(self):
        """
        Release resources.  No-op by default; subclasses override as needed.
        :return:
        """
        pass
|
intel-analytics/analytics-zoo
|
pyzoo/zoo/orca/learn/spark_estimator.py
|
Python
|
apache-2.0
| 7,286
|
[
"ORCA"
] |
b8e2405631b438eefc9e2f201877a6ba91787f6d7149327e735d85e57851fcb9
|
import ast
import gast
def _generate_translators(to):
class Translator(ast.NodeTransformer):
def _visit(self, node):
if isinstance(node, ast.AST):
return self.visit(node)
elif isinstance(node, list):
return [self._visit(n) for n in node]
else:
return node
def generic_visit(self, node):
cls = type(node).__name__
try:
new_node = getattr(to, cls)()
except AttributeError:
# handle nodes that are not part of the AST
return
for field in node._fields:
setattr(new_node, field, self._visit(getattr(node, field)))
for attr in node._attributes:
try:
setattr(new_node, attr, getattr(node, attr))
except AttributeError:
pass
return new_node
return Translator
# Ready-made translator classes: standard ast -> gast, and gast -> ast.
AstToGAst = _generate_translators(gast)
GAstToAst = _generate_translators(ast)
|
serge-sans-paille/gast
|
gast/astn.py
|
Python
|
bsd-3-clause
| 1,057
|
[
"VisIt"
] |
b9cae02fc38c1807d65693f05543954c0d9c91295313cdc3ebb514d2ba7de10e
|
#!/usr/bin/env python
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import pandas as pd
import sys
import os
import matplotlib as mpl
#mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.dates as md
from matplotlib.collections import LineCollection
import pylab
from scipy import stats
import datetime
import glob
import numpy.ma as ma
import importlib
import time_tools_attractor as ti
import io_tools_attractor as io
import data_tools_attractor as dt
# Printf-style float formats reused in log messages and plot labels.
fmt1 = "%.1f"
fmt2 = "%.2f"
fmt3 = "%.3f"
################# DEFAULT ARGS #########################
# Input/output/scratch locations on the analysis cluster.
inBaseDir = '/store/msrad/radar/precip_attractor/data/' #'/scratch/lforesti/data/'
outBaseDir = '/users/lforesti/results/'
tmpBaseDir = '/scratch/lforesti/tmp/'
# Whether we used a variable scaling break
plotHistScalingBreak = False
########GET ARGUMENTS FROM CMD LINE####
# Command-line interface: time window, radar product and thresholds that
# control which statistics are loaded and how they are plotted.
# BUGFIX: the '-end' help text said 'Starting date' (copy-paste error) and
# '-wols' said 'leas squares'; both help strings are corrected.
parser = argparse.ArgumentParser(description='Plot radar rainfall field statistics.')
parser.add_argument('-start', default='201601310000', type=str, help='Starting date YYYYMMDDHHmmSS.')
parser.add_argument('-end', default='201601310000', type=str, help='Ending date YYYYMMDDHHmmSS.')
parser.add_argument('-product', default='AQC', type=str, help='Which radar rainfall product to use (AQC, CPC, etc).')
parser.add_argument('-wols', default=0, type=int, help='Whether to use the weighted ordinary least squares or not in the fitting of the power spectrum.')
parser.add_argument('-minR', default=0.08, type=float, help='Minimum rainfall rate for computation of WAR and various statistics.')
parser.add_argument('-minWAR', default=5, type=float, help='Minimum WAR threshold for plotting.')
parser.add_argument('-minCorrBeta', default=0.95, type=float, help='Minimum correlation coeff. for beta for plotting.')
parser.add_argument('-accum', default=5, type=int, help='Accumulation time of the product [minutes].')
parser.add_argument('-temp', default=5, type=int, help='Temporal sampling of the products [minutes].')
parser.add_argument('-format', default='netcdf', type=str, help='Format of the file containing the statistics [csv,netcdf].')
parser.add_argument('-refresh', default=0, type=int, help='Whether to refresh the binary .npy archive or not.')
args = parser.parse_args()
refreshArchive = bool(args.refresh)
print('Refresh archive:', refreshArchive)
product = args.product
timeAccumMin = args.accum
timeSampMin = args.temp
# Zero-padded 5-digit strings used when building archive/figure file names.
timeAccumMinStr = '%05i' % timeAccumMin
timeSampMinStr = '%05i' % timeSampMin
# Basic sanity checks on the requested window (YYYYMMDDHHmmSS as integers).
if (int(args.start) > int(args.end)):
    print('Time end should be after time start')
    sys.exit(1)
if (int(args.start) < 198001010000) or (int(args.start) > 203001010000):
    print('Invalid -start or -end time arguments.')
    sys.exit(1)
else:
    timeStartStr = args.start
    timeEndStr = args.end
    # NOTE(review): ti.timestring2datetime is a project helper; presumably it
    # parses YYYYMMDDHHmmSS strings -- confirm against time_tools_attractor.
    timeStart = ti.timestring2datetime(timeStartStr)
    timeEnd = ti.timestring2datetime(timeEndStr)
# Encode the scaling-break choice as 0/1 for the loader functions below.
if plotHistScalingBreak:
    variableBreak = 1
else:
    variableBreak = 0
############### OPEN FILES WITH STATS
## Open single binary python file with stats to speed up (if it exists)
# Cached .npy archive of the statistics for this exact time window.
tmpArchiveFileName = tmpBaseDir + timeStartStr + '-' + timeEndStr + '_temporaryAttractor.npy'
tmpArchiveFileNameVariables = tmpBaseDir + timeStartStr + '-' + timeEndStr + '_temporaryAttractor_varNames.npy'
if (os.path.isfile(tmpArchiveFileName) == True) and (refreshArchive == False):
    # Fast path: reuse the cached archive instead of re-reading CSV/netCDF.
    arrayStats = np.load(tmpArchiveFileName)
    arrayStats = arrayStats.tolist()
    variableNames = np.load(tmpArchiveFileNameVariables)
    print('Loaded:', tmpArchiveFileName)
else:
    # Slow path: collect the statistics from the raw CSV or netCDF files.
    # NOTE(review): the csv branch passes timeSampMin as timeAccumMin while
    # the netcdf branch passes timeAccumMin -- confirm this asymmetry is
    # intended and not a copy-paste slip.
    if args.format == 'csv':
        arrayStats, variableNames = io.csv_list2array(timeStart, timeEnd, inBaseDir, analysisType='STATS', \
        product = product, timeAccumMin = timeSampMin, minR=args.minR, wols=args.wols, variableBreak=variableBreak)
    elif args.format == 'netcdf':
        arrayStats, variableNames = io.netcdf_list2array(timeStart, timeEnd, inBaseDir, analysisType='STATS', \
        product = product, timeAccumMin = timeAccumMin, minR=args.minR, wols=args.wols, variableBreak=variableBreak)
    else:
        print('Please provide a valid file format.')
        sys.exit(1)
# Check that enough samples were parsed before continuing.
# BUGFIX: the CSV branch tested `len(arrayStats) == 100`, which only fired
# when the sample count was exactly 100; both formats should reject any
# count below 100 (as the netcdf branch already did).  Comparisons are
# combined with boolean `and` rather than bitwise `&`.
if (len(arrayStats) < 100) and (args.format == 'csv'):
    print("Not enough data found in CSV files.")
    sys.exit(1)
if (len(arrayStats) < 100) and (args.format == 'netcdf'):
    print("Not enough data found in NETCDF files.")
    sys.exit(1)
## Save data into a single binary python file to speed up further analysis with same dataset
arrayData = []
if refreshArchive == True:
    # Persist the freshly parsed statistics so the next run takes the
    # fast np.load path above.
    np.save(tmpArchiveFileName, arrayStats)
    np.save(tmpArchiveFileNameVariables, variableNames)
    print('Saved:',tmpArchiveFileName)
# Generate list of datetime objects from the first column (time stamps).
timeIntList = dt.get_column_list(arrayStats, 0)
timeStampsDt = ti.timestring_array2datetime_array(timeIntList)
# Convert list of lists to numpy array for vectorized thresholding below.
arrayStats = np.array(arrayStats)
# Check if there are data
print(len(arrayStats),' samples found.')
print('Variables from file: ', variableNames)
#################################################################################
####################### PLOTTING MULTIPLE ATTRACTORS in combinations of dimensions
varNamesRows = ['war','r_cmean', 'beta1', 'beta2','eccentricity']
varNamesCols = ['war','r_cmean', 'beta1', 'beta2']
#varNamesRows = ['eccentricity']
#varNamesCols = ['eccentricity', 'war','r_cmean', 'beta1', 'beta2']
warThreshold = args.minWAR
betaCorrThreshold = args.minCorrBeta
print('Variables for plotting: ', varNamesRows, ' vs. ', varNamesCols)
############### AXIS LIMITS
boolLogPlot = True
boolPowPlotEccentricity = False
if boolLogPlot:
WARlims = [6,18] # [6,18]
IMFlims = [-25,10] # [-20,5]
MMlims = [-8,10] # [-20,5]
else:
WARlims = [warThreshold,60]
IMFlims = [0.03, 3.0]
MMlims = [0.5, 3.0] # [-20,5]
if boolPowPlotEccentricity:
ecclims = [1,10]
else:
ecclims = [0,1]
beta1lims = [1.2,2.8] #[1.6,2.8]
beta2lims = [2.2,4.3] #[3.2,4]
trajectoryPlot = 'sections' # 'lines' 'scatter' 'coloredlines' 'sections'
densityPlot = '2dhist'# 'kde' or '2dhist'
nrBinsX = 60
nrBinsY = 60
###############################################################################
# Generate labels for plotting
varNamesAll = varNamesRows + varNamesCols
varNamesAll = dt.unique(varNamesAll)
varLabels = []
for var in range(0, len(varNamesAll)):
if varNamesAll[var] == 'war':
varLabels.append('WAR')
if varNamesAll[var] == 'r_mean':
varLabels.append('IMF')
if varNamesAll[var] == 'r_cmean':
varLabels.append('MM')
if varNamesAll[var] == 'beta1':
varLabels.append(r'$\beta_1$')
if varNamesAll[var] == 'beta2':
varLabels.append(r'$\beta_2$')
if varNamesAll[var] == 'eccentricity':
varLabels.append('Eccentricity')
#####
# Get indices of variables
indicesVars = dt.get_variable_indices(varNamesAll, variableNames)
# Put indices into dictionary
dictIdx = dict(zip(varNamesAll, indicesVars))
# WAR threshold
boolWAR = (arrayStats[:,dictIdx['war']] >= warThreshold)
# Beta correlation threshold
boolBetaCorr = (np.abs(arrayStats[:,dictIdx['beta1']+1]) >= np.abs(betaCorrThreshold)) & (np.abs(arrayStats[:,dictIdx['beta2']+1]) >= np.abs(betaCorrThreshold))
# Combination of thresholds
boolTot = np.logical_and(boolWAR == True, boolBetaCorr == True)
nrSamplesWAR = np.sum(boolWAR)
nrSamplesBetasWAR = np.sum(boolBetaCorr & boolWAR)
fractionValidBetas = 100*np.sum(boolBetaCorr & boolWAR)/nrSamplesWAR
print("Percentage valid betas: ", fmt1 % fractionValidBetas, " %")
############### Select subset of variables
varData = []
for var in range(0, len(varNamesAll)):
varName = varNamesAll[var]
if (varName == 'beta1') | (varName == 'beta2'):
varData.append(-arrayStats[boolTot,dictIdx[varName]])
else:
varData.append(arrayStats[boolTot,dictIdx[varName]])
varData = np.array(varData).T
# Argument of maximum value (to select interesting cases)
# print(dictIdx)
# idxMax = np.argmax(arrayStats[:,6])
# maxWARtime = timeStampsDt[idxMax]
# print(maxWARtime, arrayStats[idxMax,:])
# sys.exit()
# Define variable indices of array subset
dictIdxSubset = dict(zip(varNamesAll, np.arange(len(varNamesAll))))
# Create array of data limits in correct order
varLimits = []
for var in range(0,len(varNamesAll)):
varName = varNamesAll[var]
if varName == 'war':
dataLimits = WARlims
if varName == 'r_mean':
dataLimits = IMFlims
if varName == 'r_cmean':
dataLimits = MMlims
if varName == 'beta1':
dataLimits = beta1lims
if varName == 'beta2':
dataLimits = beta2lims
if varName == 'eccentricity':
dataLimits = ecclims
varLimits.append(dataLimits)
#axesLimits = np.array([ecclims, WARlims, MMlims, beta1lims, beta2lims])
axesLimits = np.array(varLimits)
# Define surfaces of section
minPercSec = 48
maxPercSec = 52
medianSectionStart = np.percentile(varData, minPercSec, axis=0)
medianSectionEnd = np.percentile(varData, maxPercSec, axis=0)
#sectionIntervals = np.array([[10,12],[1.0,1.1], [2.0, 2.2], [3.0,3.2]])
# Per-variable [start, end] intervals (one row per variable) used further below
# to cut "sections" through the attractor; built from median-based bounds
# computed earlier in the script.
sectionIntervals = np.vstack((medianSectionStart,medianSectionEnd)).T
##### Select subset of array within given range
#print((arrayStats[arrayStats[:,dictIdx['eccentricity']] > 0.96, 0]).astype(int))
#sys.exit()
# boolData = (varData[:,2] > 1.45) & (varData[:,2] < 1.55) & (varData[:,3] > 3.0) & (varData[:,3] < 3.1)
# #varData = varData[boolData,:]
# timeStampsStr = timeStampsStr[boolTot]
# timeStampsSel = timeStampsStr[boolData]
# nrConsecFields = 0
# for i in range(0,len(timeStampsSel)-1):
# timeDiff = (ti.timestring2datetime(timeStampsSel[i+1])-ti.timestring2datetime(timeStampsSel[i])).total_seconds()
# if (timeDiff == 300.0):
# nrConsecFields = nrConsecFields + 1
# else:
# nrConsecFields = 0
# if (nrConsecFields == 12):
# print(timeStampsSel[i-12])
################
############## HISTOGRAM SCALING BREAK
# Histogram of the optimal scaling break [km] over all selected time stamps.
# NOTE: this branch ends with sys.exit(), so when the flag is set nothing
# below this block is ever executed.
if plotHistScalingBreak:
    indexScalingVar = dt.get_variable_indices('scaling_break', variableNames)
    # Scaling-break values of the samples that passed the global filters (boolTot).
    scalingBreak = arrayStats[boolTot,indexScalingVar]
    scaleBreaks = np.unique(scalingBreak)
    # One bin per distinct break value (edge just below each value, 50 as top edge).
    bins = np.hstack((scaleBreaks-1,50))
    counts, bins = np.histogram(scalingBreak, bins = bins)
    nrSamples = len(scalingBreak)
    counts = 100.0*counts/float(nrSamples)  # counts -> relative frequency [%]
    meanVal = np.nanmean(scalingBreak)
    medianVal = np.nanmedian(scalingBreak)
    stdVal = np.nanstd(scalingBreak)
    width = 0.4 * (bins[1] - bins[0])
    center = (bins[:-1] + bins[1:]) / 2.0  # NOTE(review): computed but unused below
    # Plot histogram bars at the distinct break values
    axSb = plt.gca()
    print(scaleBreaks, counts)
    plt.bar(scaleBreaks, counts, align='center', width=width, color='blue', edgecolor='blue')
    # Summary statistics drawn in axes coordinates (top-right corner)
    textMedian = r'median = ' + str(int(medianVal))
    textMean = r'$\mu$ = ' + str("%0.2f" % meanVal)
    textStd = r'$\sigma$ = ' + str("%0.2f" % stdVal)
    plt.text(0.75, 0.95, textMedian, transform=axSb.transAxes, fontsize=14)
    plt.text(0.75, 0.91, textMean, transform=axSb.transAxes, fontsize=14)
    plt.text(0.75, 0.87, textStd, transform=axSb.transAxes, fontsize=14)
    maxPerc = 30  # fixed y-axis ceiling [%]
    plt.ylim([0, maxPerc])
    plt.xlabel('Scaling break [km]')
    plt.ylabel('Frequency [%]')
    #titleStr = 'Optimal scaling break \n' + product + ': ' + str(timeStampsDt[0]) + ' - ' + str(timeStampsDt[len(timeStampsDt)-1])
    titleStr = 'Optimal scaling break \n' + product + ': ' + str(timeStampsDt[0].year)
    plt.title(titleStr, fontsize=16)
    #plt.show()
    # Output file name encodes product, period and the filtering conditions.
    fileName = outBaseDir + product + timeStartStr + '-' + timeEndStr + '0_' + \
        'Rgt' + str(args.minR) + '_WOLS' + str(args.wols) + '_00005_histScaleBreak_warGt' + str("%0.1f" % warThreshold) + '_' + timeAccumMinStr + '.png'
    print('Saving: ',fileName)
    plt.savefig(fileName, dpi=300)
    sys.exit()
####### PLOT SCALING BREAK VS ECCENTRICITY/WAR
# 2D frequency plot of (beta2 - beta1) vs (WAR - MM) in dB over all samples.
# NOTE(review): this section appears to run unconditionally and ends with
# sys.exit(), which makes the attractor plotting below unreachable --
# presumably sections of this script were toggled by editing; confirm.
ecc_idx = dictIdxSubset['eccentricity']
war_idx = dictIdxSubset['war']
rcmean_idx = dictIdxSubset['r_cmean']
b1_idx = dictIdxSubset['beta1']
b2_idx = dictIdxSubset['beta2']
ecc = varData[:,ecc_idx]
eccDB = 10.0*np.log10(1-ecc)  # (1 - eccentricity) expressed in dB
rcmean = varData[:,rcmean_idx]
war = varData[:,war_idx]
# WAR/MM ratio in dB (difference of the two dB terms)
warDB = 10.0*np.log10(war) - 10.0*np.log10(rcmean)
b1 = varData[:,b1_idx]
b2 = varData[:,b2_idx]
diffBeta = b2-b1  # difference of the two spectral slopes
nrBinsX = 50
nrBinsY = 50
# xbins = np.linspace(np.min(eccDB), np.max(eccDB), nrBinsX)
xbins = np.linspace(np.min(warDB), np.max(warDB), nrBinsX)
ybins = np.linspace(np.min(diffBeta), np.max(diffBeta), nrBinsY)
# Compute 2D histogram of the sample density
# counts, _, _ = np.histogram2d(eccDB, diffBeta, bins=(xbins, ybins))
counts, _, _ = np.histogram2d(warDB, diffBeta, bins=(xbins, ybins))
slope, intercept, r, p_value, std_err = stats.linregress(warDB, diffBeta)
nrSamples = len(eccDB)
counts[counts == 0] = np.nan  # blank out empty bins in the plot
counts = counts/nrSamples*100  # counts -> relative frequency [%]
countsMask = ma.array(counts, mask = np.isnan(counts))
maxFreq = nrBinsX/(nrBinsX/0.30)  # effectively 0.30 [%], the colour-scale ceiling
mpl.rc('xtick', labelsize=15)
mpl.rc('ytick', labelsize=15)
#### Plotting
# fig, ax = plt.subplots()
# newax = ax.twiny()
plt.figure(figsize=(9,9))
histIm = plt.pcolormesh(xbins, ybins, countsMask.T, vmax = maxFreq)
# xrangeFit = np.array([np.min(xbins), np.max(xbins)])
# plt.plot(xrangeFit, intercept + slope*xrangeFit)
# plt.text(xrangeFit[0],intercept + slope*xrangeFit[0],'rho = ' + fmt2 % r)
# plt.scatter(ecc, b2-b1)
# Axes
# newax.xaxis.set_ticks_position('bottom')
# xlocs = np.linspace(np.min(eccDB), np.max(eccDB), 10)
# xlabs = dt.from_dB(xlocs)+1
# print(xlabs)
# newax.set_frame_on(True)
# newax.patch.set_visible(False)
# newax.set_xticks(xlocs)
# newax.set_xticklabels(xlabs)
# plt.xlabel('1-eccentricity [dB]', fontsize=22)
plt.xlabel('WAR - MM [dB]', fontsize=22)
plt.ylabel(r'$\beta_2$-$\beta_1$', fontsize=22)
# Qualitative interpretation of the two ends of the WAR-MM axis
plt.text(15,-0.3, 'Light widespread \n stratiform rain \n WAR >> MM', fontsize=16)
plt.text(-2,-0.3, 'Intense isolated \n convective rain \n MM >> WAR', fontsize=16)
plt.show()
sys.exit()
#############PLOT ATTRACTOR
# Matrix of subplots exploring the phase space ("attractor") of the radar
# statistics: trajectories/sections in the lower triangle, 2D densities in the
# upper triangle, and 1D histograms (or a time series for short periods) on
# the diagonal.
# Compute duration of event for colour scale
durationFromStart = timeStampsDt[0] - timeStampsDt[len(timeStampsDt)-1]
hoursFromStart = np.abs(durationFromStart.total_seconds())/3600
daysFromStart = np.abs(durationFromStart.total_seconds())/(3600*24)
# Use days for long archives, hours for single events.
if daysFromStart > 5:
    timeFromStart = daysFromStart
else:
    timeFromStart = hoursFromStart
# Adjust size of subplot for better visualization
nrRows = len(varNamesRows)
nrCols = len(varNamesCols)
if (nrRows == 1) and ((nrCols == 3) or (nrCols == 4)):
    nrRowsAdj = 2
    nrColsAdj = 2
elif (nrRows == 1) and (nrCols == 5):
    nrRowsAdj = 2
    nrColsAdj = 3
else:
    nrRowsAdj = nrRows
    nrColsAdj = nrCols
print('Number of subplots: ', nrRowsAdj, 'x', nrColsAdj)
# Generate figure
plt.close("all")
if nrRowsAdj == nrColsAdj:
    sizeFig = (11, 9.5)
else:
    sizeFig = (13, 9.5)
fig = plt.figure(figsize=sizeFig)
# NOTE(review): argument-less add_axes() is invalid/deprecated in newer
# matplotlib; its result is immediately overwritten on the next line anyway.
ax = fig.add_axes()
ax = fig.add_subplot(111)
if nrRows == nrCols:
    mpl.rc('xtick', labelsize=7)
    mpl.rc('ytick', labelsize=7)
else:
    mpl.rc('xtick', labelsize=12)
    mpl.rc('ytick', labelsize=12)
from matplotlib.ticker import FormatStrFormatter
p = 0  # running subplot index (1-based, as required by plt.subplot)
for row in range(0,nrRows):
    for col in range(0,nrCols): # loop first by changing the X axis variable and keeping the Y axis variable fixed
        ######## Select X and Y variable data
        # Y variable (kept fixed for a given row)
        varYname = varNamesRows[row]
        idxColVar = dictIdxSubset[varYname]
        # Rain-rate and WAR variables are plotted in dB; the small offset
        # avoids log10(0) for dry fields.
        if ((varNamesRows[row] == 'r_mean') or (varNamesRows[row] == 'r_cmean') or (varNamesRows[row] == 'war')) and boolLogPlot:
            if (varNamesRows[row] == 'r_mean') or (varNamesRows[row] == 'r_cmean'):
                offset = 0.005
            elif varNamesRows[row] == 'war':
                offset = 0.01
            else:
                offset = 0.0
            varY = 10*np.log10(varData[:,idxColVar] + offset)
            varYLab = varLabels[idxColVar] + ' [dB]'
        elif varNamesRows[row] == 'eccentricity' and boolPowPlotEccentricity:
            varY = 10**varData[:,idxColVar]
            varYLab = '10^(' + varLabels[idxColVar] + ')'
        else:
            varY = varData[:,idxColVar]
            varYLab = varLabels[idxColVar]
        # X variable (will vary along the columns of the plot)
        varXname = varNamesCols[col]
        idxRowVar = dictIdxSubset[varXname]
        if ((varNamesCols[col] == 'r_mean') or (varNamesCols[col] == 'r_cmean') or (varNamesCols[col] == 'war')) and boolLogPlot:
            if (varNamesCols[col] == 'r_mean') or (varNamesCols[col] == 'r_cmean'):
                offset = 0.005
            elif varNamesCols[col] == 'war':
                offset = 0.01
            else:
                offset = 0.0
            varX = 10*np.log10(varData[:,idxRowVar] + offset)
            varXLab = varLabels[idxRowVar] + ' [dB]'
        elif varNamesCols[col] == 'eccentricity' and boolPowPlotEccentricity:
            varX = 10**varData[:,idxRowVar]
            varXLab = '10^(' + varLabels[idxRowVar] + ')'
        else:
            varX = varData[:,idxRowVar]
            varXLab = varLabels[idxRowVar]
        #########################################
        if nrRows == nrCols:
            ftSize = 8
        else:
            ftSize = 12
        # Plot number...
        p = p+1
        axSP = plt.subplot(nrRowsAdj, nrColsAdj, p)
        # One-decimal tick labels for the spectral slopes and IMF axes
        if (varYLab == r'$\beta_1$') or (varYLab == r'$\beta_2$') or (varYLab == 'IMF'):
            axSP.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
        if (varXLab == r'$\beta_1$') or (varXLab == r'$\beta_2$') or (varXLab == 'IMF'):
            axSP.xaxis.set_major_formatter(FormatStrFormatter('%.1f'))
        # Compute linear regression
        # NOTE(review): this uses the raw loop indices (col, row) as data-column
        # indices instead of idxRowVar/idxColVar, and operates on the untransformed
        # data rather than varX/varY -- looks like a bug, confirm intended columns.
        slope, intercept, r, p_value, std_err = stats.linregress(varData[:,col], varData[:,row])
        xmin = np.min(varX)
        xmax = np.max(varX)
        ymin = np.min(varY)
        ymax = np.max(varY)
        ############ Plot attractor trajectories or sections (lower triangle only)
        if (row > col) and (nrRows == nrCols):
            print('Drawing ', trajectoryPlot,' at row, col=', row, ',',col, ' - ', varLabels[row], ' vs ', varLabels[col])
            titleStrSP = 'r=' + '%.2f' % r
            if trajectoryPlot == 'coloredlines':
                # Construct line segments between consecutive samples so each
                # segment can be coloured by time from the start of the event.
                points = np.array([varX, varY]).T.reshape(-1, 1, 2)
                segments = np.concatenate([points[:-1], points[1:]], axis=1)
                ##### Plot collection
                lc = LineCollection(segments, cmap=plt.get_cmap('hsv'), norm=plt.Normalize(0, timeFromStart))
                # NOTE(review): set_array is given a scalar; presumably an array of
                # per-segment times was intended so colours vary along the trajectory.
                lc.set_array(np.array(timeFromStart))
                lc.set_linewidth(1)
                ax = axSP.add_collection(lc)
            if trajectoryPlot == 'scatter':
                plt.scatter(varX, varY)
            if trajectoryPlot == 'lines':
                plt.plot(varX, varY)
            ############# Plot sections
            if trajectoryPlot == 'sections':
                # The two variables NOT shown on this panel's axes: the third
                # defines the section interval, the fourth colours the dots.
                idxLabs = np.where(np.logical_and(varNamesAll != varNamesAll[col],varNamesAll != varNamesAll[row]))
                # The section is defined by fixing an interval on the third variable
                idxVar3 = idxLabs[0][0]
                idxVar4 = idxLabs[0][1]
                labVar3 = varLabels[idxVar3]
                labVar4 = varLabels[idxVar4]
                minInterval = sectionIntervals[idxVar3,0]
                maxInterval = sectionIntervals[idxVar3,1]
                print('Sect. for ', labVar3, ': ', minInterval, '-', maxInterval)
                idxData = np.where(np.logical_and(varData[:,idxVar3] >= minInterval, varData[:,idxVar3] <= maxInterval))
                varXsect = varX[idxData[0]]
                varYsect = varY[idxData[0]]
                # The colors of the dots are defined by the fourth variable
                if (labVar4 == 'IMF' or labVar4 == 'MM' or labVar4 == 'WAR') and boolLogPlot:
                    if (labVar4 == 'IMF') or (labVar4 == 'MM'):
                        offset = 0.005
                    elif labVar4 == 'WAR':
                        offset = 0.01
                    else:
                        offset = 0.0
                    var4col = 10*np.log10(varData[idxData[0],idxVar4] + offset)
                    labVar4 = labVar4 + ' [dB]'
                else:
                    var4col = varData[idxData[0],idxVar4]
                # Scatter
                # NOTE(review): vmin/vmax from axesLimits are immediately
                # overwritten by the 5th/95th percentiles below.
                vmin = axesLimits[idxVar4,0]
                vmax = axesLimits[idxVar4,1]
                vmin = np.percentile(var4col,5)
                vmax = np.percentile(var4col,95)
                scIm = plt.scatter(varXsect, varYsect, c=var4col, vmin=vmin, vmax=vmax, s=1.5, edgecolor='none')
                cbar = plt.colorbar(scIm)
                cbar.set_label(labVar4, labelpad=-15, y=1.10, rotation=0, fontsize=9)
                #cbar.ax.set_title(labVar4)
                #titleStrSP = 'Sect. for ' + labVar3 + ': ' + str(minInterval) + '-' + str(maxInterval)
                titleStrSP = 'Surface section for ' + str(int((maxPercSec+minPercSec)/2))+ '-pctile \n' + labVar3 + ' in ' + str(fmt2 % minInterval) + '-' + str(fmt2 % maxInterval)
            # Axis limits and title
            plt.xlim(axesLimits[col,0],axesLimits[col,1])
            plt.ylim(axesLimits[row,0],axesLimits[row,1])
            plt.title(titleStrSP, fontsize=9)
        ############# Plot 2d histogram or kernel density (upper triangle, or all
        ############# non-first panels when the subplot matrix is not square)
        criterion_NxN = ((row < col) and (nrRows == nrCols))
        criterion_NxP = ((nrRows != nrCols) and ((col != 0) or (row != 0)))
        if criterion_NxN or criterion_NxP:
            print('Drawing ', densityPlot,' at row, col=', row, ',',col, ' - ', varLabels[row], ' vs ', varLabels[col])
            # Compute correlation on the (possibly dB-transformed) plotted data
            beta, intercept, r_beta, p_value, std_err = stats.linregress(varX, varY)
            if densityPlot == 'kde':
                # Compute kernel density on a 100x100 grid spanning the data range
                X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
                positions = np.vstack([X.ravel(), Y.ravel()])
                values = np.vstack([varX, varY])
                kernel = stats.gaussian_kde(values)
                Z = np.reshape(kernel(positions).T, X.shape)
                # Plot kernel density
                #plt.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r, extent=[xmin, xmax, ymin, ymax], aspect='auto')
                classLimits = np.concatenate((np.arange(0.001,0.01,0.001), np.arange(0.01,0.06,0.01),np.arange(0.1,1.1,0.1)))
                # NOTE(review): normLog is never used; also vmin=0 is invalid for LogNorm.
                normLog = colors.LogNorm(vmin=0, vmax=5)
                Zmax = np.max(np.max(Z))
                histIm = plt.contourf(X, Y, Z/Zmax, classLimits, cmap=plt.cm.gist_earth_r, extent=[xmin, xmax, ymin, ymax], aspect='auto')
                #plt.title('KDE',fontsize=10)
            if densityPlot == '2dhist':
                # Plot 2D histogram with a custom rainbow-like colormap
                colorList = ['darkblue', 'green', 'yellow', 'orange', 'red', 'blueviolet', 'lavender']
                cmapHist = colors.LinearSegmentedColormap.from_list('cmapHist', colorList, N=256)
                # X-bins
                #xbins = np.linspace(xmin, xmax, nrBinsX)
                #ybins = np.linspace(ymin, ymax, nrBinsY)
                xbins = np.linspace(axesLimits[col,0], axesLimits[col,1], nrBinsX)
                ybins = np.linspace(axesLimits[row,0], axesLimits[row,1], nrBinsY)
                # Compute histogram as relative frequency [%], masking empty bins
                counts, _, _ = np.histogram2d(varX, varY, bins=(xbins, ybins))
                nrSamples = len(varX)
                counts[counts == 0] = np.nan
                counts = counts/nrSamples*100
                countsMask = ma.array(counts,mask=np.isnan(counts))
                # Draw histogram
                maxFreq = nrBinsX/(nrBinsX/0.3)  # effectively 0.3 [%], colour ceiling
                histIm = plt.pcolormesh(xbins, ybins, countsMask.T, cmap=cmapHist, vmax = maxFreq)
                #cbar = plt.colorbar(histIm)
                # if varLabels[row] == 'IMF' or varLabels[row] == 'WAR':
                # axSP.set_yscale('log')
                # Directly plot histogram
                #plt.hist2d(varX, varY, bins=20, cmin=1, cmap=cmapHist) #, norm=LogNorm()) # gist_ncar, jet, spectral
            axSP.set_xlim(axesLimits[col,0],axesLimits[col,1])
            axSP.set_ylim(axesLimits[row,0],axesLimits[row,1])
            #axSP.set_aspect(1./ax.get_data_ratio())
            # Annotate correlation; highlight in red when |R| > 0.3
            corrText = 'R=' + str(fmt2 % r_beta)
            if np.abs(r_beta) > 0.3:
                colore = 'red'
            else:
                colore = 'black'
            axSb = plt.gca()
            plt.text(0.74, 0.93, corrText, transform=axSb.transAxes, fontsize=ftSize, color=colore,bbox=dict(facecolor='white', edgecolor='black', pad=1.0))
        # Plot time series on the diagonal (only for short records, <= 288 samples,
        # i.e. one day of 5-min fields)
        criterion_NxN = (row == col) and (len(varX) <= 288*1) and (nrRows == nrCols)
        if criterion_NxN: # plot max 5 days
            axDiag=plt.gca()
            plt.tick_params(bottom = 'off')
            plt.xticks(rotation=90)
            axDiag.plot(timeStampsDt, varY, 'b-')
            xfmt = md.DateFormatter('%H') #'%Y-%m-%d %H:%M:%S'
            axDiag.xaxis.set_major_formatter(xfmt)
        # Plot 1d histogram on the diagonal for longer records
        criterion_NxN = (row == col) and (len(varX) > 288*1) and (nrRows == nrCols)
        criterion_NxP = ((nrRows != nrCols) and row == 0 and col == 0)
        if criterion_NxN or criterion_NxP:
            # Compute 1d histogram as relative frequency [%]
            counts, bins = np.histogram(varY, bins=nrBinsY, range=axesLimits[row,:])
            nrSamples = len(varY)
            counts = 100.0*counts/float(nrSamples)
            medianVal = np.nanmedian(varY)
            meanVal = np.nanmean(varY)
            stdVal = np.nanstd(varY)
            width = 0.4 * (bins[1] - bins[0])
            center = (bins[:-1] + bins[1:]) / 2.0
            # Plot hist
            axSb = plt.gca()
            plt.bar(center, counts, align='center', width=width, color='blue', edgecolor='blue')
            textMedian = r'median = ' + str("%0.2f" % medianVal)
            textMean = r'$\mu$ = ' + str("%0.2f" % meanVal)
            textStd = r'$\sigma$ = ' + str("%0.2f" % stdVal)
            plt.text(0.05, 0.90, textMedian, transform=axSb.transAxes, fontsize=ftSize)
            plt.text(0.05, 0.84, textMean, transform=axSb.transAxes, fontsize=ftSize)
            plt.text(0.05, 0.78, textStd, transform=axSb.transAxes, fontsize=ftSize)
            maxPerc = 15  # fixed y-axis ceiling [%]
            plt.ylim([0, maxPerc])
        # Axis labels (square subplot matrix: labels only on the outer edges)
        if col == 0 and (nrRows == nrCols):
            plt.ylabel(varYLab, fontsize=12)
        if (row == nrRows-1) and (nrRows == nrCols):
            plt.xlabel(varXLab, fontsize=12)
        if row == 0 and (nrRows == nrCols):
            plt.title(varXLab, fontsize=12)
        if (col == nrCols-1) and (nrRows == nrCols):
            axSP.yaxis.set_label_position("right")
            plt.ylabel(varYLab, fontsize=12)
        # Axis labels for non square subplot matrix
        if (nrRows != nrCols):
            plt.xlabel(varXLab, fontsize=12)
            plt.ylabel(varYLab, fontsize=12)
            if row == 0 and col == 0:
                plt.ylabel('Frequency [%]', fontsize=12)
fig.tight_layout()
fig.subplots_adjust(top=0.92, right=0.8)  # leave room for suptitle and side text
# Main title
titleStr = product + ': ' + str(timeStampsDt[0]) + ' - ' + str(timeStampsDt[len(timeStampsDt)-1])
plt.suptitle(titleStr, fontsize=16)
# Colorbar for time from start of event
if trajectoryPlot == 'coloredlines':
    cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
    # 'ax' here is the LineCollection returned by add_collection above
    cbar = fig.colorbar(ax, cax = cbar_ax) # bug with fraction=0.03
    if daysFromStart > 5:
        cbar.ax.set_ylabel('Days from start of event')
    else:
        cbar.ax.set_ylabel('Hours from start of event')
# Write text on data conditions for analysis and plotting
if trajectoryPlot == 'sections':
    xoffset = 0.84
    yoffset = 0.92
    lineSpacing = 0.03
    fig.text(xoffset, yoffset, "Conditions:", fontsize=12, color='blue')
    if (args.minR < 0.01):
        textConditions = r"Rmin = " + (fmt3 % args.minR) + r' mm/hr'
    else:
        textConditions = r"Rmin = " + (fmt2 % args.minR) + r' mm/hr'
    fig.text(xoffset, yoffset-1*lineSpacing, textConditions, fontsize=12, color='blue')
    if args.wols == 0:
        textConditions = "OLS"
    if args.wols == 1:
        textConditions = "Weighted OLS"
    fig.text(xoffset, yoffset-2*lineSpacing, textConditions, fontsize=12, color='blue')
    textConditions = "WAR $\geq $ " + str(fmt1 % warThreshold) + " %"
    fig.text(xoffset, yoffset-3*lineSpacing, textConditions, fontsize=12, color='blue')
    textConditions = r"$|r_\beta|$ $\geq $ " + str(fmt2 % betaCorrThreshold)
    fig.text(xoffset, yoffset-5*lineSpacing, textConditions, fontsize=12, color='blue')
    textConditions = r"$\frac{N_\beta}{N_{WAR}}$ = $\frac{" + str(nrSamplesBetasWAR) + r"}{" + str(nrSamplesWAR) + r"}$" #+ str(nrSamplesBetasWAR) str(nrSamplesWAR)
    fig.text(xoffset, yoffset-6*lineSpacing, textConditions, fontsize=12, color='blue')
    textConditions = r"$\frac{N_\beta}{N_{WAR}}$ = " + str(fmt1 % fractionValidBetas) + " %"
    fig.text(xoffset, yoffset-7.2*lineSpacing, textConditions, fontsize=12, color='blue')
    # Variables acronyms
    fig.text(xoffset, 0.07, "WAR = Wet area ratio", fontsize=11, color='black')
    fig.text(xoffset, 0.05, "MM = Marginal mean", fontsize=11, color='black')
    fig.text(xoffset, 0.02, "dB = decibel", fontsize=11, color='black')
###### Save figure
fileName = outBaseDir + product + timeStartStr + '-' + timeEndStr + '0_' + \
    'Rgt' + str(args.minR) + '_WOLS' + str(args.wols) + '_00005_attractorSubplots_warGt' + str("%0.1f" % warThreshold) + '_' + timeAccumMinStr + '.png'
print('Saving: ',fileName)
plt.savefig(fileName, dpi=300)
|
meteoswiss-mdr/precipattractor
|
pyscripts/read_plot_statistics.py
|
Python
|
gpl-3.0
| 30,288
|
[
"NetCDF"
] |
8af5e67777be1d1bf5fb3b22994e12ddc8d40e5701249f6a2e13d6ddac32a779
|
# BEGIN_COPYRIGHT
#
# Copyright (C) 2014 CRS4.
#
# This file is part of hadoop-galaxy, released under the terms of the BSD
# 3-Clause License <http://opensource.org/licenses/BSD-3-Clause>.
#
# END_COPYRIGHT
from glob import glob
import imp
import logging
import os
import sys
import unittest
# Module-level logger used to report which test modules were discovered.
_log = logging.getLogger('UnitTestRunner')
## Code borrowed from Seal (http://github.com/crs4/seal)
class UnitTestRunner(object):
    """Discover and run the project's unittest suites.

    Each test module is expected to expose a module-level ``suite()``
    function returning a ``unittest.TestSuite``.
    """

    def __init__(self, test_modules=None):
        """Collect test module paths.

        If *test_modules* (a list of file paths) is not given, autodiscover
        ``tests/test_*.py`` relative to this file's parent directory.
        """
        if test_modules:
            self.autotest_list = test_modules
        else:
            proj_path = os.path.join(os.path.dirname(__file__), '..')
            self.autotest_list = glob(os.path.join(proj_path, 'tests', 'test_*.py'))
        _log.info("Autotest list: %s", self.autotest_list)

    @staticmethod
    def __load_suite(module_path):
        """Import the module at *module_path* and return its ``suite()``."""
        module_name = os.path.splitext(os.path.basename(module_path))[0]
        ## so that test modules can import other modules in their own
        ## directories, we directly modify sys.path
        sys.path.append(os.path.dirname(module_path))
        try:
            # NOTE: `imp` is deprecated (removed in Python 3.12); importlib
            # would be the modern replacement.
            fp, pathname, description = imp.find_module(module_name)
            try:
                module = imp.load_module(module_name, fp, pathname, description)
            finally:
                fp.close()
        finally:
            # Clean up sys.path even if discovery or import fails (the
            # original only removed the entry on the success path).
            del sys.path[-1]
        return module.suite()

    def run(self):
        """Run all configured suites; return the unittest result object."""
        # sys.stderr.write instead of the Python-2-only `print >> sys.stderr`
        # statement, which is a SyntaxError under Python 3.
        sys.stderr.write("Running tests from these modules: %s\n" % (self.autotest_list,))
        suites = [UnitTestRunner.__load_suite(m) for m in self.autotest_list]
        return unittest.TextTestRunner(verbosity=2).run(unittest.TestSuite(tuple(suites)))
def main():
    """Entry point: run every suite; 0 on success, 1 on any failure."""
    outcome = UnitTestRunner().run()
    if outcome.wasSuccessful():
        return 0
    return 1
if __name__ == '__main__':
    # Configure root logging so UnitTestRunner's INFO messages are visible,
    # then exit with main()'s status code (0 = all tests passed).
    logging.basicConfig(level=logging.INFO)
    sys.exit(main())
|
crs4/hadoop-galaxy
|
utils/unit_test_runner.py
|
Python
|
bsd-3-clause
| 1,831
|
[
"Galaxy"
] |
5eac1862220dc689c93ac61ba825a6a5f72383baaa68d377b1f8a9d4fe72efdc
|
# -*- coding: utf-8 -*-
{
'(Recipient)': '(Empfänger)',
"'Cancel' will indicate an asset log entry did not occur": "'Abbrechen' zeigt an, dass ein Asset Log Eintrag nicht eingetreten ist",
"A location that specifies the geographic area for this region. This can be a location from the location hierarchy, or a 'group location', or a location that has a boundary for the area.": 'Eine Position, die den geografischen Bereich für diese Region definiert. Dies kann ein Standort aus der Standorthierarchie, oder ein Gruppenstandort, oder ein Standort mit Grenzbereich sein.',
"Acronym of the organization's name, eg. IFRC.": 'Abkürzung des Organisationsnamen, z. B. IFRC.',
"Authenticate system's Twitter account": 'Authentifizierung für den Twitter Account des Systems',
"Can't import tweepy": 'Tweepy kann nicht importiert werden',
"Caution: doesn't respect the framework rules!": 'Achtung: Die Rahmenbedingungen des Frameworks werden nicht beachtet!',
"Format the list of attribute values & the RGB value to use for these as a JSON object, e.g.: {Red: '#FF0000', Green: '#00FF00', Yellow: '#FFFF00'}": "Formatieren Sie die Liste der Attributwerte und die RGB-Wert zur Verwendung dieser als ein JSON-Objekt, z. B.: {Rot: '#FF0000 ', grün: '#00FF00 ', gelb: '#FFFF00 '}",
"If selected, then this Asset's Location will be updated whenever the Person's Location is updated.": 'Wenn ausgewählt, wird der Ort dieser Anlage immer aktualisiert, sobald der Standort der Person aktualisiert wird.',
"If this configuration represents a region for the Regions menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": 'Wenn diese Konfiguration einen Bereich für die Regionenauswahl repräsentiert, geben Sie einen Namen für die Verwendung in der Auswahl. Der Name für eine persönliche Kartenkonfiguration wird mit dem Namen des Benutzers festgelegt.',
"If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.": 'Wenn dieses Feld ausgefüllt ist, dann wird ein Benutzer, der diese Organisation definiert, automatisch als Mitarbeiter dieser Organisation zugeordnet sobald er sich anmeldet, ausgenommen die Domäne stimmt nicht mit dem Domänenfeld überein.',
"If this is ticked, then this will become the user's Base Location & hence where the user is shown on the Map": 'Wenn dies angekreuzt ist, wird es die Basisposition des Benutzers und dadurch gesteuert wo der Benutzer auf der Karte angezeigt wird.',
"If you don't see the Hospital in the list, you can add a new one by clicking link 'Create Hospital'.": "Wenn sie das Krankenhaus nicht in der Liste finden, können Sie ein neues hinzufügen, indem sie den Link 'Krankenhaus hinzufügen' anklicken.",
"If you don't see the Office in the list, you can add a new one by clicking link 'Create Office'.": "Wenn sie das Büro nicht in der Liste finden, können Sie ein neues hinzufügen, indem sie den Link 'Büro hinzufügen' anklicken.",
"If you don't see the Organization in the list, you can add a new one by clicking link 'Create Organization'.": 'Wenn sie die Organisation nicht in der Liste sehen, dann können sie eine neue hinzufügen indem sie auf den Link "Organisation hinzufügen" klicken.',
"Instead of automatically syncing from other peers over the network, you can also sync from files, which is necessary where there's no network. You can use this page to import sync data from files and also export data to sync files. Click the link on the right to go to this page.": 'Anstelle der automatischen Synchronisation von anderen Peers über das Netz, können sie auch über Dateien synchronisieren, was nötig ist, wenn kein Netzwerk vorhanden ist. Sie können diese Seite verwenden um Sync Daten aus Dateien zu importieren and auch um Daten in Form von Sync Dateien zu exportieren. Ein Klick auf den Link rechts bringt Sie zu dieser Seite.',
"Level is higher than parent's": 'Die Stufe ist höher als das übergeordnete Element',
"Need a 'url' argument!": "Braucht eine 'url' als Argument!",
"Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": "Optional. Der Name der Geometrie-Spalte. In PostGIS ist der Standardwert 'the_geom'.",
"Parent level should be higher than this record's level. Parent level is": 'Übergeordnete Ebene muss höher als dieser Eintrag. Die Stufe seines Eltern Elements ist',
"Password fields don't match": 'Kennwortfelder stimmer nicht überein',
"Phone number to donate to this organization's relief efforts.": 'Telefonnummer für Spenden an diese Nothilfeorganisation.',
"Please come back after sometime if that doesn't help.": 'Wenn das nicht hilft, kommen Sie nach einiger Zeit bitte wieder.',
"Quantity in %s's Inventory": "Menge in %s's Bestand",
"Select a Room from the list or click 'Create Room'": "Wählen Sie einen Raum aus der Liste oder klicken Sie auf 'Raum hinzufügen'",
"Select a person in charge for status 'assigned'": 'Wählen Sie eine verantwortliche Person aus für den Status "zugeordnet"',
"Select this if all specific locations need a parent at the deepest level of the location hierarchy. For example, if 'district' is the smallest division in the hierarchy, then all specific locations would be required to have a district as a parent.": "Wählen Sie diese Option, wenn alle speziellen administrativen Zuständigkeitsbereiche auf der untersten Hierarchieebene einen übergeordneten Zuständigkeitsbereich brauchen. Beispiel: Wenn 'district' der kleinste Bereich in der Hierarchie ist, dann müssen alle speziellen Bereiche einen 'district' als übergeordnetes Element haben.",
"Select this if all specific locations need a parent location in the location hierarchy. This can assist in setting up a 'region' representing an affected area.": 'Wählen Sie diese Option, wenn alle speziellen administrativen Zuständigkeitsbereiche einen übergeordneten Zuständigkeitsbereich in der Gebietshierarchie brauchen. Es kann dabei hilfreich sein eine "region" festzulegen, die den betroffenen Bereich repräsentiert.',
"Sorry, things didn't get done on time.": 'Leider konnten die Aufgaben nicht rechtzeitig ausgeführt werden.',
"Sorry, we couldn't find that page.": 'Leider konnte diese Seite nicht gefunden werden.',
"System's Twitter account updated": 'Der Twitter Account des Systems wurde aktualisiert',
"The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.": "Die Spender für dieses Projekt. Mehrere Werte können durch Halten der 'Steuerungstaste' (Strg / Ctrl) ausgewählt werden.",
"The URL of the image file. If you don't upload an image file, then you must specify its location here.": 'Die URL der Bilddatei. Wenn Sie keine Grafikdatei hochladen, dann müssen Sie hier eine URL angeben.',
"To search by person name, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Um nach einem Namen zu suchen, geben Sie durch Leerzeichen getrennt beliebig den Vor-, Mittel- oder Nachnamen ein. Sie können % als Wildcard verwenden. Die Auswahl von 'Suchen' ohne eine Eingabe führt zur Auflistung aller Personen.",
"To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": "Um nach einem Körper zu suchen, geben Sie die Identifikationsmarken-Nummer des Körpers ein. Sie können % als Wildcard verwenden. Die Auswahl von 'Suchen' ohne Eingabe führt zur Auflistung aller Körper.",
"To search for a hospital, enter any of the names or IDs of the hospital, or the organization name or acronym, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "Für die Suche nach einem Krankenhaus, geben sie entweder den Namen, die ID, den Organisationsnamen oder ein Acronym jeweils getrennt durch Leerzeichen ein. Sie können % als Wildcard verwenden. Die Auswahl von 'Suchen' ohne Eingabe führt zur Auflistung aller Krankenhäuser.",
"To search for a hospital, enter any of the names or IDs of the hospital, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "Für die Suche nach einem Krankenhaus, geben Sie Namen oder die ID des Krankenhauses getrennt durch Leerzeichen ein. Sie können % als Wildcard verwenden. Die Auswahl von 'Suchen' ohne Eingabe führt zur Auflistung aller Krankenhäuser.",
"To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": "Um einen Ort zu suchen, geben Sie den Namen ein. Sie können % als Wildcard verwenden. Die Auswahl von Drücken 'Suchen' ohne Eingabe führt zur Auflistung aller Orte.",
"To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Um nach einer Person zu suchen, geben Sie durch Leerzeichen getrennt beliebig den Vor-, Mittel- oder Nachnamen ein. Sie können % als Wildcard verwenden. Die Auswahl von 'Suchen' ohne eine Eingabe führt zur Auflistung aller Personen.",
"To search for an assessment, enter any portion the ticket number of the assessment. You may use % as wildcard. Press 'Search' without input to list all assessments.": "Für die Suche nach einer Bewertung, geben Sie einen beliebigen Teil der Ticketnummer der Bewertung ein. Sie können % als Wildcard verwenden. Die Auswahl von 'Suchen' ohne Eingabe führt zur Auflistung aller Bewertungen.",
"Type the first few characters of one of the Person's names.": 'Geben Sie die ersten paar Zeichen des Namens einer Person ein.',
"Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": 'Laden Sie hier die Grafikdatei hoch. Wenn sie keine Grafikdatei hochladen, dann müssen Sie im Feld eine URL auf eine im Web verfügbare Grafikdatei angeben.',
"When syncing data with others, conflicts happen in cases when two (or more) parties want to sync information which both of them have modified, i.e. conflicting information. Sync module tries to resolve such conflicts automatically but in some cases it can't. In those cases, it is up to you to resolve those conflicts manually, click on the link on the right to go to this page.": 'Beim Synchronisieren der Daten mit anderen Installationen, können Konflikte auftreten wenn beide (oder mehrere) Parteien die gleichen Daten geändert haben, d. h. widersprüchliche Informationen vorliegen. Das Synchronisationsmodul versucht solche Konflikte automatisch zu beheben, was jedoch in manchen Fällen nicht möglich ist. In solchen Fällen ist es Ihre Aufgabe, diese Konflikte manuell zu beheben; klicken Sie auf den rechten Link, um auf diese Seite zu gelangen.',
"You haven't made any calculations": 'Sie haben keine Brechnungen gemacht',
"couldn't be parsed so NetworkLinks not followed.": 'konnte nicht interpretiert so dass Netzwerklinks nicht verfolgt werden.',
"includes a GroundOverlay or ScreenOverlay which aren't supported in OpenLayers yet, so it may not work properly.": 'Enthält ein GroundOverlay oder ScreenOverlay die in OpenLayers noch nicht unterstützt werden, es wird möglicherweise nicht richtig funktionieren.',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" ist ein optionaler Ausdruck wie "field1=\'newvalue\'\\ ". Sie können die Ergebnisse eines JOINs nicht aktualisieren oder löschen.',
'# of International Staff': '# der internationalen Mitarbeiter',
'# of National Staff': '# der nationalen Mitarbeiter',
'# of Vehicles': '# der Fahrzeuge',
'%(msg)s\nIf the request type is "%(type)s", please enter the %(type)s on the next screen.': '%(msg)s\n Wenn der Typ des Requests "%(type)s" ist, geben Sie die %(type)s bitte auf der nächsten Seite ein.',
'%(system_name)s - Verify Email': '%(system_name)s - Email überprüfen',
'%s rows deleted': '%s gelöschte Zeilen',
'%s rows updated': '%s Zeilen aktualisiert',
'& then click on the map below to adjust the Lat/Lon fields': '& anschließend klicken Sie auf die Karte weiter unten um die Längen- und Breitengradwerte zu korrigieren',
'* Required Fields': '* erforderliche Felder',
'0-15 minutes': '0 - 15 Minuten',
'1 Assessment': '1 Bewertung',
'1 location, shorter time, can contain multiple Tasks': '1 Position, kürzere Zeit, kann mehrere Aufgaben beinhalten',
'1-3 days': '1-3 Tage',
'15-30 minutes': '15-30 Minuten',
'2 different options are provided here currently:': '2 verschiedene Optionen stehen hier derzeit zur Verfügung:',
'2x4 Car': 'Fahrzeug mit einer Antriebsachse',
'30-60 minutes': '30-60 Minuten',
'4-7 days': '4-7 Tage',
'4x4 Car': 'Allradfahrzeug',
'8-14 days': '8-14 Tage',
'3W': 'Wer? Was? Wo?',
'A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class.': 'Es kann eine Zuordnung eines Symbol zu einer individuellen Position erfolgen, um damit die Symbolisierung der Objektklasse zu überschreiben.',
'A Reference Document such as a file, URL or contact person to verify this data. You can type the 1st few characters of the document name to link to an existing document.': 'Ein Referenzdokument wie z. B. eine Datei, URL oder Ansprechpartner zur Überprüfung dieser Daten. Sie können die ersten Zeichen eines vorhandenen Dokumentnamens eingeben um dieses zu referenzieren.',
'A brief description of the group (optional)': 'Eine kurze Beschreibung der Gruppe (optional)',
'A catalog of different Assessment Templates including summary information': 'Ein Katalog von verschiedenen Beurteilungsvorlagen inklusive einer Zusammenfassung',
'A file downloaded from a GPS containing a series of geographic points in XML format.': 'Eine Datei von einem GPS Gerät das eine Reihe von geographischen Positionen im XML-Format enthält.',
'A file in GPX format taken from a GPS whose timestamps can be correlated with the timestamps on the photos to locate them on the map.': 'Eine Datei im GPX-Format aus einem GPS Gerät deren Zeitstempel genutzt werden können, um sie mit den Zeitstempeln von Fotos zu verknüpfen und diese dann auf einer Karte darzustellen.',
'A library of digital resources, such as photos, documents and reports': 'Eine Bibliothek von digitalen Ressourcen, wie z. B. Fotos, Dokumente und Berichte',
'A location group can be used to define the extent of an affected area, if it does not fall within one administrative region.': 'Eine Gebietsgruppe kann verwendet werden, um den Bereich eines betroffenen Gebietes zu definieren, falls dieses nicht mit einer vorhandenen administrativen Einheit zusammenfällt.',
'A location group is a set of locations (often, a set of administrative regions representing a combined area).': 'Eine Gebietsgruppe besteht aus mehreren Gebieten (häufig eine Gruppe von Verwaltungsregionen, die einen eigenen Zuständigkeitsbereich bilden).',
'A location group must have at least one member.': 'Eine Gebietsgruppe muss mindestens ein Element beinhalten.',
'ABOUT THIS MODULE': 'ÜBER DIESES MODUL',
'ACCESS DATA': 'ZUGRIFFSDATEN',
'Actioning officer': 'Verantwortliche Person',
'ANY': 'Irgendwelche',
'API is documented here': 'Die API ist hier dokumentiert',
'ATC-20 Rapid Evaluation modified for New Zealand': 'ATC-20 Schnelle Evaluierung - angepasst für Neuseeland',
'Abbreviation': 'Abkürzung',
'Ability to Fill Out Surveys': 'Möglichkeit Umfragen auszufüllen',
'Ability to customize the list of details tracked at a Shelter': 'Möglichkeit die Liste der Detailangaben zu einer Unterkunft anzupassen',
'Ability to customize the list of human resource tracked at a Shelter': 'Möglichkeit die Liste der menschlichen Ressourcen einer Unterkunft anzupassen',
'Ability to customize the list of important facilities needed at a Shelter': 'Möglichkeit die Liste mit den wichtigen Einrichtungen, die in einer Unterkunft benötigt werden, anzupassen',
'Ability to view Results of Completed and/or partially filled out Surveys': 'Möglichkeit die Ergebnisse von abgeschlossen und/oder teilweise ausgefüllten Umfragen zu einzusehen',
'About': 'Über',
'About Us': 'Über uns',
'Accept Push': 'Akzeptiert Push',
'Access denied': 'Zugriff verweigert',
'Access to Shelter': 'Zugang zu Unterkünften',
'Access to education services': 'Zugang zu Ausbildungsdienstleistungen',
'Accessibility of Affected Location': 'Erreichbarkeit der betroffenen Region',
'Accompanied Child': 'Begleitetes Kind',
'Account Registered - Please Check Your Email': 'Benutzerkonto registriert - Bitte überprüfen Sie Ihre E-Mail',
'Account SID': 'SID des Accounts',
'Acronym': 'Abkürzung',
'Actionable by all targeted recipients': 'Bearbeitbar von allen adressierten Empfängern',
'Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>': 'Bearbeitbar nur von bestimmten Übungsteilnehmern; Übungsidentifikator sollte unter <note> auftauchen',
'Actioned?': 'Bearbeitet?',
'Actions taken as a result of this request.': 'Als Ergebnis auf diese Anfrage gestartete Aktionen.',
'Actions': 'Aktionen',
'Activate Events from Scenario templates for allocation of appropriate Resources (Human, Assets & Facilities).': 'Aktivieren Sie Ereignisse aus den SZENARIO Vorlagen um die passenden Ressourcen zuzuordnen (Menschen, Anlagen und Einrichtungen).',
'Active Problems': 'Aktive Probleme',
'Active': 'aktiv',
'Activities matching Assessments': 'Aktivitäten passend zur Beurteilung',
'Activities of boys 13-17yrs before disaster': 'Aktivitäten von Jungen im Alter zwischen 13-17 Jahren vor der Katastrophe',
'Activities of boys 13-17yrs now': 'Aktivitäten von Jungen im Alter zwischen 13-17 Jahren heute',
'Activities of boys <12yrs before disaster': 'Aktivitäten von Jungen unter 12 Jahren vor der Katastrophe',
'Activities of boys <12yrs now': 'Aktivitäten von Jungen unter 12 Jahren heute',
'Activities of children': 'Aktivitäten von Kindern',
'Activities of girls 13-17yrs before disaster': 'Aktivitäten von Mädchen im Alter von 13-17 Jahren vor der Katastrophe',
'Activities of girls 13-17yrs now': 'Aktivitäten von Mädchen im Alter von 13-17 Jahren heute',
'Activities of girls <12yrs before disaster': 'Aktivitäten von Mädchen unter 12 Jahren vor der Katastrophe',
'Activities of girls <12yrs now': 'Aktivitäten von Mädchen unter 12 Jahre heute',
'Activities': 'Aktivitäten',
'Activity Added': 'Aktivität hinzugefügt',
'Activity Deleted': 'Aktivität gelöscht',
'Activity Details': 'Details zur Aktivität',
'Activity Report': 'Bericht zur Aktivität',
'Activity Reports': 'Berichte zu Aktivitäten',
'Activity Type': 'Typ der Aktivität',
'Activity Types': 'Typen von Aktivität',
'Activity Updated': 'Aktivität aktualisiert',
'Activity': 'Aktivität',
'Add Activity Type': 'Aktivitätstyp hinzufügen',
'Add Address': 'Adresse hinzufügen',
'Add Alternative Item': 'Alternativen Artikel hinzufügen',
'Add Assessment Summary': 'Zusammenfassung der Beurteilung hinzufügen',
'Add Assessment': 'Beurteilung hinzufügen',
'Add Asset Log Entry - Change Label': 'Bestandsprotokoll Eintrag hinzufügen - Beschriftung verändern',
'Add Availability': 'Verfügbarkeit hinzufügen',
'Add Baseline Type': 'Basislinien-Typ hinzufügen',
'Add Baseline': 'Basislinie hinzufügen',
'Add Bundle': 'Paket hinzufügen',
'Add Camp Service': 'Camp-Dienst hinzufügen',
'Add Camp Type': 'Camp Art hinzufügen',
'Add Camp': 'Camp hinzufügen',
'Add Certificate for Course': 'Zertifikat für Kurs hinzufügen',
'Add Certification': 'Zertifizierung hinzufügen',
'Add Competency': 'Qualifikation hinzufügen',
'Add Contact': 'Kontaktperson hinzufügen',
'Add Contact Information': 'Kontaktinformation hinzufügen',
'Add Credential': 'Qualifikation hinzufügen',
'Add Credentials': 'Qualifikationen hinzufügen',
'Add Disaster Victims': 'Katastrophenopfer hinzufügen',
'Add Distribution.': 'Verteilung hinzufügen.',
'Add Donor': 'Spender hinzufügen',
'Add Flood Report': 'Flut Bericht hinzufügen',
'Add Group Member': 'Gruppenmitglied hinzufügen',
'Add Human Resource': 'Personal hinzufügen',
'Add Identity': 'Identität hinzufügen',
'Add Image': 'Bild hinzufügen',
'Add Impact Type': 'Auswirkungstyp Hinzufügen',
'Add Impact': 'Auswirkung hinzufügen',
'Add Item to Catalog': 'Artikel zu Katalog hinzufügen',
'Add Item to Commitment': 'Eintrag zur Zusage hinzufügen',
'Add Item to Inventory': 'Artikel zu Inventar hinzufügen',
'Add Item to Request': 'Artikel zur Anforderung hinzufügen',
'Add Item to Shipment': 'Artikel der Lieferung hinzufügen',
'Add Item': 'Artikel hinzufügen',
'Add Job Role': 'Tätigkeit hinzufügen',
'Add Key': 'Schlüssel hinzufügen',
'Add Kit': 'Ausstattung (Kit) hinzufügen',
'Add Layer to this Profile': 'Kartenebene zu diesem Profil hinzufügen',
'Add Level 1 Assessment': 'Stufe 1 Beurteilung hinzufügen',
'Add Level 2 Assessment': 'Stufe 2 Beurteilung hinzufügen',
'Add Location': 'Standort hinzufügen',
'Add Log Entry': 'Protokolleintrag hinzufügen',
'Add Member': 'Mitglied hinzufügen',
'Add Membership': 'Mitgliedschaft hinzufügen',
'Add Message': 'Nachricht hinzufügen',
'Add Mission': 'Auftrag hinzufügen',
'Add Mobile Commons Settings': 'Mobile Commons Einstellungen hinzufügen',
'Add Need Type': 'Bedarfstyp hinzufügen',
'Add Need': 'Bedarf hinzufügen',
'Add New Assessment Summary': 'Neue Beurteilungsbeschreibung hinzufügen',
'Add New Baseline Type': 'Einen neuen Grundlinientyp hinzufügen',
'Add New Baseline': 'Eine neue Grundlinie hinzufügen',
'Add New Budget': 'Ein neues Budget hinzufügen',
'Add New Bundle': 'Ein neues Paket hinzufügen',
'Add New Camp Service': 'Neuen Camp Service hinzufügen',
'Add New Camp Type': 'Neuen Camp Typ hinzufügen',
'Add New Camp': 'Neues Camp hinzufügen',
'Add New Cluster Subsector': 'Neuen Cluster Unterbereich hinzufügen',
'Add New Cluster': 'Neuen Cluster hinzufügen',
'Add New Commitment Item': 'Zugesagten Artikel hinzufügen',
'Add New Document': 'Neues Dokument hinzufügen',
'Add New Donor': 'Neuen Spender hinzufügen',
'Add New Entry': 'Neuen Eintrag hinzufügen',
'Add New Event': 'Neues Ereignis hinzufügen',
'Add New Flood Report': 'Neuen Flutbericht hinzufügen',
'Add New Human Resource': 'Neue Human Resource hinzufügen',
'Add New Image': 'Neue Grafik hinzufügen',
'Add New Impact Type': 'Neuen Auswirkungstyp hinzufügen',
'Add New Impact': 'Neue Auswirkung hinzufügen',
'Add New Item to Kit': 'Neuen Artikel zur Ausstattung (Kit) hinzufügen',
'Add New Key': 'Neuen Schlüssel hinzufügen',
'Add New Level 1 Assessment': 'Stufe 1 Beurteilung hinzufügen',
'Add New Level 2 Assessment': 'Stufe 2 Beurteilung hinzufügen',
'Add New Member': 'Neues Mitglied hinzufügen',
'Add New Membership': 'Neue Mitgliedschaft hinzufügen',
'Add New Need Type': 'Neuen Bedarfstyp hinzufügen',
'Add New Need': 'Neuen Bedarf hinzufügen',
'Add New Population Statistic': 'Neue Bevölkerungsstatistik hinzufügen',
'Add New Problem': 'Neues Problem hinzufügen',
'Add New Rapid Assessment': 'Neue Schnell-Beurteilung hinzufügen',
'Add New Received Item': 'Neuen erhaltenen Artikel hinzufügen',
'Add New Record': 'Neuen Datensatz hinzufügen',
'Add New Request Item': 'Neuen Anfrageartikel hinzufügen',
'Add New Request': 'Neue Anfrage hinzufügen',
'Add New River': 'Neuen Fluss hinzufügen',
'Add New Role to User': 'Benutzer eine neue Rolle zuweisen',
'Add New Scenario': 'Neues Szenario hinzufügen',
'Add New Sent Item': 'Neuen gesendeten Artikel hinzufügen',
'Add New Setting': 'Neue Einstellung hinzufügen',
'Add New Solution': 'Neue Lösung hinzufügen',
'Add New Staff Type': 'Neue Mitarbeitertyp hinzufügen',
'Add New Subsector': 'Neuen Teilbereich hinzufügen',
'Add New Survey Answer': 'Neue Antwort zur Umfrage hinzufügen',
'Add New Survey Question': 'Neue Frage zur Umfrage hinzufügen',
'Add New Survey Series': 'Neue Umfrageserie hinzufügen',
'Add New Survey Template': 'Neue Umfragevorlage hinzufügen',
'Add New Team': 'Neues Team hinzufügen',
'Add New Ticket': 'Neues Ticket hinzufügen',
'Add New Track': 'Neuen Pfad hinzufügen',
'Add New User to Role': 'Neuen Benutzer der Rolle hinzufügen',
'Add New': 'Neu hinzufügen',
'Add Organization Domain': 'Organisationsdomain hinzufügen',
'Add Peer': 'Peer-Zugriffspunkt hinzufügen',
'Add Person': 'Person hinzufügen',
'Add Photo': 'Foto hinzufügen',
'Add PoI': 'PoI hinzufügen',
'Add Population Statistic': 'Neue Bevölkerungsstatistik hinzufügen',
'Add Position': 'Position hinzufügen',
'Add Problem': 'Problem hinzufügen',
'Add Question': 'Frage hinzufügen',
'Add Rapid Assessment': 'Schnell-Beurteilung hinzufügen',
'Add Record': 'Datensatz hinzufügen',
'Add Reference Document': 'Referenzdokument hinzufügen',
'Add Report': 'Bericht hinzufügen',
'Add Request': 'Anfrage hinzufügen',
'Add Section': 'Abschnitt hinzufügen',
'Add Setting': 'Einstellung hinzufügen',
'Add Skill': 'Fähigkeit hinzufügen',
'Add Skill Equivalence': 'Fähigkeitsäquivalenz hinzufügen',
'Add Skill Provision': 'Fähigkeitsbestimmung hinzufügen',
'Add Skill to Request': 'Fähigkeit zur Anfrage hinzufügen',
'Add Solution': 'Lösung hinzufügen',
'Add Staff Type': 'Mitarbeitertyp hinzufügen',
'Add Subscription': 'Abonnement hinzufügen',
'Add Subsector': 'Teilbereich hinzufügen',
'Add Survey Answer': 'Umfrageantwort hinzufügen',
'Add Survey Question': 'Umfrage Frage hinzufügen',
'Add Survey Series': 'Umfrage Serie hinzufügen',
'Add Survey Template': 'Umfrage Vorlage hinzufügen',
'Add Team Member': 'Teammitglied hinzufügen',
'Add Team': 'Team hinzufügen',
'Add Ticket': 'Ticket hinzufügen',
'Add to Bin': 'Zum Lagerbehälter hinzufügen',
'Add Training': 'Schulung hinzufügen',
'Add Twilio Channel': 'Twilio Kanal hinzufügen',
'Add Twitter Channel': 'Twitter Kanal hinzufügen',
'Add Unit': 'Einheit hinzufügen',
'Add Vehicle': 'Fahrzeug hinzufügen',
'Add Vehicle Type': 'Fahrzeugtyp hinzufügen',
'Add Volunteer Availability': 'Verfügbarkeit von Freiwilligen hinzufügen',
'Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead.': 'Fügen Sie ein Referenzdokument z. B. eine Datei, URL oder einen Ansprechpartner zur Überprüfung dieser Daten ein. Wenn Sie kein Referenzdokument angeben, wird stattdessen ihre Mailadresse angezeigt.',
'Add a Volunteer': 'Einen Freiwilligen hinzufügen',
'Add a new certificate to the catalog.': 'Hinzufügen eines neuen Zertifikats zum Katalog',
'Add a new competency rating to the catalog.': 'Neue Kompetenzeinstufung zum Katalog hinzufügen',
'Add a new course to the catalog.': 'Neuen Kurs zum Katalog hinzufügen',
'Add a new job role to the catalog.': 'Neue Tätigkeit zum Katalog hinzufügen',
'Add a new skill provision to the catalog.': 'Neue Bereitstellung einer Fähigkeit zum Katalog hinzufügen',
'Add a new skill to the catalog.': 'Neue Fähigkeit zum Katalog hinzufügen',
'Add a new skill type to the catalog.': 'Neue Fähigkeitsart zum Katalog hinzufügen.',
'Add new Group': 'Neue Gruppe hinzufügen',
'Add new Individual': 'Hinzufügen neues Individuum',
'Add new project.': 'Neues Projekt hinzufügen.',
'Add staff members': 'Mitarbeiter hinzufügen',
'Add strings manually': 'Texte händisch hinzufügen',
'Add to a Team': 'Zu einem Team hinzufügen',
'Add to Bundle': 'Zu Paket hinzufügen',
'Add to budget': 'Zum Budget hinzufügen',
'Add volunteers': 'Freiwillige hinzufügen',
'Add': 'Hinzufügen',
'Add/Edit/Remove Layers': 'Hinzufügen/Bearbeiten/Entfernen von Kartenebenen',
'Added to Group': 'Zur Gruppe hinzugefügt',
'Added to Team': 'Zum Team hinzugefügt',
'Additional Beds / 24hrs': 'Zusätzliche Betten / 24 Std.',
'Address Details': 'Details zur Adresse',
'Address Type': 'Typ der Adresse',
'Address added': 'Adresse hinzugefügt',
'Address deleted': 'Adresse gelöscht',
'Address updated': 'Adresse aktualisiert',
'Address': 'Adresse',
'Addresses': 'Adressen',
'Adequate food and water available': 'Angemessene Nahrung und Wasser verfügbar',
'Adequate': 'Angemessen',
'Adjust Stock Levels': 'Lagerbestand anpassen',
'Adjust Stock': 'Lagerbestand anpassen',
'Admin': 'Administration',
'Admin Email': 'Email Administrator ',
'Admin Name': 'Name Administrator',
'Admin Tel': 'Telefonnummer Administrator',
'Administration': 'Administrator',
'Administrative support cost': 'Kosten für administrative Unterstützung',
'Admissions/24hrs': 'Einlass / 24 Stunden',
'Adolescent (12-20)': 'Heranwachsende (12-20)',
'Adolescent participating in coping activities': 'Teenager Teilnahme an Aktivitäten kopieren',
'Adopted Child': 'Adoptiertes Kind',
'Adult (21-50)': 'Erwachsene (21-50)',
'Adult ICU': 'Erwachsene ICU',
'Adult Psychiatric': 'Erwachsener - psychiatrisch auffällig',
'Adult female': 'Erwachsener - weiblich',
'Adult male': 'Erwachsener - männlich',
'Adults in prisons': 'Erwachsenen in Gefängnis',
'Advanced': 'Erweitert',
'Advanced Javascript Layers': 'Advanced Javascript Layers',
'Advisory': 'Beratend',
'After clicking on the button, a set of paired items will be shown one by one. Please select the one solution from each pair that you prefer over the other.': 'Nach einem Klick auf den Button, wird ein Satz von gekoppelten Elemente nacheinander gezeigt werden. Bitte wählen Sie diejenige Lösung aus jedem Paar, die sie gegenüber der anderen bevorzugen.',
'Age': 'Alter',
'Age Group': 'Altersgruppe',
'Age group does not match actual age.': 'Altersgruppe passt nicht zum tatsächlichen Alter.',
'Age group': 'Altersgruppe',
'Aggravating factors': 'Erschwerende Faktoren',
'Aggregate': 'Zusammenstellung',
'Agriculture': 'Landwirtschaft',
'Air Transport Service': 'Lufttransportsservice',
'Aircraft Crash': 'Flugzeugabsturz',
'Aircraft Hijacking': 'Flugzeugentführung',
'Aircraft Maximum Size': 'Maximale Größe des Flugzeugs',
'Airports': 'Flughäfen',
'Airport Closure': 'Flughafenschließung',
'Airspace Closure': 'Luftraumsperrung',
'Alcohol': 'Alkohol',
'All Activities': 'Alle Aktivitäten',
'All Inbound & Outbound Messages are stored here': 'Alle eingehenden und abgehenden Nachrichten werden hier gespeichert',
'All Resources': 'Alle Ressourcen',
'All data provided by the Sahana Software Foundation from this site is licenced under a Creative Commons Attribution licence. However, not all data originates here. Please consult the source field of each entry.': 'Alle von der Sahana Software Foundation bereitgestellten Daten dieser Seite sind unter der Creative Commons Attribution licence lizenziert. Es stammen jedoch nicht alle Daten von hier. Bitte beachten Sie das Quellen-Feld des jeweiligen Eintrags.',
'All': 'Alles',
'All Records': 'Alle Datensätze',
'Allocate Group': 'Gruppe zuweisen',
'Allowance': 'Taschengeld',
'Allowances': 'Taschengelder',
'Allowed to push': 'Dürfen push',
'Allows a Budget to be drawn up': 'Ermöglicht ein Budget aufzustellen.',
'Allows authorized users to control which layers are available to the situation map.': 'Erlaubt berechtigten Benutzern zu steuern, welche Kartenebenen auf der Lagekarte verfügbar sind.',
'Alternative Item Details': 'Details zum alternativen Artikel',
'Alternative Item added': 'Alternativer Artikel hinzugefügt.',
'Alternative Item deleted': 'Alternativer Artikel gelöscht',
'Alternative Item updated': 'Alternativer Artikel aktualisiert',
'Alternative Item': 'Alternativer Artikel',
'Alternative Items': 'Alternative Artikel',
'Alternative places for studying': 'Alternative Orte für das Studium',
'Ambulance Service': 'Ambulanter Krankendienst',
'An Assessment Template can be selected to create a Disaster Assessment. Within a Disaster Assessment, responses can be collected and results can analyzed as tables, charts and maps': 'Es kann eine Beurteilungsvorlage zur Erstellung einer Katastrophenbeurteilung ausgewählt werden. Innerhalb der Katastrophenbeurteilung können Antworten gesammmelt und Ergebnisse in Form von Tabellen, Graphiken und Karten erzeugt werden.',
'An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities.': 'Ein Aufnahmesystem, ein Warenhausmanagementsystem, Warenlieferungsverfolgung, Versorgungskettenmanagement, Beschaffung und andere Anlagen-und Verwaltungsfunktionen.',
'An item which can be used in place of another item': 'Ein Artikel, der anstatt eines anderen Artikels verwendet werden kann',
'Analysis of Completed Surveys': 'Analyse von abgeschlossenen Umfragen',
'Animal Die Off': 'Tiere Sterben',
'Animal Feed': 'Tierfutter',
'Anthropology': 'Anthropologie',
'Antibiotics available': 'Antibiotika verfügbar',
'Antibiotics needed per 24h': 'Menge an Antibiotika die pro 24h benötigt wird',
'Apparent Age': 'Offensichtliches Alter',
'Apparent Gender': 'Offensichtliches Geschlecht',
'Application': 'Anwendung',
'Application Deadline': 'Anwendung Frist',
'Application Permissions': 'Anwendungsberechtigungen',
'Appointments': 'Termine',
'Appointment Type': 'Terminart',
'Approve': 'Bestätigen',
'Approved': 'Bestätigt',
'Approver': 'Bestätigende Stelle',
'Archived Cases': 'Archivierte Fälle',
'Arctic Outflow': 'Arktischer Abfluss',
'Areas inspected': 'Untersuchte Gebiete',
'Assessment Details': 'Details zur Beurteilung',
'Assessment Reported': 'Beurteilung gemeldet',
'Assessment Summaries': 'Zusammenfassungen der Beurteilung',
'Assessment Summary Details': 'Details zur Zusammenfassung der Beurteilung',
'Assessment Summary added': 'Zusammenfassung der Beurteilung hinzugefügt',
'Assessment Summary deleted': 'Zusammenfassung der Beurteilung gelöscht',
'Assessment Summary updated': 'Zusammenfassung der Beurteilung aktualisiert',
'Assessment added': 'Beurteilung hinzugefügt',
'Assessment admin level': 'Admin Ebene zur Beurteilung',
'Assessment deleted': 'Beurteilung gelöscht',
'Assessment timeline': 'Beurteilungszeitachse',
'Assessment updated': 'Beurteilung aktualisiert',
'Assessment': 'Beurteilung',
'Assessment Templates': 'Beurteilungsvorlage',
'Assessments Needs vs. Activities': 'Bedarf für Beurteilungen gegenüber den Aktivitäten',
'Assessments and Activities': 'Beurteilungen und Aktivitäten',
'Assessments': 'Beurteilungen',
'Assessor': 'Beurteilender',
'Asset Details': 'Details zur Anlage',
'Asset Log Details': 'Anlage Protokoll Details',
'Asset Log Empty': 'Anlage Protokoll leer',
'Asset Log Entry Added - Change Label': 'Anlage Protokolleintrag hinzugefügt - Beschriftung ändern',
'Asset Log Entry deleted': 'Anlage Protokolleintrag gelöscht',
'Asset Log Entry updated': 'Anlage Protokolleintrag aktualisiert',
'Asset Management': 'Anlageverwaltung',
'Asset Number': 'Anlagenummer',
'Asset added': 'Anlage hinzugefügt',
'Asset deleted': 'Anlage gelöscht',
'Asset removed': 'Anlage entfernt',
'Asset updated': 'Anlage aktualisiert',
'Asset': 'Anlage',
'Assets are resources which are not consumable but are expected back, so they need tracking.': 'Anlagen sind Ressourcen, die nicht verbrauchbar sind aber zurück erwartet werden, daher müssen sie nachverfolgt werden.',
'Assets': 'Anlagen',
'Assign Group': 'Gruppe zuordnen',
'Assign Staff': 'Mitarbeiter zuordnen',
'Assign to Org.': 'Der Org. zuordnen',
'Assign to Organization': 'Der Organisation zuordnen',
'Assign to Person': 'Der Person zuordnen',
'Assign to Site': 'Dem Standort zuordnen',
'Assign': 'Zuordnen',
'Assign ': 'Zuordnung ',
'Assigned By': 'Zugeordnet von',
'Assigned To': 'Zugeordnet zu',
'Assigned to Organization': 'Zur Organisation zugeordnet',
'Assigned to Person': 'Zur Person zugeordnet',
'Assigned to Site': 'Zum Standort zugeordnet',
'Assigned to': 'Zugeordnet zu',
'Assigned': 'Zugeordnet',
'Asylum Application': 'Asylantrag',
'At/Visited Location (not virtual)': '/ In Augenschein genommener Ort (nicht virtuell)',
'Attachments': 'Anhänge',
'Attend to information sources as described in <instruction>': 'Sich um Informationsquellen kümmern wie im Abschnitt beschrieben',
'Attribution': 'Eigenschaften',
'Authentication Required': 'Anmeldung erforderlich',
'Author': 'Autor',
'Availability': 'Verfügbarkeit',
'Available Alternative Inventories': 'Verfügbare alternative Bestände',
'Available Beds': 'Verfügbare Betten',
'Available Inventories': 'Verfügbare Bestände',
'Available Messages': 'Verfügbare Nachrichten',
'Available Records': 'Verfügbare Datensätze',
'Available databases and tables': 'Verfügbare Datenbanken und Tabellen',
'Available for Location': 'Verfügbar für Ort',
'Available from': 'Verfügbar von',
'Available in Viewer?': 'Verfügbar in Lagedarstellung?',
'Available until': 'Verfügbar bis',
'Avalanche': 'Lawine',
'Average': 'Durchschnitt',
'Avoid the subject event as per the <instruction>': 'Das betreffende Ereignis vermeiden, wie in <instruction> beschrieben',
'Awards': 'Auszeichnungen',
'Background Color for Text blocks': 'Hintergrundfarbe für Textblöcke',
'Background Color': 'Hintergrundfarbe',
'Baldness': 'Kahlköpfigkeit',
'BAMF Registration': 'BAMF Registrierung',
'Banana': 'Banane',
'Bank/micro finance': 'Bank/Mikro Finanzierung',
'Barge Capacity': 'Frachtschiffkapazitäten',
'Barricades are needed': 'Barrikaden sind erforderlich',
'Base Layer?': 'Basis Kartenebene?',
'Base Location': 'Basis Standort/Region',
'Base Site Set': 'Basisstandort definieren',
'Baseline Data': 'Referenzdatum Daten',
'Baseline Number of Beds': 'Referenzdatum Anzahl von Betten',
'Baseline Type Details': 'Referenzdatumstyp Details',
'Baseline Type added': 'Referenzdatumstyp hinzugefügt',
'Baseline Type deleted': 'Referenzdatumstyp gelöscht',
'Baseline Type updated': 'Referenzdatumstyp aktualisiert',
'Baseline Type': 'Referenzdatumstyp',
'Baseline Types': 'Referenzdatumstypen',
'Baseline added': 'Referenzdatum hinzugefügt',
'Baseline deleted': 'Referenzdatum gelöscht',
'Baseline number of beds of that type in this unit.': 'Referenzdatum Anzahl von Betten dieses Typs in dieser Einheit.',
'Baseline updated': 'Referenzdatum aktualisiert',
'Baselines Details': 'Referenzdaten Details',
'Baselines': 'Referenzdaten',
'Basic Assessment Reported': 'Grundlegende Beurteilung berichtet',
'Basic Assessment': 'Grundlegende Beurteilung',
'Basic Details': 'Grundlegende Details',
'Basic reports on the Shelter and drill-down by region': 'Grundlegende Berichte über Unterkunft und Drill-down nach Region',
'Baud rate to use for your modem - The default is safe for most cases': 'Baudrate für das Modem - der Standardwert in den meisten Fällen ausreichend',
'BEA Registration': 'BEA Registrierung',
'Beam': 'Träger',
'Bed Capacity per Unit': 'Bettenkapazität pro Einheit',
'Bed Capacity': 'Bettenkapazität',
'Bed Type': 'Bett-Typ',
'Bed type already registered': 'Bett-Typ bereits registriert',
'Below ground level': 'Unter dem Erdgeschoss',
'Beneficiaries': 'Begünstigte',
'Beneficiary': 'Begünstigter',
'Beneficiary Type': 'Typ des Begünstigten',
'Biological Hazard': 'Biologische Gefahr',
'Bin': 'Lagerbehälter',
'Biscuits': 'Kekse',
'Blizzard': 'Schneesturm',
'Blood Type (AB0)': 'Blutgruppe (ABO)',
'Blowing Snow': 'Schneewehen',
'Boat': 'Boot',
'Bodies found': 'Leichen gefunden',
'Bodies recovered': 'Leichen geborgen',
'Body Recovery Request': 'Leichenbergungsanforderung',
'Body Recovery Requests': 'Leichenbergungsanforderungen',
'Body': 'Body',
'Bomb Explosion': 'Bombenexplosion',
'Bomb Threat': 'Bombendrohung',
'Bomb': 'Bombe',
'Border Color for Text blocks': 'Rahmenfarbe für Textblöcke',
'Both': 'Beides',
'Brand Details': 'Details zur Marke',
'Brand added': 'Marke hinzugefügt',
'Brand deleted': 'Marke gelöscht',
'Brand updated': 'Marke aktualisiert',
'Brand': 'Marke',
'Brands': 'Marken',
'Bricks': 'Ziegelsteine',
'Bridge Closed': 'Brücke ist geschlossen',
'Bucket': 'Eimer',
'Budget Details': 'Details zum Budget',
'Budget Updated': 'Budget aktualisiert',
'Budget added': 'Budget hinzugefügt',
'Budget deleted': 'Budget gelöscht',
'Budget updated': 'Budget aktualisiert',
'Budget': 'Budget',
'Budgeting Module': 'Budget Modul',
'Buffer': 'Puffer',
'Bug': 'Programmfehler',
'Building Assessments': 'Gebäudebeurteilungen',
'Building Collapsed': 'Gebäude zusammengebrochen',
'Building Name': 'Name des Gebäudes',
'Building Safety Assessments': 'Bewertung Gebäudesicherheit',
'Building Short Name/Business Name': 'Gebäude Kurzname / Firmenname',
'Building or storey leaning': 'Gebäude- oder Stockwerkneigung',
'Built using the Template agreed by a group of NGOs working together as the': 'Erstellt unter Verwendung einer abgestimmten Vorlage einer Gruppe von NGOs unter dem Namen',
'Bulk Status Update': 'Massen-Statusaktualisierung',
'Bulk Uploader': 'Upload von Massendaten',
'Bundle Contents': 'Produktpaket Inhalt',
'Bundle Details': 'Produktpaket Details',
'Bundle Updated': 'Produktpaket aktualisiert',
'Bundle added': 'Produktpaket hinzugefügt',
'Bundle deleted': 'Produktpaket gelöscht',
'Bundle updated': 'Produktpaket aktualisiert',
'Bundle': 'Produktpaket',
'Bundles': 'Produktpakete',
'Burn ICU': 'Verbrennungseinheit',
'Burn': 'Brennen',
'Burned/charred': 'Verbrannt / verkohlt',
'By': 'Nach',
'By Einrichtung': 'Nach Einrichtung',
'By Facility': 'Nach Einrichtung',
'By Inventory': 'Nach Bestand',
'CBA Women': 'Frauen CBA',
'CSS file %s not writable - unable to apply theme!': 'CSS Datei %s nicht beschreibbar - Motiv kann nicht angewendet werden!',
'Calculate': 'Starte Berechnung',
'Camp Coordination/Management': 'Camp Koordinierung / Management',
'Camp Service Details': 'Details zu Camp Leistung',
'Camp Service added': 'Camp Leistung hinzugefügt',
'Camp Service deleted': 'Camp Leistung gelöscht',
'Camp Service updated': 'Leistung des Camps aktualisiert',
'Camp Services': 'Leistungen des Camps',
'Camp Type Details': 'Details zum Camp Typ',
'Camp Type added': 'Camp Typ hinzugefügt',
'Camp Type deleted': 'Camp Typ gelöscht',
'Camp Type updated': 'Camp Typ aktualisiert',
'Camp Type': 'Camp Typ',
'Camp Types and Services': 'Camp Typen und Leistungen',
'Camp Types': 'Camp Typen',
'Camp added': 'Camp hinzugefügt',
'Camp deleted': 'Camp gelöscht',
'Camp updated': 'Camp aktualisiert',
'Camp': 'Camp',
'Campaign ID': 'Kampagnen ID',
'Camps': 'Camps',
'Can only disable 1 record at a time!': 'Ein Datensatz kann nur einzeln deaktiviert werden!',
'Can read PoIs either from an OpenStreetMap file (.osm) or mirror.': 'Kann PoIs nur aus einer OpenStreetMap Datei (.osm) oder einem mirror lesen.',
'Cancel': 'Abbrechen',
'Cancel Log Entry': 'Protokolleintrag abbrechen',
'Cancel Shipment': 'Lieferung stornieren',
'Canceled': 'Abgebrochen',
'Cancelled': 'Abgesagt',
'Candidate Matches for Body %s': 'Übereinstimmung des Kandidaten mit Körper %s',
'Canned Fish': 'Fischkonserven',
'Cannot be empty': 'Darf nicht leer sein',
'Cannot disable your own account!': 'Eigenes Konto kann nicht deaktiviert werden.',
'Capacity': 'Maximale Kapazität',
'Capacity (Max Persons)': 'Kapazität (Maximale Zahl von Personen)',
'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)': 'Erfassung von Informationen über Opfergruppen einer Katastrophe (Touristen, Fahrgäste, Familien, etc.)',
'Capture Information on each disaster victim': 'Erfassung von Informationen über jedes Opfer einer Katastrophe.',
'Capturing the projects each organization is providing and where': 'Erfassen der Projekte, die von jeder Organisation bereitgestellt werden und wo',
'Cardiology': 'Kardiologie',
'Cargo Pier Depth': 'Wassertiefe Frachtpier',
'Case Number': 'Fallnummer',
'Case Status': 'Fallstatus',
'Cases': 'Fälle',
'Cash': 'Bargeld',
'Cassava': 'Maniok',
'Casual Labor': 'Gelegenheitsarbeit',
'Casualties': 'Todesopfer',
'Catalog Details': 'Details zum Katalog',
'Catalog Item added': 'Katalog Eintrag hinzugefügt',
'Catalog Item deleted': 'Katalog Eintrag gelöscht',
'Catalog Item updated': 'Katalog Eintrag aktualisiert',
'Catalog Items': 'Katalog Einträge',
'Catalog added': 'Katalog hinzugefügt',
'Catalog deleted': 'Katalog gelöscht',
'Catalog updated': 'Katalog aktualisiert',
'Catalog': 'Katalog',
'Catalogs': 'Kataloge',
'Categories': 'Kategorien',
'Category': 'Kategorie',
'Ceilings, light fixtures': 'Decken, Beleuchtungskörper',
'Central point to record details on People': 'Zentrale Personenregistrierungsstelle',
'Certificate Catalog': 'Zertifikatskatalog',
'Certificate Details': 'Details zum Zertifikat',
'Certificate Status': 'Status des Zertifikats',
'Certificate added': 'Zertifikat hinzugefügt',
'Certificate deleted': 'Zertifikat gelöscht',
'Certificate updated': 'Zertifikat aktualisiert',
'Certificate': 'Zertifikat',
'Certificates': 'Zertifikate',
'Certification Details': 'Zertifizierungsdetails',
'Certification added': 'Zertifizierung hinzugefügt',
'Certification deleted': 'Zertifizierung gelöscht',
'Certification updated': 'Zertifizierung aktualisiert',
'Certification': 'Zertifizierung',
'Certifications': 'Zertifizierungen',
'Certifying Organization': 'Zertifizierende Organisation',
'Change Password': 'Passwort ändern',
'Channel': 'Kanal',
'Check-in date': 'Check-In Datum',
'Check-out date': 'Check-Out Datum',
'Check Request': 'Anfrage prüfen',
'Check for errors in the URL, maybe the address was mistyped.': 'Prüfen Sie auf Fehler in der URL, vielleicht wurde die Adresse falsch eingegeben.',
'Check if the URL is pointing to a directory instead of a webpage.': 'Prüfen Sie ob die URL auf ein Verzeichnis anstelle einer Webseite verweist',
'Check outbox for the message status': 'Überprüfen sie den Status der Nachricht im Nachrichtenausgang',
'Check to delete': 'Anwahl zum Löschen',
'Check': 'Prüfen',
'Checked': 'Geprüft',
'Checklist created': 'Prüfliste erstellt',
'Checklist deleted': 'Prüfliste gelöscht',
'Checklist of Operations': 'Checkliste für Operationen',
'Checklist updated': 'Checkliste aktualisiert',
'Checklist': 'Prüfliste',
'Chemical Hazard': 'Chemische Gefahr',
'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack': 'Chemische, Biologische, Radiologische, Nukleare order höchst explosive Gefahr oder Angriff',
'Chicken': 'Huhn',
'Child (2-11)': 'Kind (2-11)',
'Child (< 18 yrs)': 'Kind (< 18 Jahre)',
'Child Abduction Emergency': 'Kindesentführung Notfall',
'Child headed households (<18 yrs)': 'Kindgeführte Haushalte (<18 Jahre)',
'Child': 'Kind',
'Children (2-5 years)': 'Kinder (2-5 Jahre)',
'Children (5-15 years)': 'Kinder (5-15 Jahre)',
'Children (< 2 years)': 'Kinder (< 2 Jahre)',
'Children in adult prisons': 'Kinder in Gefängnissen für Erwachsene',
'Children in boarding schools': 'Kinder in Internaten',
'Children in homes for disabled children': 'Kinder in Unterkünften für behinderte Kinder',
'Children in juvenile detention': 'Kinder in Jugendstrafheimen',
'Children in orphanages': 'Kinder in Waisenhäusern',
'Children living on their own (without adults)': 'Alleinlebende Kinder (ohne Erwachsene)',
'Children not enrolled in new school': 'Kinder, die nicht in der neuen Schule registriert sind',
'Children orphaned by the disaster': 'Durch die Katastrophe verwaiste Kinder',
'Children separated from their parents/caregivers': 'Von Ihren Eltern/Betreuern getrennte Kinder',
'Children that have been sent to safe places': 'Kinder die an sichere Orte gesendet wurden',
'Children who have disappeared since the disaster': 'Kinder, die seit der Katastrophe verschwunden sind',
'Chinese (Taiwan)': 'Chinesisch (Taiwan)',
'Cholera Treatment Capability': 'Cholera Behandlungsmöglichkeiten',
'Cholera Treatment Center': 'Cholera Behandlungscenter',
'Cholera Treatment': 'Cholera Behandlung',
'Cholera-Treatment-Center': 'Cholera-Behandlung-Center',
'Choose a new posting based on the new evaluation and team judgement. Severe conditions affecting the whole building are grounds for an UNSAFE posting. Localised Severe and overall Moderate conditions may require a RESTRICTED USE. Place INSPECTED placard at main entrance. Post all other placards at every significant entrance.': 'Wählen Sie eine neue Meldung basierend der neuen Bewertung und Teamurteil. Schwerwiegende Bedingungen, die das gesamte Gebäude betreffen sind der Grund für eine UNSICHER Markierung. Lokalisierte schwere und insgesamt moderate Bedingungen können möglicherweise eine eingeschränkte Verwendung erfordern. Platziere GEPRÜFT Plakat am Haupteingang Positionieren Sie alle anderen Schilder auf jeden wichtigen Eingang.',
'Church': 'Kirche',
'City': 'Ort/Stadt',
'City / Town / Village': 'Stadt / Ort / Dorf',
'Civil Emergency': 'Ziviler Notfall',
'Cladding, glazing': 'Verkleidung, Verglasung',
'Clear': 'Löschen',
'Clear filter': 'Filter zurücksetzen',
'Click on the link %(url)s to reset your password': 'Klicken sie auf den Link %(url)s um ihr Kennwort zurückzusetzen',
'Click on the link %(url)s to verify your email': 'Klicken sie auf den Link %(url)s zum Überprüfen ihrer EMail Adresse',
'Click where you want to open Streetview': 'Auswahl um Streetview zu öffnen',
'Clinical Laboratory': 'Klinisches Labor',
'Clinical Operations': 'Klinikbetrieb',
'Clinical Status': 'Klinischer Status',
'Closed': 'Geschlossen',
'Closed at': 'Geschlossen am',
'Clothing': 'Kleidung',
'Cluster Details': 'Details zum Cluster',
'Cluster Distance': 'Cluster Abstand',
'Cluster Subsector Details': 'Cluster Teilbereich Details',
'Cluster Subsector added': 'Cluster Teilbereich hinzugefügt',
'Cluster Subsector deleted': 'Cluster Teilbereich gelöscht',
'Cluster Subsector updated': 'Cluster Teilbereich aktualisiert',
'Cluster Subsector': 'Cluster Teilsektor',
'Cluster Subsectors': 'Cluster Teilsektoren',
'Cluster Threshold': 'Cluster Schwellwert',
'Cluster added': 'Cluster hinzugefügt',
'Cluster deleted': 'Cluster gelöscht',
'Cluster updated': 'Cluster aktualisiert',
'Cluster': 'Cluster',
'Cluster(s)': 'Cluster',
'Clusters': 'Cluster',
'Cold Wave': 'Kältewelle',
'Collapse, partial collapse, off foundation': 'Zusammengefallen, teilweise zusammengefallen, ohne Unterbau',
'Collective center': 'Kollektivcenter',
'Color for Underline of Subheadings': 'Farbe der Unterstreichungslinie von untergeordneten Überschriften',
'Color of Buttons when hovering': 'Farbe von Schaltflächen beim drüberstreichen',
'Color of bottom of Buttons when not pressed': 'Farbe der unteren Seite von Schaltflächen die nicht gedrückt sind',
'Color of bottom of Buttons when pressed': 'Farbe der unteren Seite von Schaltflächen beim Drücken von Tasten',
'Color of dropdown menus': 'Farbe des Dropdown-Menüs',
'Color of selected Input fields': 'Farbe der ausgewählten Eingabefelder',
'Color of selected menu items': 'Farbe ausgewählter Menüpunkte',
'Columns, pilasters, corbels': 'Säulen, Pfeiler, Konsolen',
'Combined Method': 'Kombinierte Methode',
'Come back later. Everyone visiting this site is probably experiencing the same problem as you.': 'Kommen Sie später noch einmal wieder. Jeder der diese Seite besucht hat derzeit wahrscheinlich das gleiche Problem wie Sie :-( .',
'Come back later.': 'Kommen Sie doch später noch einmal wieder :-( ',
'Comments': 'Kommentare',
'Comments permitted?': 'Kommentare zugelassen?',
'Commercial/Offices': 'Kommerziell / Büros',
'Commit Date': 'Datum der Einstellung',
'Commit from %s': 'Einstellung von %s',
'Commit': 'Zusage',
'Commit Status': 'Status der Zusage',
'Commiting a changed spreadsheet to the database': 'Ein verändertes Spreadsheet in der Datenbank einstellen.',
'Commitment Added': 'Zusage hinzugefügt',
'Commitment Canceled': 'Zusage abgebrochen',
'Commitment Details': 'Details zur Zusage',
'Commitment Item Details': 'Details zum zugesagten Artikel',
'Commitment Item added': 'Zugesagten Artikel hinzugefügt',
'Commitment Item deleted': 'Zugesagten Artikel gelöscht',
'Commitment Item updated': 'Zugesagten Artikel aktualisiert',
'Commitment Items': 'Zugesagte Artikel',
'Commitment Status': 'Status der Zusage',
'Commitment Updated': 'Zusage aktualisiert',
'Commitment': 'Zusage',
'Commitments': 'Zusagen',
'Committed By': 'Zugesagt durch',
'Committed': 'Zugesagt',
'Committed Items': 'Zugesagte Artikel',
'Committed Skills': 'Zugesagte Fähigkeiten',
'Committing Inventory': 'Zusageninventar',
'Communication problems': 'Kommunikationsprobleme',
'Community Health Center': 'Gesundheitszentrum der Gemeinschaft',
'Community Member': 'Mitglied der Gemeinschaft',
'Competencies': 'Kompetenzen',
'Competency Details': 'Details zu den Kompetenzen',
'Competency Rating Catalog': 'Kompetenzbewertungskatalog',
'Competency Rating Details': 'Details zur Kompetenzbewertung',
'Competency Rating added': 'Kompetenzbewertung hinzugefügt',
'Competency Rating deleted': 'Kompetenzbewertung gelöscht',
'Competency Rating updated': 'Kompetenzbewertung aktualisiert',
'Competency Ratings': 'Kompetenzbewertungen',
'Competency added': 'Kompetenz hinzugefügt',
'Competency deleted': 'Kompetenz gelöscht',
'Competency updated': 'Kompetenz aktualisiert',
'Competency': 'Kompetenz',
'Complete': 'Vollständig',
'Completed': 'Beendet',
'Complete Stock Adjustment': 'Anpassen des gesamten Bestandes',
'Completion Question': 'Abschlussfrage',
'Complexion': 'Gesichtsfarbe',
'Compose': 'Erstellen',
'Compromised': 'Gefährdet',
'Concrete frame': 'Betonrahmen',
'Concrete shear wall': 'Betonscherwand',
'Condition': 'Bedingung',
'Conduct a Disaster Assessment': 'Durchführung einer Katastrophenbeurteilung',
'Configuration': 'Konfiguration',
'Configurations': 'Konfigurationen',
'Configure Run-time Settings': 'Laufzeiteinstellungen konfigurieren',
'Confirm Shipment Received': 'Bestätigen der erhaltenen Lieferung',
'Confirmed': 'Bestätigt',
'Confirming Organization': 'Organisation bestätigen',
'Conflict Details': 'Details zum Konflikt',
'Conflict Resolution': 'Konfliktlösung',
'Connection': 'Verbindung',
'Connect Parser': 'Verbindungsparser',
'Consignment Note': 'Warenbegleitschein',
'Constraints Only': 'Nur Bedingungen',
'Consumable': 'Verbrauchsartikel',
'Contact Data': 'Kontakt Daten',
'Contact Details': 'Details zum Kontakt',
'Contact Info': 'Kontaktinformationen',
'Contact Information Added': 'Kontaktinformationen hinzugefügt',
'Contact Information Deleted': 'Kontaktinformationen gelöscht',
'Contact Information Updated': 'Kontakt Informationen aktualisiert',
'Contact Information': 'Kontaktinformationen',
'Contact Method': 'Kontaktmethode',
'Contact Name': 'Name des Ansprechpartners',
'Contact Person': 'Kontaktperson',
'Contact Person / Camp Owner': 'Kontaktperson / Camp-Betreiber',
'Contact Phone': 'Telefonnummer des Kontaktes',
'Contact details': 'Details zum Kontakt',
'Contact information added': 'Kontaktinformationen hinzugefügt',
'Contact information deleted': 'Kontaktinformationen gelöscht',
'Contact information updated': 'Kontaktinformationen aktualisiert',
'Contact Us': 'Kontaktieren Sie uns',
'Contact us': 'Kontaktieren Sie uns',
'Contact': 'Kontakt',
'Contacts': 'Kontakte',
'Content': 'Inhalt',
'Contents': 'Inhalte',
'Content Management': 'Content Management',
'Content Management System': 'Content Management System',
'Contract End Date': 'Ablaufzeit des Vertrags',
'Contributor': 'Mitwirkung',
'Conversion Tool': 'Umrechnungstool',
'Cooking NFIs': 'Kochen NFIs',
'Cooking Oil': 'Speiseöl',
'Coordinate Conversion': 'Koordinatentransformation',
'Coping Activities': 'Bewältigungsaktivitäten',
'Copy': 'Kopieren',
'Cost Type': 'Kostentyp',
'Cost per Megabyte': 'Kosten pro Megabyte',
'Cost per Minute': 'Kosten pro Minute',
'Count': 'Zahl',
'Country of Residence': 'Land des Wohnsitzes',
'Country': 'Land',
'County': 'Bezirk',
'County / District': 'Kreis / Bezirk',
'Course Catalog': 'Katalog der Kurse',
'Course Certificate Details': 'Details zum Kurszertifikat ',
'Course Certificate added': 'Kurszertifikat hinzugefügt',
'Course Certificate deleted': 'Kurszertifikat gelöscht',
'Course Certificate updated': 'Kurszertifikat aktualisiert',
'Course Certificates': 'Kurszertifikate',
'Course Details': 'Details zum Kurs',
'Course added': 'Kurs hinzugefügt',
'Course deleted': 'Kurs gelöscht',
'Course updated': 'Kurs aktualisiert',
'Course': 'Kurs',
'Create': 'Anlegen',
'Create & manage Distribution groups to receive Alerts': 'Erstellen und Verwalten von Verteilergruppen um Warnhinweise zu empfangen',
'Create Activity Report': 'Aktivitätsreport erstellen',
'Create Activity Type': 'Aktivitätstyp erstellen',
'Create Activity': 'Aktivität erstellen',
'Create Airport': 'Flughafen erstellen',
'Create Assessment': 'Beurteilung erstellen',
'Create Asset': 'Anlage erstellen',
'Create Bed Type': 'Bettentyp erstellen',
'Create Brand': 'Marke erstellen',
'Create Budget': 'Budget erstellen',
'Create Bundle': 'Produktpaket erstellen',
'Create Case': 'Fall erstellen',
'Create Catalog Item': 'Katalogeintrag erstellen',
'Create Catalog': 'Katalog erstellen',
'Create Certificate': 'Zertifikat erstellen',
'Create Checklist': 'Prüfliste erstellen',
'Create Cholera Treatment Capability Information': 'Fügen Sie Informationen zur Möglichkeit der Behandlung von Cholerafällen hinzu',
'Create Cluster Subsector': 'Cluster Teilbereich erstellen',
'Create Cluster': 'Cluster erstellen',
'Create Competency Rating': 'Kompetenzbewertung erstellen',
'Create Contact': 'Kontaktperson erstellen',
'Create Course': 'Kurs erstellen',
'Create Dead Body Report': 'Leichenbericht erstellen',
'Create Department': 'Abteilung erstellen',
'Create Event': 'Neues Ereignis erstellen',
'Create Event Type': 'Ereignistyp erstellen',
'Create Facility': 'Einrichtung erstellen',
'Create Facility Type': 'Einrichtungstyp erstellen',
'Create Feature Layer': 'Kartenebene für Objektart erstellen',
'Create Group Entry': 'Gruppeneintrag erstellen',
'Create Group': 'Gruppe erstellen',
'Create Heliport': 'Hubschrauberlandeplatz erstellen',
'Create Hospital': 'Krankenhaus erstellen',
'Create Identification Report': 'Identifizierungsbericht erstellen',
'Create Impact Assessment': 'Folgenabschätzung erstellen',
'Create Incident Report': 'Vorfallbericht erstellen',
'Create Incident Type': 'Vorfalltyp erstellen',
'Create Incident': 'Vorfall erstellen',
'Create Item Category': 'Element Kategorie erstellen',
'Create Item Pack': 'Artikelgruppe erstellen',
'Create Item': 'Neuen Artikel anlegen',
'Create Job Title': 'Berufsbezeichnung erstellen',
'Create Kit': 'Ausstattung (Kit) anlegen',
'Create Kitting': 'Kit zusammenstellen',
'Create Layer': 'Kartenebene anlegen',
'Create Location': 'Standort anlegen',
'Create Location Hierarchy': 'Standorthierarchie anlegen',
'Create Map Profile': 'Kartenkonfiguration anlegen',
'Create Map Style': 'Kartensymbolisierung erstellen',
'Create Marker': 'Marker/Symbol anlegen',
'Create Member': 'Mitglied erstellen',
'Create Membership Type': 'Mitgliedstyp erstellen',
'Create Mobile Impact Assessment': 'Erstellen Sie Mobile Folgenabschätzung',
'Create Office': 'Büro anlegen',
'Create Office Type': 'Bürotyp anlegen',
'Create Organization': 'Organisation anlegen',
'Create Organization Type': 'Organisationstyp anlegen',
'Create Personal Effects': 'Persönlicher Habe anlegen',
'Create PoI Type': 'PoI-Typ erstellen',
'Create Point of Interest': 'PoI erstellen',
'Create Post': 'POST erstellen',
'Create Program': 'Programm erstellen',
'Create Project': 'Projekt anlegen',
'Create Projection': 'Kartenprojektion anlegen',
'Create Rapid Assessment': 'Schnell-Beurteilung anlegen',
'Create Report': 'Bericht anlegen',
'Create Repository': 'Repository anlegen',
'Create Request': 'Anfrage anlegen',
'Create Request Template': 'Anfragevorlage anlegen',
'Create Resource': 'Ressource anlegen',
'Create River': 'Neuen Fluss anlegen',
'Create Role': 'Neue Rolle anlegen',
'Create Room': 'Neues Zimmer anlegen',
'Create Seaport': 'Seehafen erstellen',
'Create Scenario': 'Neues Szenario anlegen',
'Create Sector': 'Neuen Bereich anlegen',
'Create Series': 'Serie erstellen',
'Create Service Profile': 'Neues Leistungsprofil anlegen',
'Create Shelter Service': 'Neue Unterkunft anlegen',
'Create Shelter Type': 'Neue Art der Unterkunft anlegen',
'Create Shelter': 'Neue Unterkunft anlegen',
'Create Skill Type': 'Art der Qualifikation / Fähigkeit anlegen',
'Create Skill': 'Fähigkeiten / Qualifikationen anlegen',
'Create Staff Member': 'Neuen Mitarbeiter anlegen',
'Create Staff Type': 'Mitarbeitertyp erstellen',
'Create Status': 'Neuen Status anlegen',
'Create Supplier': 'Neuen Lieferanten anlegen',
'Create Task': 'Neue Aufgabe anlegen',
'Create Theme': 'Neues Thema anlegen',
'Create User': 'Neuen Benutzer anlegen',
'Create Training Event': 'Neuen Schulungskurs anlegen',
'Create Vehicle': 'Fahrzeug erstellen',
'Create Vehicle Type': 'Fahrzeugtyp erstellen',
'Create Volunteer': 'Neuen Freiwilligen anlegen',
'Create Volunteer Role': 'Freiwilligenrolle erstellen',
'Create Warehouse': 'Neues Warenlager anlegen',
'Create Warehouse Type': 'Warenlagertyp erstellen',
'Create a Person': 'Neue Person anlegen',
'Create a group entry in the registry.': 'Erstellen Sie eine neue Gruppe in der Registry.',
'Create, enter, and manage surveys.': 'Erstellen, Eingabe und Verwaltung von Umfragen.',
'Created By': 'Erstellt von',
'Created On': 'Erstellt am',
'Creation of Surveys': 'Erstellung von Umfragen',
'Credential Details': 'Details zur Qualifikation',
'Credential added': 'Qualifikation hinzugefügt',
'Credential deleted': 'Qualifikation gelöscht',
'Credential updated': 'Qualifikation aktualisiert',
'Credentialling Organization': 'Bescheinigende Organisation',
'Credentials': 'Qualifikationen',
'Credit Card': 'Kreditkarte',
'Crime': 'Kriminalität',
'Criteria': 'Kriterien',
'CTN': 'CTN',
'Currency': 'Währung',
'Current': 'Aktuell',
'Current Address': 'Aktuelle Adresse',
'Current Entries': 'Aktuelle Einträge',
'Current Group Members': 'Aktuelle Gruppemmitglieder',
'Current Home Address': 'Aktuelle Heimatadresse',
'Current Identities': 'Aktuelle Identitäten',
'Current Location': 'Aktueller Standort',
'Current Log Entries': 'Aktuelle Protokolleinträge',
'Current Memberships': 'Aktuelle Mitgliedschaften',
'Current Needs': 'Aktuelle Bedarfsmeldungen',
'Current Records': 'Aktuelle Datensätze',
'Current Registrations': 'Aktuellen Registrierungen',
'Current Status': 'Aktueller Status',
'Current Team Members': 'Aktuelle Team Mitglieder',
'Current Twitter account': 'Aktueller Benutzeraccount bei Twitter',
'Current community priorities': 'Aktuelle Priorisierung in der Community',
'Current general needs': 'Aktueller allgemeiner Bedarf',
'Current greatest needs of vulnerable groups': 'Wichtigste Bedürfnisse der gefährdeten Gruppen',
'Current health problems': 'Derzeitige Gesundheitsprobleme',
'Current number of patients': 'Aktuelle Anzahl von Patienten',
'Current problems, categories': 'Aktuelle Probleme, Kategorien',
'Current problems, details': 'Aktuelle Probleme, Details',
'Current request': 'Aktuelle Anfrage',
'Current response': 'Aktuelle Antwort',
'Current session': 'Aktuelle Sitzung',
'Currently no Certifications registered': 'Derzeit sind keine Zertifizierungen registriert',
'Currently no Competencies registered': 'Derzeit sind keine Kompetenzen registriert',
'Currently no Course Certificates registered': 'Derzeit sind keine Kurszertifikate registriert',
'Currently no Credentials registered': 'Derzeit sind keine Qualifikationen registriert',
'Currently no Missions registered': 'Derzeit sind keine Aufträge registriert',
'Currently no Skill Equivalences registered': 'Derzeit sind keine Fähigkeits-Vergleichbarkeiten registriert',
'Currently no Trainings registered': 'Derzeit keine Schulungen registriert',
'Currently no entries in the catalog': 'Derzeit keine Einträge im Katalog',
'Customs Capacity': 'Zollkapazität',
'Customs Warehousing Storage Capacity': 'Zollwarenlager Kapazität',
'DNA Profile': 'DNA Profil',
'DNA Profiling': 'DNS-Profiling',
'Dam Overflow': 'Dam Überlauf',
'Damage': 'Beschädigung',
'Dangerous Person': 'Gefährliche Person',
'Data uploaded': 'Daten hochgeladen',
'Data': 'Daten',
'Database': 'Datenbank',
'Date & Time': 'Datum und Zeit',
'Date Available': 'Verfügbar ab',
'Date Created': 'Erstellt am',
'Date Due': 'Fällig am',
'Date for Follow-up': 'Wiedervorlage am',
'Date Joined': 'Eintrittsdatum',
'Date Modified': 'Geändert am',
'Date Published': 'Veröffentlicht am',
'Date Question': 'Gefragt am',
'Date Received': 'Erhalten am',
'Date Released': 'Datum der Veröffentlichung',
'Date Requested': 'Angefordert am',
'Date Required': 'Benötigt am',
'Date Required Until': 'Benötigt bis',
'Date Needed By': 'Benötigt ab',
'Date Sent': 'Gesendet am',
'Date Taken': 'Verwendet am',
'Date Until': 'Datum bis',
'Date and Time': 'Datum und Zeit',
'Date and time this report relates to.': 'Datum und Uhrzeit auf die sich dieser Bericht bezieht.',
'Date of Birth': 'Geburtsdatum',
'Date of Latest Information on Beneficiaries Reached': 'Datum von aktuellen Informationen der Finanzhilfen erreicht',
'Date of Report': 'Datum des Berichts',
'Date resigned': 'Datum der Kündigung',
'Date': 'Datum',
'Date/Time of Find': 'Datum/Zeit des Fundes',
'Date/Time when found': 'Datum / Uhrzeit, wann festgestellt',
'Date/Time when last seen': 'Datum / Uhrzeit, wann zuletzt gesehen',
'Date/Time': 'Datum/Zeit',
'De-duplicate': 'Bestätige Duplikat',
'De-duplicator': 'Duplikate entfernen',
'Dead Body Details': 'Details zur Leiche ',
'Dead Body Reports': 'Leichenbericht',
'Dead Body': 'Leiche',
'Dead body report added': 'Leichenbericht hinzugefügt',
'Dead body report deleted': 'Leichenbericht gelöscht',
'Dead body report updated': 'Leichenbericht aktualisiert',
'Deaths in the past 24h': 'Tote der letzten 24h',
'Deaths/24hrs': 'Todesfälle/24std',
'Decimal Degrees': 'Dezimalgrade',
'Decision': 'Entscheidung',
'Decomposed': 'Zerlegt',
'Default Base layer?': 'Standard Hintergrundkartenebene?',
'Default Location': 'Standard Gebiet/Standort',
'Default Height of the map window.': 'Standardhöhe des Kartenfensters',
'Default Map': 'Standard-Kartenfenster',
'Default Marker': 'Standardsymbol',
'Default Width of the map window.': 'Standardbreite des Kartenfensters.',
'Default map question': 'Standard Kartenfrage',
'Default?': 'Standard?',
'Default synchronization policy': 'Standard-Synchronisationsverfahren',
'Defecation area for animals': 'Kotbereich für Tiere',
'Define Scenarios for allocation of appropriate Resources (Human, Assets & Facilities).': 'Definieren Sie Szenarien für die Zuordnung der entsprechenden Ressourcen (Menschen, Anlagen und Einrichtungen).',
'Defines the icon used for display of features on handheld GPS.': 'Definiert das Symbol, welches für die Anzeige der Objekte auf mobilen GPS-Geräten verwendet wird.',
'Defines the icon used for display of features on interactive map & KML exports.': 'Definiert das Symbol, welches für die Anzeige der Objekte auf der interaktiven Karte sowie für die KML Exporte verwendet wird.',
'Defines the marker used for display & the attributes visible in the popup.': 'Definiert das Symbol, das für die Anzeige und die Attribute im Popup-Fenster verwendet wird.',
'Degrees must be a number between -180 and 180': 'Grad muss eine Zahl zwischen -180 und 180 sein.',
'Delete Alternative Item': 'Alternativen Artikel löschen',
'Delete Assessment Summary': 'Zusammenfassung der Beurteilung löschen',
'Delete Assessment': 'Beurteilung löschen',
'Delete Asset Log Entry': 'Löschen des Protokolleintrags der Anlage',
'Delete Asset': 'Anlage löschen',
'Delete Baseline Type': 'Lösche Typ des Referenzdatums',
'Delete Baseline': 'Referenzdatum löschen',
'Delete Brand': 'Lösche Marke',
'Delete Budget': 'Lösche Budget',
'Delete Bundle': 'Produktpaket löschen',
'Delete Catalog Item': 'Lösche Katalogeintrag',
'Delete Catalog': 'Katalog löschen',
'Delete Certificate': 'Zertifikat löschen',
'Delete Certification': 'Delete Zertifizierung',
'Delete Cluster Subsector': 'Cluster Teilbereich löschen',
'Delete Cluster': 'Cluster löschen',
'Delete Commitment Item': 'Zugesagten Artikel löschen',
'Delete Commitment': 'Zusage löschen',
'Delete Competency Rating': 'Kompetenzbewertung löschen',
'Delete Competency': 'Kompetenz löschen',
'Delete Contact Information': 'Kontaktinformation löschen',
'Delete Course Certificate': 'Lösche Kurszertifikat',
'Delete Course': 'Lösche Kurs',
'Delete Credential': 'Qualifikation löschen',
'Delete Document': 'Dokument löschen',
'Delete Donor': 'Spender löschen',
'Delete Entry': 'Eintrag löschen',
'Delete Event Type': 'Ereignistyp löschen',
'Delete Facility Type': 'Anlagentyp löschen',
'Delete Feature Layer': 'Lösche Objekt Kartenebene',
'Delete Group': 'Gruppe löschen',
'Delete Hospital': 'Krankenhaus löschen',
'Delete Image': 'Grafik löschen',
'Delete Impact Type': 'Löschen des Auswirkungstyps',
'Delete Impact': 'Auswirkung löschen',
'Delete Incident Report': 'Vorfallbericht löschen',
'Delete Item Category': 'Artikel Kategorie löschen',
'Delete Item Pack': 'Artikelgruppe löschen',
'Delete Item': 'Eintrag löschen',
'Delete Job Role': 'Tätigkeit löschen',
'Delete Key': 'Schlüssel löschen',
'Delete Kit': 'Ausstattung (Kit) löschen',
'Delete Layer': 'Ebene löschen',
'Delete Level 1 Assessment': 'Stufe 1 Beurteilung löschen',
'Delete Level 2 Assessment': 'Stufe 2 Beurteilung löschen',
'Delete Location': 'Standort löschen',
'Delete Map Profile': 'Kartenkonfiguration löschen',
'Delete Marker': 'Marker/Symbol löschen',
'Delete Membership': 'Mitgliedschaft löschen',
'Delete Message': 'Nachricht löschen',
'Delete Mission': 'Auftrag löschen',
'Delete Need Type': 'Anforderungstyp löschen',
'Delete Need': 'Anforderung löschen',
'Delete Office': 'Büro löschen',
'Delete Office Type': 'Bürotyp löschen',
'Delete Organization': 'Organisation löschen',
'Delete Organization Type': 'Organisationstyp löschen',
'Delete Peer': 'Peer löschen',
'Delete Person': 'Benutzer löschen',
'Delete Photo': 'Foto löschen',
'Delete Population Statistic': 'Bevölkerungsstatistik löschen',
'Delete Position': 'Position löschen',
'Delete Project': 'Projekt löschen',
'Delete Projection': 'Koordinatensystemprojektion löschen',
'Delete Rapid Assessment': 'Schnell-Beurteilung löschen',
'Delete Received Item': 'Erhaltenen Artikel löschen',
'Delete Received Shipment': 'Erhaltene Lieferung löschen',
'Delete Record': 'Datensatz löschen',
'Delete Report': 'Bericht löschen',
'Delete Request Item': 'Lösche das Anfrageelement',
'Delete Request': 'Lösche die Anfrage',
'Delete Resource': 'Lösche die Ressource',
'Delete Room': 'Raum löschen',
'Delete Scenario': 'Szenario löschen',
'Delete Section': 'Lösche Abschnitt',
'Delete Sector': 'Lösche Bereich',
'Delete Sent Item': 'Lösche gesendeten Artikel',
'Delete Sent Shipment': 'Lösche gesendete Lieferung',
'Delete Service Profile': 'Service-Profil löschen',
'Delete Setting': 'Einstellung löschen',
'Delete Skill Equivalence': 'Fähigkeits-Vergleichbarkeit löschen',
'Delete Skill Provision': 'Fähigkeits-Bereitstellung löschen',
'Delete Skill Type': 'Löschen des Typs der Befähigung',
'Delete Skill': 'Befähigung löschen',
'Delete Staff Type': 'Mitarbeitertyp löschen',
'Delete Status': 'Status löschen',
'Delete Subscription': 'Abonnement löschen',
'Delete Subsector': 'Teilbereich löschen',
'Delete Survey Answer': 'Umfrage - Antwort Löschen',
'Delete Survey Question': 'Umfrage - Frage löschen',
'Delete Survey Series': 'Umfrage Serie löschen',
'Delete Survey Template': 'Umfrage Vorlage löschen',
'Delete Training': 'Schulung löschen',
'Delete Unit': 'Einheit löschen',
'Delete User': 'Benutzer löschen',
'Delete Volunteer': 'Freiwilligen löschen',
'Delete Warehouse': 'Warenlager löschen',
'Delete from Server?': 'Vom Server löschen?',
'Delete': 'Löschen',
'Deliver To': 'Liefern an',
'Delphi Decision Maker': 'Delphi Entscheidungsträger',
'Demographic': 'Demografisch',
'Demonstrations': 'Vorführungen',
'Dental Examination': 'Zahnärztliche Prüfung',
'Dental Profile': 'Zahnärztliches Profil',
'Department / Unit': 'Abteilung / Einheit',
'Department Catalog': 'Abteilungskatalog',
'Dependent Person': 'Abhängige Person',
'Describe the condition of the roads to your hospital.': 'Beschreiben Sie den Zustand der Strassen zu Ihrem Krankenhaus.',
"Describe the procedure which this record relates to (e.g. 'medical examination')": 'Beschreiben Sie den Arbeitsablauf, der sich auf diesen Eintrag bezieht (z. B. "ärztliche Untersuchung")',
'Description of Contacts': 'Beschreibung der Kontakte',
'Description of defecation area': 'Beschreibung der Sanitäranlagen',
'Description of drinking water source': 'Beschreibung der Herkunft des Trinkwassers',
'Description of sanitary water source': 'Beschreibung der Herkunft des Sanitärwassers',
'Description of water source before the disaster': 'Beschreibung der Herkunft des Wassers vor der Katastrophe',
'Description': 'Beschreibung',
'Desire to remain with family': 'Wunsch bei der Familie zu bleiben',
'Destination': 'Ziel',
'Destroyed': 'Zerstört',
'Details field is required!': 'Detailfeld ist erforderlich!',
'Dialysis': 'Dialyse',
'Diaphragms, horizontal bracing': 'Membranen, horizontal stützen',
'Diarrhea': 'Durchfall',
'Dignitary Visit': 'Besuch des Würdenträgers',
'Direction': 'Richtung',
'Disable': 'Deaktivieren',
'Disabled participating in coping activities': 'Behinderte beteiligen sich an Bewältigungsaktivitäten',
'Disabled': 'Deaktiviert',
'Disabled?': 'Behindert?',
'Disappeared': 'Untergetaucht',
'Disaster Assessments': 'Katastrophenbeurteilungen',
'Disaster Victim Identification': 'Katastrophen Opferidentifikation',
'Disaster Victim Registry': 'Katastrophen Opferverzeichnis',
'Disaster clean-up/repairs': 'Katastrophen Reinigung/Reparaturen',
'Discharge (cusecs)': 'Ausfluss',
'Discharges/24hrs': 'Abfluss/24 Stunden',
'Discussion Forum on item': 'Diskussionsforum über Eintrag',
'Discussion Forum': 'Diskussionsforum',
'Disease vectors': 'Krankheitsvektoren',
'Dispensary': 'Ambulatorium',
'Displaced Populations': 'Heimatlose Bevölkerung',
'Displaced': 'Vertriebenen',
'Display Polygons?': 'Anzeige Polygone?',
'Display Routes?': 'Anzeige Routen?',
'Display Tracks?': 'Anzeige Wege?',
'Display Waypoints?': 'Anzeige Wegpunkte?',
'Distance between defecation area and water source': 'Distanz zwischen Sanitärbereich und Wasserquelle',
'Distance from %s:': 'Abstand von %s:',
'Distance(Kms)': 'Distanz (km)',
'Distribution groups': 'Verteilergruppen',
'Distribution': 'Verteilung',
'District': 'Bezirk',
'Rural District / District': 'Landkreis / Kreis',
'Do you really want to delete these records?': 'Sollen diese Datensätze wirklich gelöscht werden?',
'Do you want to cancel this received shipment? The items will be removed from the Inventory. This action CANNOT be undone!': 'Möchten Sie diese erhaltene Lieferung stornieren? Die Artikel werden aus dem Bestand entfernt werden. Diese Aktion kann NICHT rückgängig gemacht werden!',
'Do you want to cancel this sent shipment? The items will be returned to the Inventory. This action CANNOT be undone!': 'Möchten Sie diese abgeschickte Sendung wirklich stornieren? Die Artikel werden an die Bestandserfassung zurückgegeben werden. Diese Aktion kann NICHT rückgängig gemacht werden!',
'Do you want to receive this shipment?': 'Wollen Sie die Lieferung empfangen?',
'Do you want to send these Committed items?': 'Wollen Sie die zugesagten Artikel schicken?',
'Do you want to send this shipment?': 'Wollen Sie diese Lieferung abschicken?',
'Document Details': 'Details zum Dokument',
'Document Scan': 'Dokument Scannen',
'Document added': 'Dokument hinzugefügt',
'Document deleted': 'Dokument gelöscht',
'Document updated': 'Dokument aktualisiert',
'Documents and Photos': 'Dokumente und Fotos',
'Documents': 'Dokumente',
'Does this facility provide a cholera treatment center?': 'Verfügt diese Einrichtung über ein Behandlungscenter für Cholera?',
'Doing nothing (no structured activity)': 'Untätig (keine strukturierte Aktivität)',
'Dollars': 'Dollar',
'Domain': 'Domäne',
'Domestic chores': 'Hausarbeit',
'Donated': 'Gespendet',
'Donating Organization': 'Spendende Organisationen',
'Donation': 'Spende',
'Donations': 'Spenden',
'Donation Certificate': 'Spendenzertifikat',
'Donations Needed': 'Spenden benötigt',
'Donation Phone #': 'Spender Telefon #',
'Donor Details': 'Details zum Spender',
'Donor added': 'Spender hinzugefügt',
'Donor deleted': 'Spender gelöscht',
'Donor updated': 'Spender aktualisiert',
'Donor': 'Spender',
'Donors Report': 'Bericht zu Spendern',
'Donors': 'Spender',
'Door frame': 'Türrahmen',
'Download PDF': 'PDF herunterladen',
'Download Template': 'Vorlage herunterladen',
'Draft': 'Entwurf',
'Drainage': 'Abfluss',
'Drawing up a Budget for Staff & Equipment across various Locations.': 'Aufstellung eines Budgets für Mitarbeiter und Ausrüstung über mehrere Standorte',
'Drill Down by Group': 'Recherche nach Gruppe',
'Drill Down by Incident': 'Recherche nach Vorfall',
'Drill Down by Shelter': 'Recherche nach Unterkunft',
'Drivers': 'Fahrer',
'Driver Phone Number': 'Telefonnummer des Fahrers',
'Driving License': 'Führerschein',
'Drought': 'Dürre',
'Drop-off Location for Goods?': 'Sammelstelle für Sachspenden?',
'Drugs': 'Drogen',
'Dry Dock': 'Trockendock',
'Due Follow-ups': 'Fällige Wiedervorlagen',
'Dug Well': 'Schachtbrunnen',
'Duplicate?': 'Duplikat?',
'Dust Storm': 'Staubsturm',
'Dwelling': 'Wohnstätte',
'EMS Reason': 'EMS Grund',
'ER Status Reason': 'Status Notaufnahme Grund',
'ER Status': 'Status Notaufnahme',
'Early Recovery': 'Frühe Besserung / Bergung',
'Earthquake': 'Erdbeben',
'EasyOpt Number': 'EasyOpt Nummer',
'Edit Activity': 'Aktivität bearbeiten',
'Edit Address': 'Adresse bearbeiten',
'Edit Alternative Item': 'Alternativen Artikel bearbeiten',
'Edit Application': 'Anwendung bearbeiten',
'Edit Appointment': 'Termin bearbeiten',
'Edit Assessment Summary': 'Zusammenfassung fuer die Beurteilung bearbeiten',
'Edit Assessment': 'Beurteilung bearbeiten',
'Edit Asset Log Entry': 'Protokolleintrag der Anlage bearbeiten',
'Edit Asset': 'Anlage bearbeiten',
'Edit Baseline Type': 'Bearbeiten des Typs des Referenzdatums',
'Edit Baseline': 'Referenzdatum bearbeiten',
'Edit Brand': 'Marke bearbeiten',
'Edit Budget': 'Budget bearbeiten',
'Edit Bundle': 'Produktpaket bearbeiten',
'Edit Camp Service': 'Camp Leistung bearbeiten',
'Edit Camp Type': 'Camptyp bearbeiten',
'Edit Camp': 'Camp bearbeiten',
'Edit Catalog Item': 'Katalogeintrag bearbeiten',
'Edit Catalog': 'Katalog bearbeiten',
'Edit Certificate': 'Zertifikat bearbeiten',
'Edit Certification': 'Zertifizierung bearbeiten',
'Edit Cluster Subsector': 'Cluster Teilbereich bearbeiten',
'Edit Cluster': 'Cluster bearbeiten',
'Edit Commitment Item': 'Zugesagten Artikel bearbeiten',
'Edit Commitment': 'Zusage bearbeiten',
'Edit Competency Rating': 'Kompetenzbewertung bearbeiten',
'Edit Competency': 'Kompetenz bearbeiten',
'Edit Contact Information': 'Kontaktinformation bearbeiten',
'Edit Contact': 'Kontakt bearbeiten',
'Edit Contents': 'Inhalt bearbeiten',
'Edit Course Certificate': 'Kurszertifikat bearbeiten',
'Edit Course': 'Kurs bearbeiten',
'Edit Credential': 'Qualifikation bearbeiten',
'Edit Dead Body Details': 'Leichendetails bearbeiten',
'Edit Description': 'Beschreibung bearbeiten',
'Edit Details': 'Details bearbeiten',
'Edit Disaster Victims': 'Katastrophenopfer bearbeiten',
'Edit Document': 'Dokument bearbeiten',
'Edit Donor': 'Spender bearbeiten',
'Edit Email Settings': 'Email Einstellungen bearbeiten',
'Edit Entry': 'Eintrag bearbeiten',
'Edit Event': 'Ereignis bearbeiten',
'Edit Event Type': 'Ereignistyp bearbeiten',
'Edit Facility': 'Einrichtung bearbeiten',
'Edit Facility Type': 'Einrichtungstyp bearbeiten',
'Edit Feature Layer': 'Edit Objektlayer',
'Edit Flood Report': 'Flut Bericht Bearbeiten',
'Edit Gateway Settings': 'Gateway-Einstellungen bearbeiten',
'Edit Group': 'Gruppe bearbeiten',
'Edit Hospital': 'Krankenhaus bearbeiten',
'Edit Human Resource': 'Personelle Ressource bearbeiten',
'Edit Identification Report': 'Identifizierungsbericht bearbeiten',
'Edit Identity': 'Identität bearbeiten',
'Edit Image Details': 'Bild Details bearbeiten',
'Edit Impact Type': 'Typ der Auswirkung bearbeiten',
'Edit Impact': 'Auswirkungen bearbeiten',
'Edit Incident Report': 'Vorfallsbericht bearbeiten',
'Edit Inventory Item': 'Artikel des Bestands bearbeiten',
'Edit Item Category': 'Kategorie des Artikel bearbeiten',
'Edit Item Pack': 'Artikelgruppe bearbeiten',
'Edit Item': 'Artikel bearbeiten',
'Edit Job Role': 'Tätigkeit bearbeiten',
'Edit Key': 'Schlüssel bearbeiten',
'Edit Kit': 'Ausstattung (Kit) bearbeiten',
'Edit Layer': 'Kartenebene bearbeiten',
'Edit Level %d Locations?': 'Bearbeiten von Level %d Standorten?',
'Edit Level 1 Assessment': 'Stufe 1 Beurteilung bearbeiten',
'Edit Level 2 Assessment': 'Stufe 2 Beurteilung bearbeiten',
'Edit Location': 'Standort (Position) bearbeiten',
'Edit Log Entry': 'Protokolleintrag bearbeiten',
'Edit Map Profile': 'Kartenkonfiguration bearbeiten',
'Edit Map Services': 'Kartendienste bearbeiten',
'Edit Marker': 'Marker/Symbol bearbeiten',
'Edit Membership': 'Mitgliedschaft bearbeiten',
'Edit Message': 'Nachricht bearbeiten',
'Edit Messaging Settings': 'Messaging-Einstellungen bearbeiten',
'Edit Mission': 'Auftrag bearbeiten',
'Edit Modem Settings': 'Modem Settings bearbeiten',
'Edit Need Type': 'Bedarfstyp bearbeiten',
'Edit Need': 'Bedarf bearbeiten',
'Edit Office': 'Büro bearbeiten',
'Edit Options': 'Optionen bearbeiten',
'Edit Organization': 'Organisation bearbeiten',
'Edit Parameters': 'Parameter bearbeiten',
'Edit Peer Details': 'Details zu Peer bearbeiten',
'Edit Person Details': 'Details zur Person bearbeiten',
'Edit Personal Effects Details': 'Details zur persönlichen Habe bearbeiten',
'Edit Photo': 'Foto bearbeiten',
'Edit Population Statistic': 'Bevölkerungsstatistik bearbeiten',
'Edit Position': 'Position bearbeiten',
'Edit Problem': 'Problem bearbeiten',
'Edit Project': 'Projekt bearbeiten',
'Edit Projection': 'Kartenprojektion bearbeiten',
'Edit Rapid Assessment': 'Schnell-Beurteilung bearbeiten',
'Edit Received Item': 'Erhaltenen Artikel bearbeiten',
'Edit Received Shipment': 'Erhaltene Lieferung bearbeiten',
'Edit Record': 'Datensatz bearbeiten',
'Edit Registration Details': 'Details zur Registrierung bearbeiten',
'Edit Registration': 'Registrierung bearbeiten',
'Edit Request Item': 'Anfrage zu Artikel bearbeiten',
'Edit Request': 'Anfrage bearbeiten',
'Edit Resource': 'Ressource bearbeiten',
'Edit River': 'Fluss bearbeiten',
'Edit Role': 'Rolle bearbeiten',
'Edit Room': 'Raum bearbeiten',
'Edit Scenario': 'Szenario bearbeiten',
'Edit Sector': 'Bereich bearbeiten',
'Edit Sent Item': 'Gesendeten Artikel bearbeiten',
'Edit Setting': 'Einstellung bearbeiten',
'Edit Settings': 'Einstellungen bearbeiten',
'Edit Shelter Service': 'Unterkunft Leistung bearbeiten',
'Edit Shelter Type': 'Typ der Unterkunft bearbeiten',
'Edit Shelter': 'Unterkunft bearbeiten',
'Edit Skill Equivalence': 'Fähigkeits-Vergleichbarkeit bearbeiten',
'Edit Skill Provision': 'Fähigkeits-Bereitstellung bearbeiten',
'Edit Skill Type': 'Typ der Fähigkeit bearbeiten',
'Edit Skill': 'Fähigkeit bearbeiten',
'Edit Solution': 'Lösung bearbeiten',
'Edit Staff Type': 'Typ von Mitarbeitern bearbeiten',
'Edit Subscription': 'Abonnement bearbeiten',
'Edit Subsector': 'Teilbereich bearbeiten',
'Edit Survey Answer': 'Umfrage - Antwort bearbeiten',
'Edit Survey Question': 'Umfrage - Frage bearbeiten',
'Edit Survey Series': 'Umfrage - Serie bearbeiten',
'Edit Survey Template': 'Umfrage Vorlage bearbeiten',
'Edit Task': 'Aufgabe bearbeiten',
'Edit Team': 'Team bearbeiten',
'Edit Theme': 'Thema bearbeiten',
'Edit Themes': 'Themen bearbeiten',
'Edit Ticket': 'Ticket bearbeiten',
'Edit Track': 'Route bearbeiten',
'Edit Training': 'Schulung bearbeiten',
'Edit Tropo Settings': 'Tropo Einstellungen bearbeiten',
'Edit User': 'Benutzer bearbeiten',
'Edit Volunteer Availability': 'Verfügbarkeit von Freiwilligem bearbeiten',
'Edit Volunteer Details': 'Details zu Freiwilligem bearbeiten',
'Edit Warehouse': 'Warenlager bearbeiten',
'Edit current record': 'Aktuellen Datensatz bearbeiten',
'Edit message': 'Nachricht bearbeiten',
'Edit': 'Bearbeiten',
'Editable?': 'Bearbeitbar?',
'Education materials received': 'Ausbildungsmaterialien erhalten',
'Education materials, source': 'Herkunft der Ausbildungsmaterialien',
'Education': 'Ausbildung/Schulung',
'Effects Inventory': 'Auswirkungsbestandliste',
'Eggs': 'Eier',
'Either a shelter or a location must be specified': 'Es muss entweder eine Unterkunft oder ein Standort angegeben werden',
'Either file upload or document URL required.': 'Es ist entweder ein Dateiupload oder ein URL erforderlich',
'Either file upload or image URL required.': 'Es ist entweder ein Dateiupload oder eine Bild-URL erforderlich',
'Elderly person headed households (>60 yrs)': 'Von älteren Menschen (>60 Jahren) geführte Haushalte',
'Electrical': 'elektrisch',
'Electrical, gas, sewerage, water, hazmats': 'Elektrik, Gas, Abwasser, Wasser, Gefahrgut',
'Elevated': 'Erhöht',
'Elevation': 'Höhe',
'Elevators': 'Aufzüge',
'Eligible for Allowance': 'Berechtigt für Taschengeld',
'Email Address': 'E-Mail-Adresse',
'Email Channels (Inbound)': 'E-Mail Kanäle (eingehend)',
'Email InBox': 'E-Mail Eingang',
'Email Settings': 'E-Mail-Einstellungen',
'Email settings updated': 'E-Mail-Einstellungen aktualisiert',
'Email': 'E-Mail',
'Embalming': 'Einbalsamierung',
'Embassy': 'Botschaft',
'Emergencies': 'Notfälle',
'Emergency': 'Notfall',
'Emergency Capacity Building project': 'Notfall-Kompetenzbildungsprojekt',
'Emergency Contacts': 'Notfallkontakte',
'Emergency Department': 'Notfall-Abteilung',
'Emergency Shelter': 'Notunterkunft',
'Emergency Support Facility': 'Notfall-Unterstützungseinrichtung',
'Emergency Support Service': 'Notfall-Unterstützungsdienst',
'Emergency Telecommunications': 'Notfall-Telekommunikation',
'Enable/Disable Layers': 'Layer aktivieren/deaktivieren',
'Enabled': 'Aktiviert',
'Enabled?': 'Aktiviert?',
'End Date': 'Enddatum',
'End date should be after start date': 'Enddatum muss nach dem Startdatum liegen',
'End date': 'Enddatum',
'End of Period': 'Ende des Zeitraums',
'Enter a GPS Coord': 'Geben Sie eine GPS Koordinate ein',
'Enter a name for the spreadsheet you are uploading (mandatory).': 'Geben Sie einen Namen für die Tabelle, die Sie hochladen an (obligatorisch).',
'Enter a new support request.': 'Geben Sie eine neue Unterstützungsanfrage ein.',
'Enter a unique label!': 'Geben Sie eine eindeutige Bezeichnung ein!',
'Enter a valid date before': 'Geben Sie zuvor ein gültiges Datum ein',
'Enter a valid email': 'Geben Sie eine gültige E-Mail-Adresse ein',
'Enter a valid future date': 'Geben Sie ein gültiges, zukünftiges Datum ein',
'Enter some characters to bring up a list of possible matches': 'Geben Sie einige Zeichen ein um eine Liste möglicher Übereinstimmungen anzuzeigen',
'Enter some characters to bring up a list of possible matches.': 'Geben Sie einige Zeichen ein um eine Liste von möglichen Übereinstimmungen anzuzeigen.',
'Enter tags separated by commas.': 'Geben Sie die Tags mit Komma getrennt ein.',
'Enter the same password as above': 'Wiederholen Sie das Kennwort von oben',
'Entered': 'Eingegeben',
'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': 'Die Eingabe einer Telefonnummer ist freiwillig, sie erlaubt Ihnen aber SMS-Nachrichten zu abonnieren und zu empfangen.',
'Entry deleted': 'Eintrag gelöscht',
'Environment': 'Umgebung/Umwelt',
'Equipment': 'Ausrüstung',
'Error Tickets': 'Fehlertickets',
'Error encountered while applying the theme.': 'Bei der Anwendung des Themas ist ein Fehler aufgetreten.',
'Error in message': 'Fehler in der Nachricht',
"Error logs for '%(app)s'": 'Fehlerprotokolle für "%(app)s"',
'Errors': 'Fehler',
'ESRI Shapefile': 'ESRI Shapefile',
'Essential Staff': 'Unverzichtbarer Mitarbeiter',
'Est. Delivery Date': 'Geschätztes Lieferdatum',
'Estimated # of households who are affected by the emergency': 'Geschätzte Anzahl von Haushalten, die vom Notfall betroffen sind',
'Estimated # of people who are affected by the emergency': 'Geschätzte Anzahl von Menschen, die vom Notfall betroffen sind',
'Estimated Overall Building Damage': 'Geschätzter allgemeiner Gebäudeschaden',
'Estimated Population': 'Geschätzte Bevölkerungszahl',
'Estimated total number of people in institutions': 'Geschätzte Gesamtzahl von Menschen in Einrichtungen',
'Estimated Delivery Date': 'Voraus. Liefertermin',
'Euros': 'Euro',
'Evacuating': 'Evakuieren',
'Evacuees Capacity (Day and Night)': 'Evakuierungspotential (Tag und Nacht)',
'Evacuees Capacity (Night only)': 'Evakuierungspotential (nur Nacht)',
'Evaluate the information in this message. (This value SHOULD NOT be used in public warning applications.)': 'Informationen in dieser Nachricht bewerten. (Dieser Wert sollte NICHT in öffentlichen Warnanwendungen verwendet werden.)',
'Event Details': 'Details zum Ereignis',
'Event Type': 'Ereignistyp',
'Event Types': 'Ereignistypen',
'Event added': 'Ereignis hinzugefügt',
'Event deleted': 'Ereignis gelöscht',
'Event updated': 'Ereignis aktualisiert',
'Event': 'Ereignis',
'Events': 'Ereignisse',
'Example': 'Beispiel',
'Exceeded': 'Überschritten',
'Excellent': 'Ausgezeichnet',
'Exclude contents': 'Inhalte ausschließen',
'Excreta disposal': 'Entsorgung von Exkrementen',
'Execute a pre-planned activity identified in <instruction>': 'Ausführen einer vorausgeplanten Aktivität, identifiziert in <instruction>',
'Exercise': 'Übung',
'Exercise?': 'Übung?',
'Exercises mean all screens have a watermark & all notifications have a prefix.': 'Übungen bedeuten, dass alle Anzeigen eine Wassermarke & alle Benachrichtigungen ein Präfix haben.',
'Existing Placard Type': 'Vorhandener Plakattyp',
'Existing food stocks': 'Vorhandener Lebensmittelvorrat',
'Existing location cannot be converted into a group.': 'Vorhandener Standort kann nicht in eine Gruppe transformiert werden.',
'Exits': 'Ausgänge',
'Experience': 'Erfahrung',
'Expiration Date': 'Ablaufdatum',
'Expiration Report': 'Ablaufbericht',
'Expired?': 'Abgelaufen?',
'Expiring Staff Contracts Report': 'Berichte zu ablaufenden Mitarbeiterverträgen',
'Expiry Date': 'Ablaufdatum',
'Expiry (month)': 'Ablauf (Monat)',
'Expiry (months)': 'Ablauf (Monate)',
'Explosive Hazard': 'Explosionsgefahr',
'Export as': 'Exportieren als',
'Export Data': 'Daten exportieren',
'Export Database as CSV': 'Datenbank als CSV exportieren',
'Export in GPX format': 'Als GPX Format exportieren',
'Export in KML format': 'Als KML Format exportieren',
'Export in OSM format': 'Als OSM Format exportieren',
'Export in PDF format': 'In PDF Format exportieren',
'Export in RSS format': 'In RSS Format exportieren',
'Export in XLS format': 'In XLS Format exportieren',
'Exterior Only': 'Nur Externe',
'Exterior and Interior': 'Externe und Interne',
'Eye Color': 'Augenfarbe',
'Facebook Channels': 'Facebook Kanäle',
'Facial hair, color': 'Gesichtsbehaarung, Farbe',
'Facial hair, type': 'Gesichtsbehaarung, Art',
'Facial hear, length': 'Gesichtsbehaarung, Länge',
'Facility': 'Einrichtung',
'Facilities': 'Einrichtungen',
'Facility Details': 'Details zur Einrichtung',
'Facility Operations': 'Einrichtungsmanagement',
'Facility Status': 'Status der Einrichtung',
'Facility Type': 'Einrichtungstyp',
'Facility Types': 'Einrichtungstypen',
'Facility added': 'Einrichtung hinzugefügt',
'Facility or Location': 'Einrichtung oder Standort',
'Facility removed': 'Einrichtung entfernt',
'Facility updated': 'Einrichtung aktualisiert',
'Facility': 'Einrichtung',
'Fail': 'Fehlgeschlagen',
'Failed!': 'Fehlgeschlagen!',
'Fair': 'Mäßig',
'Falling Object Hazard': 'Gefahr durch herabstürzende Objekte',
'Families/HH': 'Familien/HH',
'Family tarpaulins received': 'Familien hat Planen erhalten',
'Family tarpaulins, source': 'Herkunft der Planen für Familie',
'Family': 'Familie',
'Family Members': 'Familienmitglieder',
'Family Reunification': 'Familienzusammenführung',
'Family/friends': 'Familie/Freunde',
'Farmland/fishing material assistance, Rank': 'Ackerland/Materialhilfe für Fischerei, Rang',
'Fatalities': 'Verstorbene',
'Father': 'Vater',
'Feature Layer added': 'Objekt-Layer hinzugefügt',
'Feature Layer deleted': 'Objekt-Layer gelöscht',
'Feature Layer updated': 'Objekt-Layer aktualisiert',
'Feature Layers': 'Objekt-Ebenen',
'Feature Namespace': 'Namespace des Objekts',
'Feature Request': 'Objekt-Anfrage',
'Feature Type': 'Objektart',
'Features Include': 'Beinhaltete Objekte',
'Federal State': 'Bundesland',
'Feeds': 'Newsfeeds',
'Female headed households': 'Weiblich geführte Haushalte',
'Female': 'Weiblich',
'Few': 'Wenige',
'Field Hospital': 'Feldlazarett',
'Field': 'Feld',
'File': 'Datei',
'Fill in Latitude': 'Geben Sie den Breitengrad ein',
'Fill in Longitude': 'Geben Sie den Längengrad ein',
'Filter Options': 'Filteroptionen',
'Filter by Tag': 'Nach Tag filtern',
'Filter by Location': 'Nach Standort filtern',
'Filter by Organization': 'Nach Organisation filtern',
'Filter by Date': 'Nach Datum filtern',
'Filter Field': 'Filter Feld',
'Filter Tweets by the date they were tweeted on': 'Filtere Tweets nach dem Datum der Sendung',
'Filter Tweets by who tweeted them': 'Filtere Tweets nach sendender Person',
'Filter Value': 'Filter Wert',
'Find Dead Body Report': 'Suche Leichenbericht',
'Find Hospital': 'Krankenhaus finden',
'Find Person Record': 'Personendatensatz finden',
'Find Volunteers': 'Freiwillige finden',
'Find a Person Record': 'Suche einen Personendatensatz',
'Find': 'Suchen',
'Fingerprint': 'Fingerabdruck',
'Fingerprinting': 'Fingerabdrücke machen',
'Fingerprints': 'Fingerabdrücke',
'Finished Jobs': 'Erledigte Jobs',
'Fire suppression and rescue': 'Feuer - Eindämmung und Rettung',
'Fire': 'Feuer',
'First': 'Erste',
'First Name': 'Vorname',
'First name': 'Vorname',
'Fishing': 'Fischerei',
'Flags': 'Flaggen',
'Flash Flood': 'Sturzflut',
'Flash Freeze': 'Schockfrost',
'Flexible Impact Assessments': 'Flexible Folgenabschätzungen',
'Flood Alerts show water levels in various parts of the country': 'Flut Alarme zeigen Wasserstände in verschiedenen Teilen des Landes.',
'Flood Alerts': 'Flut Alarme',
'Flood Depth': 'Fluthöhe',
'Flood Report Details': 'Details zum Flutbericht',
'Flood Report added': 'Flutbericht hinzugefügt',
'Flood Report deleted': 'Flutbericht gelöscht',
'Flood Report updated': 'Flutbericht aktualisiert',
'Flood Report': 'Flutbericht',
'Flood Reports': 'Flutberichte',
'Flood': 'Flut',
'Flow Status': 'Status des Ablaufs',
'fluent': 'fliessend',
'Fog': 'Nebel',
'Folder': 'Ordner',
'Follow up': 'Wiedervorlage',
'Follow-up required': 'Wiedervorlage erforderlich',
'Food Supply': 'Lebensmittelversorgung',
'Food assistance': 'Lebensmittel Hilfe',
'Food': 'Lebensmittel',
'Footer file %s missing!': 'Fußzeile Datei %s fehlt!',
'Footer': 'Fußzeile',
'For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.': 'Für ein Land wäre dies der ISO2-Code, für eine Stadt wäre es der Flughafen-Code.',
'For each sync partner, there is a default sync job that runs after a specified interval of time. You can also set up more sync jobs which could be customized on your needs. Click the link on the right to get started.': 'Für jeden Sync-Partner gibt es einen standard Sync Job, der nach einem vordefiniertem Zeitintervall ausgeführt wird. Sie können auch mehrere Sync Jobs festlegen welche nach ihren Anforderungen entsprechend ausgeführt werden. Klicken Sie auf den Link rechts um zu beginnen.',
'For enhanced security, you are recommended to enter a username and password, and notify administrators of other machines in your organization to add this username and password against your UUID in Synchronization -> Sync Partners': 'Für erweiterte Sicherheit empfiehlt sich die Eingabe eines Benutzernamens und Passworts. Bitte benachrichtigen Sie die Administratoren der anderen Geräte in Ihrem Unternehmen damit diese die Zugangsdaten unter dem Punkt Synchronization -> Sync-Partner einrichten.',
'For live help from the Sahana community on using this application, go to': 'Für direkte Hilfe von der Sahana Community zur Anwendung dieses Programmes, gehen Sie zu',
'For messages that support alert network internal functions': 'Für Nachrichten, die Netzwerkswarnungen interner Funktionen unterstützen',
'For more details on the Sahana Eden system, see the': 'Weitere Informationen zum Sahana Eden System finden Sie unter',
'For more information, see': 'Weitere Informationen finden Sie unter',
'For': 'Für',
'Forest Fire': 'Waldbrand',
'Formal camp': 'Offizielles Camp',
'Forms': 'Formulare',
'Found': 'Gefunden',
'Foundations': 'Stiftungen',
'Freezing Drizzle': 'Gefrierender Nieselregen',
'Freezing Rain': 'Gefrierender Regen',
'Freezing Spray': 'Gefrierende Gischt',
'French': 'Französisch',
'Friday': 'Freitag',
'From Adress': 'Herkunftsadresse',
'From Address': 'Herkunftsadresse',
'From Facility': 'Von Einrichtung',
'From Inventory': 'Aus dem Bestand',
'From Location': 'Vom Standort',
'From Organization': 'Von der Organisation',
'From': 'Von',
'From ': 'Von ',
'Fulfil. Status': 'Status der Bedarfsdeckung',
'Fulfill Status': 'Status der Bedarfsdeckung',
'Fulfillment Status': 'Auftragserfüllungsstatus',
'Full beard': 'Vollbart',
'Full': 'vollständig, voll, ganz',
'Fullscreen Map': 'Großbild Karte',
'Functions available': 'Verfügbare Funktionen',
'Funding': 'Finanzierung',
'Funding Organization': 'Finanzierende Organisation',
'Funeral': 'Beerdigung',
'Further Action Recommended': 'Weitere Aktivität empfohlen',
'GIS Reports of Shelter': 'GIS-Berichte der Unterkünfte',
'GIS integration to view location details of the Shelter': 'GIS-Integration um Details zum Standort der Unterkunft zu erhalten',
"Google Earth's Keyhole Markup Language": "Google Earth's Keyhole Markup Language",
'GPS Marker': 'GPS Markierung/Symbol',
'GPS Track File': 'GPS Track Datei',
'GPS Track': 'GPS Track',
'GPX Track': 'GPX Track',
'GPS eXchange format': 'GPS Geräte Austauschformat',
'Gap Analysis Map': 'Karte zur Lückenanalyse',
'Gap Analysis Report': 'Bericht zur Lückenanalyse',
'Gap Analysis': 'Lückenanalyse',
'Gap Map': 'Lückenkarte',
'Gap Report': 'Bericht über Lücken',
'Gateway Settings': 'Gateway-Einstellungen',
'Gateway settings updated': 'Gateway-Einstellungen aktualisiert',
'Gateway': 'Gateway',
'Gender': 'Geschlecht',
'General Comment': 'Allgemeine Bemerkung',
'General Medical/Surgical': 'Allgemein - Medizinisch/Chirurgisch',
'General emergency and public safety': 'Allgemein - Notfall und öffentliche Sicherheit',
'General information on demographics': 'Allgemein - Informationen zur Demographie',
'General': 'Allgemein',
'Geocode': 'Geocodierung',
'Geocoder Selection': 'Geocoder Auswahl',
'Geometry Name': 'Name der Geometrie',
'Geophysical (inc. landslide)': 'Geophysikalisch (inc. Erdrutsch)',
'Geotechnical Hazards': 'Geotechnische Gefahren',
'Geotechnical': 'Geotechnisch',
'Geraldo module not available within the running Python - this needs installing for PDF output!': 'Das Modul Geraldo steht innerhalb der aktiven Python Umgebung nicht zur Verfügung - für die PDF-Ausgabe muss es nachinstalliert werden.',
'German': 'Deutsch',
'Get incoming recovery requests as RSS feed': 'Empfangen von eingehenden Bergungsanforderungen als RSS-Feed',
'Give a brief description of the image, e.g. what can be seen where on the picture (optional).': 'Kurze Beschreibung des Bildes, z. B. was ist wo auf dem Bild zu sehen ist (nicht verpflichtend).',
'Give information about where and when you have seen them': 'Geben Sie Information wo und wann Sie sie gesehen haben',
'Global Messaging Settings': 'Globale Nachrichteneinstellungen',
'Go to Request': 'Zur Anfrage',
'Go': 'Los',
'Goatee': 'Spitzbart',
'Good Condition': 'Guter Zustand',
'Good': 'Gut',
'Goods Received Note': 'Warenempfangsbestätigung',
'Government UID': 'Regierungs-UID',
'Government building': 'Regierungsgebäude',
'Government District': 'Regierungsbezirk',
'Government': 'Regierung',
'Grade': 'Klasse',
'Greek': 'Griechisch',
'Green': 'Grün',
'GRN': 'GRN',
'GRN Number': 'GRN Nummer',
'Ground movement, fissures': 'Untergrundbewegung, Risse',
'Ground movement, settlement, slips': 'Untergrundbewegung, Bodensenkung, Abrutsche',
'Group Description': 'Gruppenbeschreibung',
'Group Details': 'Gruppendetails',
'Group Head': 'Gruppenleiter',
'Group Member added': 'Gruppenmitglied hinzugefügt',
'Group Members': 'Gruppenmitglieder',
'Group Memberships': 'Gruppenzugehörigkeiten',
'Group Name': 'Gruppenname',
'Group Size Day': 'Gruppengröße Tag',
'Group Size Night': 'Gruppengröße Nacht',
'Group Title': 'Gruppentitel',
'Group Type': 'Gruppentyp',
'Group added': 'Gruppe hinzugefügt',
'Group deleted': 'Gruppe gelöscht',
'Group description': 'Gruppenbeschreibung',
'Group updated': 'Gruppe aktualisiert',
'Group': 'Gruppe',
'Grouped by': 'Gruppiert nach',
'Groups removed': 'Gruppen entfernt',
'Groups': 'Gruppen',
'GU Done': 'GU erledigt',
'Guest': 'Gast',
'HR Manager': 'Personalmanager',
'Hail': 'Hagel',
'Hair Color': 'Haarfarbe',
'Hair Length': 'Haarlänge',
'Hair Style': 'Haarschnitt',
'Has data from this Reference Document been entered into Sahana?': 'Wurden Daten von diesem Referenzdokument in Sahana eingetragen?',
'Has the Certificate for receipt of the shipment been given to the sender?': 'Wurde das Zertifikat für den Empfang der Lieferung an den Absender übergeben?',
'Has the GRN (Goods Received Note) been completed?': 'Wurde die Warenempfangsmeldung (GRN) ausgefüllt?',
'Hazard Pay': 'Gefahrenzulage',
'Hazardous Material': 'Gefahrgut',
'Hazardous Road Conditions': 'Gefährliche Straßenverhältnisse',
'Header Background': 'Hintergrund der Kopfzeile',
'Header background file %s missing!': 'Hintergrund der Kopfzeile Datei %s fehlt!',
'Headquarters': 'Hauptquartiere',
'Head of Family': 'Familienoberhaupt',
'Health care assistance, Rank': 'Unterstützung Gesundsheitspflege, Rang',
'Health center with beds': 'Gesundheitszentrum mit Betten',
'Health center without beds': 'Gesundheitszentrum ohne Betten',
'Health center': 'Gesundheitszentrum',
'Health services status': 'Status des Gesundheitswesens',
'Health': 'Gesundheit',
'Healthcare Worker': 'Arbeiter im Gesundheitswesen',
'Heat Wave': 'Hitzewelle',
'Heat and Humidity': 'Wärme und Feuchtigkeit',
'Height': 'Höhe',
'Height (cm)': 'Höhe (cm)',
'Height (m)': 'Höhe (m)',
'Height': 'Höhe',
'Heliports': 'Hubschrauberlandeplätze',
'Help': 'Hilfe',
'Help Wanted': 'Hilfe benötigt',
'Helps to monitor status of hospitals': 'Hilfe um den Status von Krankenhäusern zu überwachen',
'Helps to report and search for missing persons': 'Hilfe beim Melden von und bei der Suche nach vermissten Personen',
'Here are the solution items related to the problem.': 'Hier sind die mit diesem Problem verbundenen Lösungselemente.',
'Heritage Listed': 'Erbe aufgelistet',
'Hide': 'Verstecken',
'Hierarchy': 'Hierarchie',
'Hierarchy Level 0 Name (i.e. Country)': 'Hierachiestufe 0 Name (d.h. Land)',
'Hierarchy Level 1 Name (e.g. State or Province)': 'Hierachiestufe 1 Name (z. B. Land oder Provinz / Gebiet)',
'Hierarchy Level 2 Name (e.g. District or County)': 'Hierachiestufe 2 Name (z. B. Bezirk)',
'Hierarchy Level 3 Name (e.g. City / Town / Village)': 'Hierachiestufe 3 Name (z. B. Ort / Stadt / Dorf)',
'Hierarchy Level 4 Name (e.g. Neighbourhood)': 'Hierachiestufe 4 Name (z.B. Nachbarschaft)',
'Hierarchy Level 5 Name': 'Hierarchie Stufe 5 Name',
'High Tide Depth': 'Tiefe bei maximaler Tide',
'High Water': 'Hochwasser',
'High': 'Hoch',
'Highest Priority Open Requests': 'Offene Anfragen höchster Priorität',
'History': 'Geschichte',
'Hit the back button on your browser to try again.': 'Verwenden Sie die Back Schaltfläche ihres Browsers um es erneut zu versuchen.',
'Holiday Address': 'Urlaubsadresse',
'Home Address': 'Heimatsadresse',
'Home Country': 'Land des Wohnsitzes',
'Home Crime': 'Häusliche Kriminalität',
'Home': 'Startseite',
'Hospital Details': 'Details zum Krankenhaus',
'Hospital Status Report': 'Statusbericht zum Krankenhaus',
'Hospital information added': 'Krankenhausinformationen hinzugefügt',
'Hospital information deleted': 'Krankenhausinformationen gelöscht',
'Hospital information updated': 'Krankenhausinformationen aktualisiert',
'Hospital status assessment.': 'Beurteilung des Zustands des Krankenhauses.',
'Hospital': 'Krankenhaus',
'Hospitals': 'Krankenhäuser',
'Hour': 'Stunde',
'Hours': 'Stunden',
'Hours by': 'Stunden gem.',
'Hours by Program Import': 'Stunden gem. Programm Import',
'Hours by Program Report': 'Stunden nach Programmbericht',
'Hours by Role Import': 'Stunden gem. Rollen Import',
'Hours by Role Report': 'Stunden nach Rollenbericht',
'Household kits received': 'Haushaltsbausätze (-kits) erhalten',
'Household kits, source': 'Herkunft der Haushaltbausätze (-kits)',
'Housing Unit': 'Gebäude',
'How does it work?': 'Wie funktioniert das?',
'How is this person affected by the disaster? (Select all that apply)': 'Wie ist diese Person von der Katastrophe betroffen? (Wählen Sie alles Zutreffende aus)',
'How long will the food last?': 'Wie lange werden die Lebensmittel reichen?',
'How many Boys (0-17 yrs) are Dead due to the crisis': 'Wie viele Jungen (0-17 Jahre) sind durch die Krise umgekommen',
'How many Boys (0-17 yrs) are Injured due to the crisis': 'Wie viele Jungen (0-17 Jahre) sind durch die Krise verletzt worden',
'How many Boys (0-17 yrs) are Missing due to the crisis': 'Wie viele Jungen (0-17 Jahre) sind aufgrund der Krise verschollen',
'How many Girls (0-17 yrs) are Dead due to the crisis': 'Wieviele Mädchen (0-17 Jahre) sind durch die Krise umgekommen',
'How many Girls (0-17 yrs) are Injured due to the crisis': 'Wieviele Mädchen (0-17 Jahre) sind durch die Krise verletzt worden',
'How many Girls (0-17 yrs) are Missing due to the crisis': 'Wieviele Mädchen (0-17 Jahre) sind aufgrund der Krise verschollen',
'How many Men (18 yrs+) are Dead due to the crisis': 'Wieviele Männer (18 Jahre+) sind durch die Krise umgekommen',
'How many Men (18 yrs+) are Injured due to the crisis': 'Wie viele Männer (18 + Jahre) wurden wegen der Krise verletzt',
'How many Men (18 yrs+) are Missing due to the crisis': 'Wie viele Männer (18 + Jahre) sind aufgrund der Krise verschollen',
'How many Women (18 yrs+) are Dead due to the crisis': 'Wieviele Frauen (18+ Jahre) sind durch die Krise umgekommen',
'How many Women (18 yrs+) are Injured due to the crisis': 'Wieviele Frauen (18+ Jahre) wurden wegen der Krise verletzt',
'How many Women (18 yrs+) are Missing due to the crisis': 'Wie viele Frauen (18 Jahre und älter) sind aufgrund der Krise verschollen',
'How many days will the supplies last?': 'Wie viele Tage werden die Waren reichen?',
'How many new cases have been admitted to this facility in the past 24h?': 'Wie viele neue Fälle wurden während der letzten 24 Stunden dieser Einrichtung zugewiesen?',
'How many of the patients with the disease died in the past 24h at this facility?': 'Wie viele der Patienten mit dieser Krankheit sind in den letzten 24 Stunden in dieser Einrichtung gestorben?',
'How many patients with the disease are currently hospitalized at this facility?': 'Wieviele Patienten mit dieser Krankheit sind momentan in dieser Einrichtung in Behandlung?',
'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': 'Wie viele Details sind sichtbar. Eine hohe Zoom-Stufe bedeutet viele Details, aber keine gute Übersicht. Eine niedrige Zoom-Stufe führt zu einer guten Übersicht, es fehlen aber die Details.',
'Hub': 'Zentrum',
'Human Resource Details': 'Details zur Personalressource',
'Human Resource Management': 'Management der Personalressourcen',
'Human Resource added': 'Personalressource hinzugefügt',
'Human Resource removed': 'Personalressource entfernt',
'Human Resource updated': 'Personalressource aktualisiert',
'Human Resource': 'Personalressource',
'Human Resources': 'Personalressourcen',
'Humanitarian NGO': 'Humanitäre NGO',
'Humanitarian Use': 'Humanitäre Zwecke',
'Hurricane Force Wind': 'Wind in Hurrikanstärke',
'Hurricane': 'Wirbelsturm',
'Hygiene kits received': 'Hygienekits empfangen',
'Hygiene kits, source': 'Herkunft der Hygienekits',
'Hygiene practice': 'Hygienepraxis',
'Hygiene problems': 'Hygieneprobleme',
'I am available in the following area(s)': 'Ich stehe in folgenden Bereichen zur Verfügung',
'IATA': 'IATA',
'ICAO': 'ICAO',
'ID Tag Number': 'Identifikations-Etikett-Nummer',
'ID Tag': 'Identifikationsetikett',
'ID Type': 'ID-Typ',
'Ice Pressure': 'Eisdruck',
'Iceberg': 'Eisberg',
'Identification Report': 'Identifizierungsbericht',
'Identification Reports': 'Identifizierungsberichte',
'Identification Status': 'Status der Identifizierung',
'Identification': 'Identifizierung',
'Identified as': 'Identifiziert als',
'Identified by': 'Identifiziert durch',
'Identity Details': 'Details zur Identität',
'Identity added': 'Identität hinzugefügt',
'Identity deleted': 'Identität gelöscht',
'Identity updated': 'Identität aktualisiert',
'Identity': 'Identität',
'If a ticket was issued then please provide the Ticket ID.': 'Wenn ein Ticket ausgestellt wurde, bitte die Ticket-ID angeben.',
'If a user verifies that they own an Email Address with this domain, the Approver field is used to determine whether & by whom further approval is required.': 'Wenn ein Benutzer sicherstellt, dass er oder sie eine Email-Adresse in dieser Domäne besitzt, wird das Approver Feld dazu verwendet, um zu bestimmen ob und von wem weitere Genehmigungen erforderlich sind.',
'If it is a URL leading to HTML, then this will downloaded.': 'Handelt es sich um eine URL zu einer HTML Seite, dann wird diese heruntergeladen.',
'If neither are defined, then the Default Marker is used.': 'Wenn nichts davon definiert wurde, wird der Standard Marker (Symbol) verwendet.',
'If no marker defined then the system default marker is used': 'Wenn keine Markierung (Symbolisierung) definiert ist dann wird die im System festgelegte Standardmarkierung verwendet',
'If no, specify why': 'Wenn nein, geben Sie bitte einen Grund dafür an',
'If none are selected, then all are searched.': 'Wird keine ausgewählt, werden alle durchsucht.',
'If the location is a geographic area, then state at what level here.': 'Wenn der Ort ein geographisches Gebiet ist, geben Sie bitte eine entsprechende Stufe an',
'If the request type is "Other", please enter request details here.': 'Wenn der Anfragetyp "Andere" ist, geben Sie bitte hier weitere Details zur Anfrage ein.',
'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organization': 'Wenn dieses Feld ausgefüllt ist, dann wird ein Benutzer mit der gleichen Domainadresse automatisch als Mitarbeiter dieser Organisation zugeordnet.',
'If this is set to True then mails will be deleted from the server after downloading.': "Wenn dies auf 'Wahr' gesetzt ist, dann werden die Mails nach dem Herunterladen vom Server gelöscht.",
'If this record should be restricted then select which role is required to access the record here.': 'Wenn der Zugriff auf diesen Datensatz beschränkt werden soll, wählen Sie hier die Rolle aus, die für den Zugriff erforderlich ist.',
'If this record should be restricted then select which role(s) are permitted to access the record here.': 'Wenn dieser Eintrag beschränkt werden soll, dann wählen Sie hier aus, welche Rolle(n) für den Zugriff auf den Eintrag berechtigt sind.',
'If yes, specify what and by whom': 'Wenn ja, geben Sie an, was und von wem',
'If yes, which and how': 'Wenn ja, welche und wie',
'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.': 'Wenn Sie kein Referenzdokument angeben, wird stattdessen ihre Mailadresse angezeigt damit die Daten verifiziert werden können.',
'If you know what the Geonames ID of this location is then you can enter it here.': 'Wenn sie die Geonames ID des Standortes wissen, dann können Sie diese hier eingeben.',
'If you know what the OSM ID of this location is then you can enter it here.': 'Wenn sie die OSM ID dieser des Standortes wissen, dann können Sie diese hier eingeben.',
'If you need to add a new document then you can click here to attach one.': 'Wenn sie ein neues Dokument hinzufügen wollen, dann können Sie hier klicken um eines anzufügen.',
'If you want several values, then separate with': 'Wenn Sie mehrere Werte möchten, dann trennen Sie diese mit',
'If you would like to help, then please': 'Wenn Sie helfen möchten, dann bitte',
'Ignore Errors?': 'Fehler ignorieren?',
'Illegal Immigrant': 'Illegaler Einwanderer',
'Illiterate': 'Analphabet',
'illiterate': 'Analphabet',
'Image Details': 'Details zum Bild',
'Image Tags': 'Tags für Bild',
'Image Type': 'Typ des Bilds',
'Image Upload': 'Bild hochladen',
'Image added': 'Bild hinzugefügt',
'Image deleted': 'Bild gelöscht',
'Image updated': 'Bild aktualisiert',
'Image': 'Bild',
'Imagery': 'Bilddaten',
'Images': 'Bilder',
'Impact Assessments': 'Folgenabschätzung',
'Impact Details': 'Details zur Folge/Auswirkung',
'Impact Type Details': 'Details zum Typ der Auswirkung',
'Impact Type added': 'Typ der Auswirkung hinzugefügt',
'Impact Type deleted': 'Typ der Auswirkung gelöscht',
'Impact Type updated': 'Typ der Auswirkung aktualisiert',
'Impact Type': 'Auswirkungsarten',
'Impact Types': 'Auswirkungsarten',
'Impact added': 'Auswirkung hinzugefügt',
'Impact deleted': 'Auswirkung gelöscht',
'Impact updated': 'Auswirkung aktualisiert',
'Impacts': 'Auswirkungen',
'Import & Export Data': 'Import & Export von Daten',
'Import Catalog Items': 'Importiere Katalogartikel',
'Import Data': 'Import von Daten',
'Import Event Types': 'Importiere Ereignistypen',
'Import File': 'Datei importieren',
'Import Heliports': 'Hubschrauberlandeplätze importieren',
'Import Incident Types': 'Ereignistypen importieren',
'Import Locations': 'Gebiete/Standorte importieren',
'Import Projects': 'Projekte importieren',
'Import Staff': 'Mitarbeiter importieren',
'Import Suppliers': 'Lieferanten importieren',
'Import Training Participants': 'Kursteilnehmer importieren',
'Import Users': 'Import von Benutzern',
'Import Volunteers': 'Freiwillige importieren',
'Import Warehouse Stock': 'Warenlagerbestand importieren',
'Import Warehouses': 'Warenlager importieren',
'Import and Export': 'Import und Export',
'Import from CSV': 'Import einer CSV-Datei',
'Import from OpenStreetMap': 'Import aus OpenStreetMap',
'Import from Ushahidi Instance': 'Import aus Ushahidi Instanz',
'Import Hours': 'Import Stundenliste',
'Import if Master': 'Import wenn Master',
'Import multiple tables as CSV': 'Mehrere Tabellen als CSV importieren',
'Import Participant List': 'Import Teilnehmerliste',
'Import Template Layout': 'Import Vorlagenlayout',
'Import Templates': 'Import Vorlagen',
'Import': 'Import',
'Important': 'Wichtig',
'Importantly where there are no aid services being provided': 'Bedeutsam wo keine Hilfsleistungen angeboten werden',
'Importing data from spreadsheets': 'Importieren von Daten aus Tabellendokumenten',
'Improper decontamination': 'Unzureichende Dekontamination',
'Improper handling of dead bodies': 'Unzureichende Behandlung von Leichen',
'In Catalogs': 'In Katalogen',
'In Inventories': 'In den Beständen',
'In Process': 'In Bearbeitung',
'In Progress': 'In Arbeit',
'In Window layout the map maximises to fill the window, so no need to set a large value here.': 'Beim Aufbau des Fensters wird die Karte maximiert um das Fenster auszufüllen, daher ist es nicht notwendig hier einen grossen Wert festzulegen.',
'Inbound Mail Settings': 'Eingehende Mail-Einstellungen',
'InBox': 'Eingang',
'Incident Categories': 'Kategorien für Vorfälle ',
'Incident Report Details': 'Details zum Vorfall-Bericht',
'Incident Report added': 'Vorfall-Bericht hinzugefügt',
'Incident Report deleted': 'Vorfall-Bericht gelöscht',
'Incident Report updated': 'Vorfall-Bericht aktualisiert',
'Incident Report': 'Vorfall-Bericht',
'Incident Reporting System': 'Vorfall-Berichtsystem',
'Incident Reporting': 'Vorfall-Berichtswesen',
'Incident Reports': 'Vorfall-Berichte',
'Incident': 'Vorfall',
'Incidents': 'Vorfälle',
'Incident Type': 'Vorfallstyp',
'Incident Types': 'Typen von Vorfällen',
'Incident Timeline': 'Zeitplan der Ereignisse',
'Incoming Shipment canceled': 'Eingehende Sendung abgebrochen',
'Incoming Shipment updated': 'Eingehende Sendung aktualisiert',
'Incoming': 'Eingehend',
'Incomplete': 'Unvollständig',
'Individuals': 'Einzelpersonen',
'Indirect support cost HQ': 'Indirekte Unterstützungskosten Hauptquartier',
'Industrial Crime': 'Industrielle Kriminalität',
'Industrial': 'Industriell',
'Industry Fire': 'Industriefeuer',
'Infant (0-1)': 'Säugling (0-1)',
'Infectious Disease (Hazardous Material)': 'Ansteckende Krankheit (gefährliches Material)',
'Infectious Disease': 'Ansteckende Krankheit',
'Infectious Diseases': 'Infektionskrankheiten',
'Infestation': 'Befall',
'Informal Leader': 'Informeller Leiter',
'Informal camp': 'Informelles Camp',
'Information gaps': 'Informationslücken',
'Infusion catheters available': 'Infusionskatheter verfügbar',
'Infusion catheters need per 24h': 'Benötigte Infusionskatheter pro 24h',
'Infusion catheters needed per 24h': 'Benötigte Infusionskatheter pro 24h',
'Infusions available': 'Infusionen verfügbar',
'Infusions needed per 24h': 'Benötigte Infusionen pro 24h',
'Initials': 'Namenskürzel',
'Inspected': 'Geprüft',
'Inspection Date': 'Prüfdatum',
'Inspection date and time': 'Datum und Uhrzeit der Überprüfung',
'Inspection time': 'Zeit der Überprüfung',
'Inspector ID': 'Prüfer-ID',
'Instant Porridge': 'Hafer Fertigbrei',
'Institution': 'Institution',
'Instructor': 'Ausbilder',
'Insufficient vars: Need module, resource, jresource, instance': 'Unzureichende vars: Benötige module, resource, jresource, instance',
'Insufficient': 'Nicht ausreichend',
'Intake Items': 'Annahme Güter',
'Intergovernmental Organization': 'Zwischenstaatliche Organisation',
'Interior walls, partitions': 'Innere Wände, Partitionen',
'Internal Resources': 'Interne Ressourcen',
'Internal Resource': 'Interne Ressource',
'Internal Shipment': 'Interne Lieferung',
'Internal State': 'Interner Zustand',
'International NGO': 'Internationale NGO',
'International Organization': 'Internationale Organisation',
'interpreter required': 'Dolmetscher erforderlich',
'Interview taking place at': 'Ort des Interviews',
'inv Home Page': 'inv Homepage',
'Invalid Query': 'Ungültige Abfrage',
'Invalid request!': 'Ungültige Anfrage!',
'Invalid ticket': 'Ungültiges Ticket',
'Invalid': 'Ungültig / Invalide',
'Inventories': 'Bestände',
'Inventory': 'Bestand',
'Inventory Item Details': 'Details zu einzelnem Bestandsartikel',
'Inventory Item updated': 'Bestandsartikel aktualisiert',
'Inventory Item': 'Bestandsartikel',
'Inventory Items include both consumable supplies & those which will get turned into Assets at their destination.': 'Bestandsartikel umfassen sowohl Verbrauchsmaterialien als auch solche die am Bestimmungsort in Anlagen umgewandelt werden.',
'Inventory Items': 'Bestandsartikel',
'Inventory Management': 'Lagerbestandsverwaltung',
'Inventory of Effects': 'Bestand von Vermögenswerten',
'Is editing level L%d locations allowed?': 'Ist die Bearbeitung von Level L%d Standorten zulässig?',
'Is it safe to collect water?': 'Ist es sicher Wasser zu sammeln?',
'Is this a strict hierarchy?': 'Ist dies eine strenge Hierarchie?',
'Issuing Authority': 'Ausstellende Behörde',
'It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'Es erfasst nicht nur die Orte wo sie aktiv sind, sondern erfasst auch Informationen über den Umfang der Projekte die sie im jeweiligen Gebiet durchführen.',
'Item Added to Shipment': 'Artikel der Lieferung hinzugefügt',
'Item Catalog Details': 'Details zum Artikelkatalog',
'Item Categories': 'Artikelkategorien',
'Item Category Details': 'Details zur Artikelkategorie',
'Item Category added': 'Artikelkategorie hinzugefügt',
'Item Category deleted': 'Artikelkategorie gelöscht',
'Item Category updated': 'Artikelkategorie aktualisiert',
'Item Category': 'Artikelkategorie',
'Item Details': 'Details zum Artikel',
'Item Pack Details': 'Details zum Artikelpaket ',
'Item Pack added': 'Artikelpaket hinzugefügt',
'Item Pack deleted': 'Artikelpaket gelöscht',
'Item Pack updated': 'Artikelpaket aktualisiert',
'Item Packs': 'Artikelpaket',
'Item Tracking Status': 'Artikel Verfolgungsstatus',
'Item/Description': 'Artikel/Beschreibung',
'Items/Description': 'Artikel/Beschreibung',
'Item added to Inventory': 'Artikel zum Bestand hinzugefügt',
'Item added to shipment': 'Artikel der Lieferung hinzugefügt',
'Item added': 'Artikel hinzugefügt',
'Item already in Bundle!': 'Artikel bereits in Produktpaket!',
'Item already in Kit!': 'Artikel bereits in Ausstattung (Kit)!',
'Item already in budget!': 'Artikel bereits im Budget!',
'Item deleted': 'Artikel gelöscht',
'Item removed from Inventory': 'Artikel aus dem Bestand entfernt',
'Item updated': 'Artikel aktualisiert',
'Item': 'Artikel',
'Items in Category are Vehicles': 'Artikel in dieser Kategorie sind Fahrzeuge',
'Items in Category can be Assets': 'Artikel in der Kategorie können als Anlagen verwendet werden',
'Items': 'Artikel',
'Japanese': 'Japanisch',
'Jerry can': 'Kanister',
'Jew': 'Jude',
'Jewish': 'Jüdisch',
'Job Role Catalog': 'Katalog für Tätigkeiten',
'Job Role Details': 'Details zur Tätigkeit',
'Job Role added': 'Tätigkeit hinzugefügt',
'Job Role deleted': 'Tätigkeit entfernt',
'Job Role updated': 'Tätigkeit aktualisiert',
'Job Role': 'Tätigkeit',
'Job Roles': 'Tätigkeiten',
'Job Title': 'Berufsbezeichnung',
'Job Title Catalog': 'Katalog der Berufsbezeichnungen',
'Journal Entry Details': 'Details zum Journaleintrag',
'Journal entry added': 'Journaleintrag hinzugefügt',
'Journal entry deleted': 'Journaleintrag gelöscht',
'Journal entry updated': 'Journaleintrag aktualisiert',
'Key Details': 'Details zum Schlüssel',
'Key added': 'Schlüssel hinzugefügt',
'Key deleted': 'Schlüssel gelöscht',
'Key updated': 'Schlüssel aktualisiert',
'Key': 'Schlüssel',
'Keys': 'Schlüssel',
'Kit Contents': 'Inhalt der Ausstattung (Kit)',
'Kit Details': 'Details zur Ausstattung (Kit)',
'Kit Updated': 'Ausstattung (Kit) aktualisiert',
'Kit added': 'Ausstattung (Kit) hinzugefügt',
'Kit deleted': 'Ausstattung (Kit) gelöscht',
'Kit updated': 'Ausstattung (Kit) aktualisiert',
'Kits': 'Ausstattungen (Kits)',
'Kit': 'Ausstattung (Kit)',
'Kit?': 'Ausstattung (Kit)?',
'Kitting': 'Ausstattung zusammenstellen',
'Known Identities': 'Bekannte Identitäten',
'Known incidents of violence against women/girls': 'Bekannte Fälle von Gewalt gegen Frauen/Mädchen',
'Known incidents of violence since disaster': 'Bekannte Fällen von Gewalt seit der Katastrophe',
'LICENSE': 'LIZENZ',
'Lack of material': 'Mangel an Material',
'Lack of school uniform': 'Fehlende Schuluniformen',
'Lack of supplies at school': 'Fehlende Vorräte an der Schule',
'Lack of transport to school': 'Fehlende Transportmöglichkeiten zur Schule',
'Lactating women': 'Stillende Frauen',
'Lahar': 'Mure',
'Landslide': 'Erdrutsch',
'Language': 'Sprache',
'Language / Communication Mode': 'Sprache / Verständigungsmodus',
'Last Downloaded': 'Zuletzt heruntergeladen',
'Last Name': 'Nachname',
'Last Pull': 'Letzter Pull',
'Last Push': 'Letzter Push',
'Last known location': 'Letzte bekannte Position',
'Last synchronization time': 'Zeitpunkt der letzte Synchronisierung',
'Last updated by': 'Letzte Aktualisierung durch',
'Last updated on': 'Letzte Aktualisierung am',
'Last updated': 'Letzte Aktualisierung',
'Last': 'Letzte',
'Latest Information': 'Aktuelle Informationen',
'Latitude & Longitude': 'Breitengrad und Längengrad',
'Latitude is North-South (Up-Down).': 'Breitengrad ist Nord-Süd (Oben-Unten).',
'Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': 'Der Breitengrad ist Null am Äquator, Positiv auf der nördlichen und negativ auf der südlichen Erdhalbkugel.',
'Latitude of Map Center': 'Breitengrad der Kartenmitte',
'Latitude of far northern end of the region of interest.': 'Nördlichster Breitengrad der betroffenen Region',
'Latitude of far southern end of the region of interest.': 'Südlichster Breitengrad der betroffenen Region',
'Latitude should be between': 'Breite muss zwischen',
'Latitude': 'Breitengrad',
'Latrines': 'Toiletten',
'Law enforcement, military, homeland and local/private security': 'Executive, Militär und andere lokale/private Sicherheitsagenturen',
'Layer Poperties': 'Kartenebenen anpassen',
'Layer added': 'Layer hinzugefügt',
'Layer deleted': 'Layer gelöscht',
'Layer updated': 'Layer aktualisiert',
'Layer': 'Kartenebene',
'Layers updated': 'Kartenebenen aktualisiert',
'Layers': 'Kartenebenen',
'Leader': 'Anführer',
'Lead Implementer': 'Hauptimplementierer',
'Legally Departed': 'Legal abgereist',
'Legend Format': 'Format der Legende',
'Legend': 'Legende',
'Length (m)': 'Länge (m)',
'Less Options': 'Weniger Optionen',
'Level of Award': 'Stufe der Auszeichnung',
'Level 1 Assessment Details': 'Stufe 1 Beurteilung - Details',
'Level 1 Assessment added': 'Stufe 1 Beurteilung hinzugefügt',
'Level 1 Assessment deleted': 'Stufe 1 Beurteilung entfernt',
'Level 1 Assessment updated': 'Stufe 1 Beurteilung aktualisiert',
'Level 1 Assessments': 'Stufe 1 Beurteilungen',
'Level 1': 'Stufe 1',
'Level 2 Assessment Details': 'Stufe 2 Beurteilung - Details',
'Level 2 Assessment added': 'Stufe 2 Beurteilung hinzugefügt',
'Level 2 Assessment deleted': 'Stufe 2 Beurteilung entfernt',
'Level 2 Assessment updated': 'Stufe 2 Beurteilung aktualisiert',
'Level 2 Assessments': 'Stufe 2 Beurteilungen',
'Level 2 or detailed engineering evaluation recommended': 'Stufe 2 oder detaillierte technische Evaluierung empfohlen',
'Level 2': 'Stufe 2',
'Level 3': 'Stufe 3',
'Level': 'Stufe',
'Library support not available for OpenID': 'OpenID wird von Bibliothek nicht unterstützt',
'License Plate': 'Nummernschild',
'LineString': 'LineString',
'Link to this result': 'Link zu dieser Liste',
'List / Add Baseline Types': 'Arten von Referenzdaten auflisten / hinzufügen',
'List / Add Impact Types': 'Arten von Auswirkungen auflisten / hinzufügen',
'List / Add Services': 'Leistungen auflisten / hinzufügen',
'List / Add Types': 'Typen auflisten / hinzufügen',
'List Activities': 'Aktivitäten auflisten',
'List All Assets': 'Alle Anlagen auflisten',
'List All Catalog Items': 'Auflisten aller Artikel aus dem Katalog',
'List All Commitments': 'Auflisten aller Zusagen',
'List All Entries': 'Alle Einträgen auflisten',
'List All Item Categories': 'Auflisten aller Artikelkategorien',
'List All Memberships': 'Alle Mitgliedschaften auflisten',
'List All Organization Approvers & Whitelists': 'Zeige alle Organisationsbestätiger & Whitelists',
'List All Received Shipments': 'Auflisten aller empfangenen Lieferungen',
'List All Records': 'Auflisten aller Datensätze',
'List All Requested Items': 'Auflisten aller angefragten Artikel',
'List All Requests': 'Auflisten aller Anfragen',
'List All Roles': 'Zeige alle Rollen',
'List All Sent Shipments': 'Liste aller gesendeten Lieferungen',
'List All Users': 'Zeige alle Nutzer',
'List All Vehicles': 'Liste aller Fahrzeuge',
'List All': 'Alle auflisten',
'List Alternative Items': 'Liste alternativer Artikel',
'List Assessment Summaries': 'Zusammenfassungen der Beurteilungen auflisten',
'List Assessments': 'Beurteilungen auflisten',
'List Assets': 'Anlagen auflisten',
'List Availability': 'Liste Verfügbarkeit',
'List Baseline Types': 'Liste der Typen von Referenzdaten',
'List Baselines': 'Liste der Referenzdaten',
'List Brands': 'Marken auflisten',
'List Budgets': 'Budgets auflisten',
'List Bundles': 'Produktpakete auflisten',
'List Camp Services': 'Liste der Leistungen im Camp',
'List Camp Types': 'Liste Typen von Camps',
'List Camps': 'Liste Camps',
'List Catalog Items': 'Katalogelemente auflisten',
'List Catalogs': 'Liste Kataloge',
'List Certificates': 'Liste Zertifikate',
'List Certifications': 'Liste Zertifizierungen',
'List Checklists': 'Checklisten Auflisten',
'List Cluster Subsectors': 'Cluster Teilbereiche Auflisten',
'List Clusters': 'Cluster Auflisten',
'List Commitment Items': 'Liste zugesagter Artikel',
'List Commitments': 'Liste Zusagen',
'List Competencies': 'Liste Kompetenzen',
'List Competency Ratings': 'Liste Kompetenzrating',
'List Conflicts': 'Liste Konflikte',
'List Contact Information': 'Liste Kontaktinformationen',
'List Contacts': 'Liste Kontakte',
'List Course Certificates': 'Liste Kurszertifikate',
'List Courses': 'Liste Kurse',
'List Credentials': 'Liste von Qualifikationen',
'List Current': 'Aktuelle Liste',
'List Documents': 'Liste Dokumente',
'List Donors': 'Liste Spender',
'List Events': 'Liste Ereignisse',
'List Facilities': 'Liste Einrichtungen',
'List Feature Layers': 'Liste Objekt-Layer',
'List Flood Reports': 'Liste Flutberichte',
'List Groups': 'Liste Gruppen',
'List Groups/View Members': 'Liste Gruppen/Anzeige der Mitglieder',
'List Hospitals': 'Liste Krankenhäuser',
'List Human Resources': 'Liste der personellen Ressourcen',
'List Identities': 'Identitäten auflisten',
'List Images': 'Bilder auflisten',
'List Impact Assessments': 'Folgenabschätzung auflisten',
'List Impact Types': 'Auswirkungsarten auflisten',
'List Impacts': 'Auswirkungen auflisten',
'List Incident Reports': 'Vorfallberichte auflisten',
'List Item Categories': 'Liste Artikelkategorien',
'List Item Packs': 'Liste der Artikelpakete',
'List Items in Inventory': 'Liste der Artikel im Bestand',
'List Items': 'Liste der Artikel',
'List Job Roles': 'Liste der Tätigkeiten',
'List Keys': 'Schlüssel auflisten',
'List Kits': 'Liste Ausstattungen (Kits)',
'List Layers': 'Liste Layer',
'List Level 1 Assessments': 'Liste Stufe 1 Beurteilungen',
'List Level 1 assessments': 'Liste Stufe 1 Beurteilungen',
'List Level 2 Assessments': 'Liste Stufe 2 Beurteilungen',
'List Level 2 assessments': 'Liste Stufe 2 Beurteilungen',
'List Locations': 'Standorte auflisten',
'List Log Entries': 'Protokolleinträge auflisten',
'List Map Profiles': 'Liste der Kartenkonfigurationen',
'List Markers': 'Marker/Symbole auflisten',
'List Members': 'Mitglieder auflisten',
'List Memberships': 'Mitgliedschaften auflisten',
'List Messages': 'Nachrichten auflisten',
'List Missing Persons': 'Vermisste Personen auflisten',
'List Missions': 'Liste Aufträge',
'List Need Types': 'Bedarftypen auflisten',
'List Needs': 'Bedarf auflisten',
'List Offices': 'Liste der Büros',
'List Organizations': 'Liste der Organisationen',
'List Peers': 'Liste der Peers',
'List Personal Effects': 'Liste der persönlichen Habe',
'List Persons': 'Liste der Personen',
'List Photos': 'Liste der Bilder',
'List Population Statistics': 'Liste Bevölkerungsstatistiken',
'List Positions': 'Liste der Positionen',
'List Problems': 'Liste der Probleme',
'List Projections': 'Liste der Kartenprojektionen',
'List Projects': 'Liste Projekte',
'List Rapid Assessments': 'Liste Schnell-Beurteilungen',
'List Recurring Requests': 'Liste wiederkehrender Anfragen',
'List Received Items': 'Liste empfangene Artikel',
'List Received Shipments': 'Liste empfangene Lieferungen',
'List Records': 'Liste Datensätze',
'List Registrations': 'Liste Registrierungen',
'List Reports': 'Liste Berichte',
'List Request Items': 'Angefragte Artikel auflisten',
'List Requests': 'Anfragen auflisten',
'List Resources': 'Ressourcen auflisten',
'List Rivers': 'Flüsse auflisten',
'List Roles': 'Rollen auflisten',
'List Rooms': 'Liste Räume',
'List Scenarios': 'Liste Szenarien',
'List Sections': 'Abschnitte auflisten',
'List Sectors': 'Bereiche auflisten',
'List Sent Items': 'Gesendete Artikel auflisten',
'List Sent Shipments': 'Liste verschickte Lieferungen',
'List Service Profiles': 'Leistungsprofile auflisten',
'List Settings': 'Einstellungen auflisten',
'List Shelter Services': 'Leistungen der Unterkunft auflisten',
'List Shelter Types': 'Typen der Unterkunft auflisten',
'List Shelters': 'Unterkünfte auflisten',
'List Site Needs': 'Alle Bedarfe',
'List Skill Equivalences': 'Liste Fähigkeits-Vergleichbarkeiten',
'List Skill Provisions': 'Fähigkeits-Bereitstellungen auflisten',
'List Skill Types': 'Liste der Typen von Fähigkeiten',
'List Skills': 'Liste Fähigkeiten',
'List Solutions': 'Liste Lösungen',
'List Staff Types': 'Mitarbeitertypen auflisten',
'List Status': 'Status auflisten',
'List Subscriptions': 'Abonnements anzeigen',
'List Subsectors': 'Teilbereiche auflisten',
'List Support Requests': 'Liste der Anfragen nach Unterstützung',
'List Survey Answers': 'Liste Umfrage-Antworten',
'List Survey Questions': 'Liste Umfrage-Fragen',
'List Survey Series': 'Liste Umfrage-Serien',
'List Survey Templates': 'Liste Umfrage-Vorlagen',
'List Tasks': 'Aufgaben auflisten',
'List Teams': 'Teams auflisten',
'List Themes': 'Themen auflisten',
'List Tickets': 'Tickets auflisten',
'List Tracks': 'Tracks auflisten',
'List Trainings': 'Schulungen/Ausbildung auflisten',
'List Units': 'Einheiten auflisten',
'List Users': 'Liste Benutzer',
'List Warehouses': 'Liste Warenlager',
'List all': 'Alle auflisten',
'List available Scenarios': 'Liste verfügbarer Szenarien',
'List of Items': 'Liste der Artikel',
'List of Missing Persons': 'Liste der vermißten Personen',
'List of Peers': 'Liste der Peers',
'List of Reports': 'Liste der Berichte',
'List of Requests': 'Liste der Anfragen',
'List of Spreadsheets uploaded': 'Liste der hochgeladenen Tabellen',
'List of Spreadsheets': 'Liste der Tabellen',
'List of Volunteers for this skill set': 'Liste der Freiwilligen für dieses Fachgebiet',
'List of Volunteers': 'Liste der Freiwilligen',
'List of addresses': 'Liste der Adressen',
'List unidentified': 'Nicht identifizierte Objekte auflisten',
'List': 'Liste',
'List/Add': 'Auflisten/Hinzufügen',
'Lists "who is doing what & where". Allows relief agencies to coordinate their activities': 'Liste "Wer macht was & wo". Ermöglicht Hilfsorganisationen, ihre Aktivitäten zu koordinieren',
'Literacy': 'Schriftkundigkeit',
'literate': 'schriftkundig',
'Live Help': 'Aktuelle Hilfe',
'Livelihood': 'Lebensgrundlage',
'Load Cleaned Data into Database': 'Bereinigte Daten in die Datenbank laden',
'Load Raw File into Grid': 'Unformatierte Datei ins Grid laden',
'Loading': 'Wird geladen',
'Loading Equipment': 'Be-/Entladeaustattung',
'Local Name': 'Lokaler Name',
'Local Names': 'Lokale Namen',
'Location 1': 'Standort 1',
'Location 2': 'Standort 2',
'Location Detail': 'Details zum Gebiet/Standort',
'Location Details': 'Standortdetails',
'Location Hierarchies': 'Standort-Hierarchien',
'Location Hierarchy Level 0 Name': 'Standort-Hierarchie Level 0 Name',
'Location Hierarchy Level 1 Name': 'Standort-Hierarchie Level 1 Name',
'Location Hierarchy Level 2 Name': 'Standort-Hierarchie Level 2 Name',
'Location Hierarchy Level 3 Name': 'Standort-Hierarchie Level 3 Name',
'Location Hierarchy Level 4 Name': 'Standort-Hierarchie Level 4 Name',
'Location Hierarchy Level 5 Name': 'Standort-Hierarchie Level 5 Name',
'Location added': 'Standort hinzugefügt',
'Location deleted': 'Standort gelöscht',
'Location group cannot be a parent.': 'Standortgruppe kann kein übergeordnetes Element sein',
'Location group cannot have a parent.': 'Standortgruppe kann kein übergeordnetes Element haben.',
'Location groups can be used in the Regions menu.': 'Standortgruppen können im Gebietsmenu verwendet werden.',
'Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group.': 'Standortgruppen können genutzt werden, um die Ergebnisse auf der Karte und in den Suchergebnissen zu filtern.',
'Location updated': 'Standort aktualisiert',
'Location': 'Standort',
'Locations of this level need to have a parent of level': 'Standorte dieser Ebene müssen ein übergeordnetes Element der folgenden Ebene haben',
'Locations': 'Standorte',
'Lockdown': 'Sperrung',
'Log Entry Details': 'Details zum Protokolleintrag',
'Log entry added': 'Protokolleintrag hinzugefügt',
'Log entry deleted': 'Protokolleintrag gelöscht',
'Log entry updated': 'Protokolleintrag aktualisiert',
'Log': 'Protokoll',
'Logged By': 'Protokolliert durch',
'Logged in': 'Eingeloggt',
'Logged out': 'Ausgeloggt',
'Login': 'Anmeldung',
'Logistics Management System': 'Logistik Managementsystem',
'Logistics': 'Logistik',
'Logo file %s missing!': 'Datei mit Logo %s fehlt!',
'Logout': 'Abmelden',
'Long Name': 'Langschriftlicher Name',
'Long Text': 'Langer Text',
'Longitude is West - East (sideways).': 'Die Geographische Länge ist West-Ost (seitlich).',
'Longitude is West-East (sideways).': 'Die Geographische Länge ist West-Ost (seitlich).',
'Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Die Geographische Länge ist 0 am Nullmeridian (GMT) und positiv in Richtung Osten (z.B. Großteil Europas und ganz Asien). In Richtung Westen - über den Atlantik und nach Amerika - ist sie negativ.',
'Longitude is zero on the prime meridian (through Greenwich, United Kingdom) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Die Geographische Länge ist 0 am Nullmeridian (GMT) und positiv in Richtung Osten (z.B. Großteil Europas und ganz Asien). In Richtung Westen - über den Atlantik und nach Amerika - ist sie negativ.',
'Longitude of Map Center': 'Geographische Länge des Kartenmittelpunktes',
'Longitude of far eastern end of the region of interest.': 'Geographische Länge des östlichen Endes des Interessensgebietes.',
'Longitude of far western end of the region of interest.': 'Geographische Länge des westlichen Endes des Interessensgebietes.',
'Longitude should be between': 'Die Geographische Länge soll in folgendem Bereich liegen',
'Longitude': 'Geographische Länge',
'Looting': 'Plünderung',
'Lost Password': 'Kennwort vergessen',
'Lost': 'Verloren',
'Low': 'Niedrig',
'Low Tide Depth': 'Tiefe bei minimaler Tide',
'Magnetic Storm': 'Magnetischer Sturm',
'Mail': 'Post',
'Main Facility': 'Haupteinrichtung',
'Major Damage': 'Großer Schaden',
'Major expenses': 'Hauptausgaben',
'Major outward damage': 'Größter nach außen gerichteter Schaden',
'Major': 'Maßgeblich',
'Make Commitment': 'Eine Zusage machen',
'Make New Commitment': 'Neue Zusage machen',
'Make Request': 'Anfrage erstellen',
'Make Supplies Request': 'Artikelanfrage stellen',
'Make preparations per the <instruction>': 'Vorbereitungen treffen für <instruction>',
'Male': 'Männlich',
'Manage Layers in Catalog': 'Kartenebenen im Katalog verwalten',
'Manage Relief Item Catalogue': 'Katalog der Unterstützungselemente verwalten',
'Manage Users & Roles': 'Benutzer- und Rollenverwaltung',
'Manage Warehouses/Sites': 'Warenlager/Orte verwalten',
'Manage Your Facilities': 'Eigene Einrichtungen verwalten',
'Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.': 'Verwaltung der Anfragen nach Vorräten, Anlagen, Mitarbeitern oder anderen Ressourcen. Vergleich mit den Beständen, wo Vorräte angefordert werden',
'Manage requests of hospitals for assistance.': 'Verwaltung der Anfragen von Krankenhäusern nach Unterstützung.',
'Manage volunteers by capturing their skills, availability and allocation': 'Verwaltung der Freiwilligen Helfer anhand ihrer Fähigkeiten, Verfügbarkeit und Zuordnung.',
'Managing Office': 'Verwaltungsbüro',
'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'Verpflichtend. Beim GeoServer, ist das der Name des Layers. In den WFS Capabilities entspricht es dem Namen des FeatureType (ohne namespace - Teil hinter dem Doppelpunkt!).',
'Mandatory. The URL to access the service.': 'Verpflichtend. Die access URL des Dienstes.',
'Manual Synchronization': 'Manuelle Synchronisation',
'Manual': 'Anleitung',
'Many': 'Viele',
'Map Center Latitude': 'Geographische Breite des Kartenmittelpunkt',
'Map Center Longitude': 'Geographische Länge des Kartenmittelpunkt',
'Map Profile Details': 'Details zur Kartenkonfiguration',
'Map Profile added': 'Kartenkonfiguration hinzugefügt',
'Map Profile deleted': 'Kartenkonfiguration gelöscht',
'Map Profile removed': 'Kartenkonfiguration entfernt',
'Map Profile updated': 'Kartenkonfiguration aktualisiert',
'Map Profile': 'Kartenkonfiguration',
'Map Profiles': 'Kartenkonfigurationen',
'Map Height': 'Höhe des Kartenfensters',
'Map Service Catalog': 'Karten Service-Katalog',
'Map Settings': 'Karteneinstellungen',
'Map Styles': 'Kartensymbolisierungen',
'Map Viewing Client': 'Kartenviewer',
'Map Width': 'Breite des Kartenfensters',
'Map Zoom': 'Kartenvergrößerung',
'Map of Hospitals': 'Karte der Krankenhäuser',
'Map of Offices': 'Karte der Büros',
'Map of Requests': 'Karte der Anfragen',
'Map of Vehicles': 'Karte der Fahrzeuge',
'Map': 'Karte',
'Marine Security': 'Hafensicherheit',
'Marital Status': 'Familienstand',
'Marker Details': 'Details zum Marker/Symbol',
'Marker added': 'Marker/Symbol hinzugefügt',
'Mark as duplicate': 'Markiere als Duplikat',
'Marker deleted': 'Marker/Symbol gelöscht',
'Marker updated': 'Marker/Symbol aktualisiert',
'Marker': 'Marker/Symbol',
'Markers': 'Marker/Symbole',
'Master Message Log to process incoming reports & requests': 'Haupt-Nachrichtenprotokoll um eingehende Berichte und Anfragen zu bearbeiten',
'Master Message Log': 'Haupt-Nachrichtenprotokoll',
'Match Percentage': 'Grad der Übereinstimmung',
'Match Requests': 'Passende Anfrage',
'Match percentage indicates the % match between these two records': 'Der Grad der Übereinstimmung gibt die prozentuale Übereinstimmung zwischen zwei Datensätzen an',
'Match?': 'Übereinstimmung?',
'Matching Catalog Items': 'Übereinstimmende Katalogelemente',
'Matching Items': 'Übereinstimmende Artikel',
'Matching Records': 'Übereinstimmende Datensätze',
'Maximum Extent': 'Maximale Ausdehnung',
'Maximum Location Latitude': 'Maximale Geographische Breite des Gebietes',
'Maximum Location Longitude': 'Maximale Geographische Länge des Gebietes',
'Max Height': 'Max Höhe',
'Medical': 'Medizin',
'Medical and public health': 'Medizinische Betreuung und öffentliches Gesundheitswesen',
'Medium': 'Mittel',
'Megabytes per Month': 'Megabytes pro Monat',
'Member removed from Group': 'Mitglied aus Gruppe entfernt',
'Members': 'Mitglieder',
'Membership Details': 'Details zur Mitgliedschaft',
'Membership Fee': 'Mitgliedsbeitrag',
'Membership Paid': 'Kostenpflichtige Mitgliedschaft',
'Membership Types': 'Mitgliedschaftstypen',
'Membership updated': 'Mitgliedschaft aktualisiert',
'Membership': 'Mitgliedschaft',
'Memberships': 'Mitgliedschaften',
'Message Details': 'Details zur Nachricht',
'Message Log': 'Nachrichtenprotokoll',
'Message Variable': 'Nachrichtenvariable',
'Message added': 'Nachricht hinzugefügt',
'Message deleted': 'Nachricht gelöscht',
'Message updated': 'Nachricht aktualisiert',
'Message variable': 'Nachrichtenvariable',
'Message': 'Nachricht',
'Messages': 'Nachrichten',
'Messaging settings updated': 'Einstellungen zur Nachrichtenübertragung aktualisiert',
'Messaging': 'Nachrichtenübertragung',
'Measure Length: Click the points along the path & end with a double-click': 'Längenmessung: Punkte entlang eines Verlaufs anklicken und mit Doppelklick abschließen',
'Meteorite': 'Meteorit',
'Meteorological (inc. flood)': 'Meteorologisch (auch Flut)',
'Method used': 'Verwendete Methode',
'Middle Name': 'Zweiter Vorname',
'Migrants or ethnic minorities': 'Migranten oder ethnische Minderheiten',
'Military': 'Militär',
'Military Grid Reference System PDFs': 'Military Grid Reference System PDFs',
'Minimum Location Latitude': 'Minimale Geographische Breite des Gebietes',
'Minimum Location Longitude': 'Minimale Geographische Länge des Gebietes',
'Minimum shift time is 6 hours': 'Minimum Dienstzeit ist sechs Stunden.',
'Minor Damage': 'Kleinere Schäden',
'Minor/None': 'Gering / Keine',
'Minorities participating in coping activities': 'Minderheiten beteiligen sich an Bewältigungsaktivitäten / Krisenbewältigungsaktivitäten',
'Minutes must be a number between 0 and 60': 'Minuten muss eine Zahl zwischen 0 und 60 sein',
'Minutes per Month': 'Minuten pro Monat',
'Minutes should be a number greater than 0 and less than 60': 'Minuten muss eine Zahl größer als 0 und kleiner als 60 sein',
'Miscellaneous': 'Verschiedenes',
'Missed': 'Verpasst',
'Missing Person Details': 'Nähere Angaben zur vermissten Person',
'Missing Person Registry': 'Register der vermissten Personen',
'Missing Person': 'Vermisste Person',
'Missing Persons Registry': 'Register der vermissten Personen',
'Missing Persons Report': 'Bericht über vermisste Personen',
'Missing Persons': 'Vermisste Personen',
'Missing Report': 'Bericht über Vermisste',
'Missing Senior Citizen': 'Vermisster älterer Bürger',
'Missing Vulnerable Person': 'Vermisste gefährdete Person',
'Missing': 'Fehlend',
'Mission Record': 'Auftragsbericht',
'Mission added': 'Auftrag hinzugefügt',
'Mission deleted': 'Auftrag gelöscht',
'Mission updated': 'Auftrag aktualisiert',
'Missions': 'Aufträge',
'Mobile Basic Assessment': 'Mobile Grundlegende Beurteilung',
'Mobile Commons Channels': 'Mobile Commons Kanäle',
'Mobile Phone': 'Mobiltelefon',
'Mobile': 'Handy',
'Mode': 'Modus',
'Model/Type': 'Modell/Typ',
'Modem Settings': 'Modemeinstellungen',
'Modem settings updated': 'Modemeinstellungen aktualisiert',
'Moderate': 'Moderat',
'Modify Information on groups and individuals': 'Anpassen der Information über Gruppen und Einzelpersonen',
'Modifying data in spreadsheet before importing it to the database': 'Anpassen von Daten in der Tabelle vor dem Import in die Datenbank',
'Module provides access to information on current Flood Levels.': 'Modul bietet Zugriff auf Information zum aktuellen Stand der Flut',
'Module': 'Modul',
'Monday': 'Montag',
'Monetization Report': 'Monetarisierungsbericht',
'Monitoring Frequency': 'Monitoring Frequenz',
'Monthly Cost': 'Monatliche Kosten',
'Monthly Salary': 'Monatliches Gehalt',
'Month': 'Monat',
'Monthly': 'Monatlich',
'Months': 'Monate',
'More': 'Mehr',
'More Options': 'Mehr Optionen',
'Morgue Status': 'Status der Leichenhalle',
'Morgue Units Available': 'Leichenhallenplätze verfügbar',
'Mosque': 'Moschee',
'Mother': 'Mutter',
'Motorcycle': 'Motorrad',
'Moustache': 'Schnurrbart',
'MultiPolygon': 'MultiPolygon',
'Multiple Matches': 'Mehrere Übereinstimmungen',
'Multiple': 'Mehrere',
'Muslim': 'Moslem',
'Must a location have a parent location?': 'Muss ein Standort einen übergeordneten Standort haben?',
'My Current function': 'Meine aktuelle Funktion',
'My Tasks': 'Meine Aufgaben',
'My Open Tasks': 'Meine unerledigten Aufgaben',
'N/A': 'Nicht zutreffend',
'NO': 'NEIN',
'NZSEE Level 1': 'NZSEE Stufe 1',
'NZSEE Level 2': 'NZSEE Stufe 2',
'Name and/or ID': 'Name und/oder ID',
'Name of Award': 'Name der Auszeichnung',
'Name of Driver': 'Name des Fahrers',
'Name of Institute': 'Name der Institution',
'Name of the file (& optional sub-path) located in static which should be used for the background of the header.': 'Name der Datei (& optionales Unterverzeichnis) die sich in static befindet und die für den Hintergrund des Headers benutzt werden soll.',
'Name of the file (& optional sub-path) located in static which should be used for the top-left image.': 'Name der Datei (& optionales Unterverzeichnis) die sich in static befindet und für das obere linke Bild verwendet werden soll.',
'Name of the file (& optional sub-path) located in views which should be used for footer.': 'Name der Datei (& optionales Unterverzeichnis) die sich in views befindet und für die Fußzeile verwendet werden soll.',
'Name of the person in local language and script (optional).': 'Name der Person in lokaler Sprache und Schreibweise (optional).',
'Name': 'Name',
'Name, Org and/or ID': 'Name, Org und/oder ID',
'Names can be added in multiple languages': 'Namen können in mehreren Sprachen hinzugefügt werden',
'National ID Card': 'Nationaler Identitätsnachweis',
'National NGO': 'Nationale NGO',
'Nationality of the person.': 'Nationalität der Person.',
'Nationality': 'Nationalität',
'native': 'Muttersprache',
'Nautical Accident': 'See-Unfall',
'Nautical Hijacking': 'See-Entführung',
'Need Details': 'Details zum Bedarf',
'Need Type Details': 'Details zum Bedarfstyp',
'Need Type added': 'Bedarfstyp hinzugefügt',
'Need Type deleted': 'Bedarfstyp gelöscht',
'Need Type updated': 'Bedarfstyp aktualisiert',
'Need Type': 'Bedarfstyp',
'Need Types': 'Bedarfstypen',
'Need added': 'Bedarf hinzugefügt',
'Need deleted': 'Bedarf gelöscht',
'Need to be logged-in to be able to submit assessments': 'Sie müssen eingeloggt sein um Beurteilungen zu veröffentlichen',
'Need to configure Twitter Authentication': 'Die Twitter Authentifizierungsdaten müssen konfiguriert sein',
'Need to specify a Budget!': 'Sie müssen ein Budget angegeben!',
'Need to specify a Kit!': 'Müssen Sie eine Ausstattung (Kit) angeben!',
'Need to specify a Resource!': 'Sie müssen eine Ressource angeben.',
'Need to specify a bundle!': 'Sie müssen ein Produktpaket angeben!',
'Need to specify a group!': 'Sie müssen einen Gruppe angeben!',
'Need to specify a location to search for.': 'Sie müssen ein Gebiet/Position für die Suche angeben.',
'Need to specify a role!': 'Sie müssen eine Rolle definieren!',
'Need to specify a table!': 'Sie müssen einen Tabellennamen angeben!',
'Need to specify a user!': 'Ein Benutzer muss angegeben werden!',
'Need updated': 'Bedarf aktualisiert',
'Needs Details': 'Details zum Bedarf',
'Needs Maintenance': 'Braucht Wartung',
'Needs to reduce vulnerability to violence': 'Handlungsbedarf um die Anfälligkeit für Gewalt zu verringern',
'Need': 'Bedarf',
'Needs': 'Bedarf',
'Neighborhood': 'Nachbarschaft',
'Neighbouring building hazard': 'Risiko durch benachbarte Gebäude',
'Neonatal ICU': 'Neugeborenen ICU',
'Neonatology': 'Neonatologie',
'Network': 'Netzwerk',
'Neurology': 'Neurologie',
'New Assessment reported from': 'Neue Beurteilung erstellt durch',
'New Certificate': 'Neues Zertifikat',
'New Checklist': 'Neue Prüfliste',
'New Entry': 'Neuer Eintrag',
'New Event': 'Neues Ereignis',
'New Item Category': 'Neue Kategorie für Artikel',
'New Job Role': 'Neue Tätigkeit',
'New Location Group': 'Neue Standortgruppe',
'New Location': 'Neuer Standort/Gebiet',
'New Peer': 'Neuer Peer',
'New Record': 'Neuer Datensatz',
'New Request': 'Neue Anfrage',
'New Role': 'Neue Rolle',
'New Scenario': 'Neues Szenario',
'New Skill': 'Neue Fähigkeit',
'New Solution Choice': 'Neue Lösungswahl',
'New Staff Member': 'Neue Mitarbeiter',
'New Stock Count': 'Neue Anzahl des Lagerbestands',
'New Support Request': 'Neue Unterstützunganfrage',
'New Synchronization Peer': 'Neuer Synchronisations Peer',
'New Team': 'Neues Team',
'New Training Course': 'Neuer Schulungskurs',
'New Volunteer': 'Neuer Freiwilliger',
'New cases in the past 24h': 'Neue Fälle in den letzten 24h',
'New': 'Neu',
'Next': 'Nächste',
'No': 'Nein',
'No Activities Found': 'Keine Aktivitäten gefunden',
'No Alternative Items currently registered': 'Zurzeit sind keine alternativen Artikel registriert',
'No Assessment Summaries currently registered': 'Zurzeit sind keine Beurteilungszusammenfassungen registriert',
'No Assessments currently registered': 'Zurzeit sind keine Beurteilungen registriert.',
'No Assets currently registered in this event': 'Zurzeit sind keine Anlagen zu diesem Ereignis registriert',
'No Assets currently registered in this scenario': 'Zurzeit sind keine Anlagen zu diesem Szenario registriert',
'No Assets currently registered': 'Zurzeit sind keine Anlagen registriert',
'No Baseline Types currently registered': 'Zurzeit sind keine Referenzdatumstypen registriert',
'No Baselines currently registered': 'Zurzeit sind keine Referenzdaten registriert',
'No Brands currently registered': 'Zurzeit sind keine Marken registriert',
'No Budgets currently registered': 'Zurzeit sind keine Budgets registriert',
'No Bundles currently registered': 'Zurzeit sind keine Produktpakete registriert',
'No Camp Services currently registered': 'Zurzeit sind keine Camp-Leistungen registriert',
'No Camp Types currently registered': 'Zurzeit sind keine Typen von Camps registriert',
'No Camps currently registered': 'Zurzeit sind keine Camps registriert',
'No Catalog Items currently registered': 'Zurzeit sind keine Katalogeinträge registriert',
'No Catalogs currently registered': 'Zurzeit sind keine Kataloge registriert',
'No Checklist available': 'Zurzeit sind keine Checklisten verfügbar',
'No Cluster Subsectors currently registered': 'Zurzeit sind keine Cluster Teilbereiche registriert',
'No Clusters currently registered': 'Zurzeit sind keine Cluster registriert',
'No Commitment Items currently registered': 'Zurzeit sind keine zugesagten Artikel registriert',
'No Commitments': 'Zurzeit sind keine Zusagen registriert',
'No Credentials currently set': 'Derzeit keine Berechtigungen hinterlegt',
'No Details currently registered': 'Zurzeit sind keine Details registriert',
'No Documents found': 'Keine Dokumente gefunden',
'No Donors currently registered': 'Zurzeit sind keine Spender registriert',
'No Events currently registered': 'Zurzeit sind keine Ereignisse registriert',
'No Facilities currently registered in this event': 'Für dieses Ereignis ist zurzeit keine Einrichtung registriert',
'No Facilities currently registered in this scenario': 'Für dieses Szenario ist zurzeit keine Einrichtung registriert.',
'No Feature Layers currently defined': 'Zurzeit sind keine Objekt-Layer definiert',
'No Flood Reports currently registered': 'Zurzeit sind keine Flutberichte registriert',
'No Groups currently defined': 'Zurzeit sind keine Gruppen definiert',
'No Groups currently registered': 'Zurzeit sind keine Gruppen registriert',
'No Hospitals currently registered': 'Zurzeit sind keine Krankenhäuser registriert',
'No Human Resources currently registered in this event': 'Für dieses Ereignis sind zurzeit keine personellen Ressourcen registriert.',
'No Human Resources currently registered in this scenario': 'Für dieses Szenario sind zurzeit keine personellen Ressourcen registriert.',
'No Identification Report Available': 'Kein Identifizierungbericht verfügbar',
'No Identities currently registered': 'Zurzeit sind keine Identitäten registriert',
'No Image': 'Kein Bild',
'No Images currently registered': 'Zurzeit sind keine Bilder registriert',
'No Impact Types currently registered': 'Zurzeit sind keine Auswirkungsarten registriert',
'No Impacts currently registered': 'Zurzeit sind keine Auswirkungen registriert',
'No Incident Reports currently registered': 'Zurzeit sind keine Vorfallberichte registriert',
'No Incoming Shipments': 'Keine eingehenden Lieferungen',
'No Item Categories currently registered': 'Zurzeit sind keine Artikelkategorien registriert',
'No Item Packs currently registered': 'Zurzeit sind keine Artikelpakete registriert',
'No Items currently registered in this Inventory': 'Für diesen Bestand sind zurzeit keine Artikel registriert',
'No Items currently registered': 'Zurzeit sind keine Artikel registriert',
'No Keys currently defined': 'Zurzeit sind keine Schlüssel definiert',
'No Kits currently registered': 'Zurzeit sind keine Ausstattungen (Kits) definiert',
'No Level 1 Assessments currently registered': 'Zurzeit keine Stufe 1 Beurteilungen registriert',
'No Level 2 Assessments currently registered': 'Zurzeit keine Stufe 2 Beurteilungen registriert',
'No Locations currently available': 'Keine Standorte/Gebiete verfügbar',
'No Locations currently registered': 'Zurzeit sind keine Standorte/Gebiete registriert',
'No Map Profiles currently defined': 'Zurzeit sind keine Kartenkonfigurationen definiert',
'No Map Profiles currently registered in this event': 'Für dieses Ereignis sind zurzeit keine Kartenkonfigurationen registriert',
'No Map Profiles currently registered in this scenario': 'Für dieses Szenario sind zurzeit keine Kartenkonfigurationen registriert',
'No Markers currently available': 'Zurzeit sind keine Marker/Symbole verfügbar',
'No Match': 'Keine Übereinstimmung',
'No Matching Catalog Items': 'Keine passenden Katalogelemente',
'No Matching Items': 'Keine passenden Artikel',
'No Matching Records': 'Keine passenden Datensätze',
'No Members currently registered': 'Zurzeit sind keine Mitglieder registriert',
'No Memberships currently defined': 'Zurzeit sind keine Mitgliedschaften definiert',
'No Messages currently in Outbox': 'Zurzeit sind keine Nachrichten im Postausgang',
'No Need Types currently registered': 'Zurzeit sind keine Anforderungstypen registriert',
'No Needs currently registered': 'Zurzeit sind keine Anforderungen registriert',
'No Offices currently registered': 'Zurzeit sind keine Büros registriert',
'No Offices found!': 'Keine Büros gefunden!',
'No Organizations currently registered': 'Zurzeit sind keine Organisationen registriert',
'No options available': 'Keine Optionen verfügbar',
'No People currently registered in this camp': 'Zurzeit sind in diesem Camp keine Personen registriert',
'No People currently registered in this shelter': 'Zurzeit sind in dieser Unterkunft keine Personen registriert',
'No Persons currently registered': 'Zurzeit sind keine Personen registriert',
'No Persons currently reported missing': 'Zurzeit sind keine Personen vermisst gemeldet',
'No Persons found': 'Keine Personen gefunden',
'No Photos found': 'Keine Fotos gefunden',
'No Picture': 'Kein Bild',
'No Population Statistics currently registered': 'Zurzeit sind keine Bevölkerungsstatistiken registriert',
'No Presence Log Entries currently registered': 'Zurzeit gibt es keine Anwesenheitsprotokolleinträge',
'No Problems currently defined': 'Zurzeit sind keine Probleme definiert',
'No Projections currently defined': 'Zurzeit sind keine Kartenprojektionen definiert',
'No Projects currently registered': 'Zurzeit sind keine Projekte registriert',
'No Rapid Assessments currently registered': 'Zurzeit sind keine Schnell-Beurteilungen registriert',
'No Received Items currently registered': 'Zurzeit sind keine erhaltenen Lieferungen registriert',
'No Received Shipments': 'Keine erhaltene Lieferungen',
'No Records currently available': 'Zurzeit sind keine Datensätze registriert',
'No Request Items currently registered': 'Zurzeit sind keine angefragten Artikel registriert',
'No Requests': 'Keine Anfragen',
'No Rivers currently registered': 'Zurzeit sind keine Flüsse registriert',
'No Roles currently defined': 'Zurzeit sind keine Rollen definiert',
'No Rooms currently registered': 'Zurzeit sind keine Räume registriert',
'No Scenarios currently registered': 'Zurzeit sind keine Szenarios registriert',
'No Sections currently registered': 'Zurzeit sind keine Abschnitte registriert',
'No Sectors currently registered': 'Zurzeit sind keine Bereiche registriert',
'No Sent Items currently registered': 'Zurzeit sind keine gesendeten Artikel registriert',
'No Sent Shipments': 'Keine versandten Lieferungen',
'No Settings currently defined': 'Zurzeit sind keine Einstellungen definiert',
'No Shelter Services currently registered': 'Zurzeit sind keine Unterkunftsleistungen registriert',
'No Shelter Types currently registered': 'Zurzeit sind keine Unterkunfttypen registriert',
'No Shelters currently registered': 'Zurzeit sind keine Unterkünfte registriert',
'No Solutions currently defined': 'Zurzeit sind keine Lösungen definiert',
'No Staff Types currently registered': 'Zurzeit sind keine Mitarbeitertypen registriert',
'No Subscription available': 'Keine Abonnements verfügbar',
'No Subsectors currently registered': 'Zurzeit sind keine Teilbereiche registriert',
'No Support Requests currently registered': 'Zurzeit sind keine Unterstützungsanfragen registriert',
'No Survey Answers currently entered.': 'Zurzeit wurden noch keine Antworten auf Umfragen eingegeben.',
'No Survey Questions currently registered': 'Zurzeit wurden noch keine Umfragen-Fragen registriert. ',
'No Survey Series currently registered': 'Zurzeit wurden noch keine Umfragenserie registriert',
'No Survey Template currently registered': 'Zurzeit wurden noch keine Umfragen-Vorlage registriert',
'No Tasks with Location Data': 'Für dieses Gebiet/Standort liegen zurzeit keine Aufgaben vor',
'No Teams currently registered': 'Zurzeit wurden noch keine Teams registriert',
'No Themes currently defined': 'Zurzeit wurden noch keine Themen registriert',
'No Tickets currently registered': 'Zurzeit wurden noch keine Tickets registriert',
'No Tracks currently available': 'Zurzeit sind noch keine Tracks verfügbar',
'No Users currently registered': 'Zurzeit wurden noch keine Benutzer registriert',
'No Volunteers currently registered': 'Zurzeit sind noch keine Freiwilligen registriert',
'No Warehouses currently registered': 'Zurzeit sind noch keine Warenlager registriert',
'No access at all': 'Kein Zugriff',
'No access to this record!': 'Kein Zugriff auf diesen Datensatz!',
'No action recommended': 'Keine Aktion empfohlen',
'No conflicts logged': 'Keine Konflikte protokolliert',
'No contact information available': 'Keine Kontaktinformation verfügbar',
'No contacts currently registered': 'Zurzeit sind noch keine Kontakte registriert',
'No data available': 'Keine Daten verfügbar',
'No data in this table - cannot create PDF!': 'Keine Daten in dieser Tabelle - PDF kann nicht erstellt werden!',
'No databases in this application': 'Keine Datenbanken in dieser Anwendung',
'No dead body reports available': 'Keine Leichenberichte verfügbar',
'No entries found': 'Keine Einträge gefunden',
'No entries matching the query': 'Die Abfrage lieferte keine Einträge',
'No entry available': 'Kein Eintrag verfügbar',
'No location known for this person': 'Für diese Person ist kein Gebiet/Standort bekannt',
'No locations found for members of this team': 'Für Mitglieder dieses Teams ist kein Gebiet/Standort bekannt',
'No log entries matching the query': 'Die Abfrage lieferte keine Protokolleinträge',
'No messages in the system': 'Keine Nachrichten im System',
'No peers currently registered': 'Zurzeit sind keine Peers registriert',
'No pending registrations found': 'Keine anstehenden Registrierungen gefunden',
'No pending registrations matching the query': 'Die Abfrage lieferte keine anstehenden Registrierungen',
'No person record found for current user.': 'Kein Personendatensatz für den aktuellen Benutzer gefunden.',
'No problem group defined yet': 'Noch keine Problem-Gruppe definiert',
'No records found': 'Keine Datensätze gefunden',
'No records matching the query': 'Die Abfrage lieferte keine Datensätze',
'No reports available.': 'Keine Berichte verfügbar.',
'No reports currently available': 'Zurzeit sind keine Berichte verfügbar',
'No requests found': 'Keine Anfragen gefunden',
'No resources currently reported': 'Zurzeit sind keine Ressourcen gemeldet',
'No service profile available': 'Kein Leistungsprofil verfügbar',
'No skills currently set': 'Zurzeit sind keine Fähigkeiten festgelegt',
'No staff or volunteers currently registered': 'Zurzeit sind weder Mitarbeiter noch Freiwillige registriert',
'No status information available': 'Keine Statusinformation verfügbar',
'No synchronization': 'Keine Synchronisation',
'No tasks currently registered': 'Zurzeit sind keine Aufgaben registriert',
'No template found!': 'Keine Vorlage gefunden!',
'No units currently registered': 'Zurzeit sind keine Einheiten registriert',
'No volunteer availability registered': 'Zurzeit ist keine Verfügbarkeit von Freiwilligen registriert',
'Non-structural Hazards': 'Nicht-strukturelle Gefahren',
'None (no such record)': 'Nichts (kein entsprechender Datensatz)',
'None': '-',
'Noodles': 'Nudeln',
'Normal Address': 'Normale Adresse',
'Normal Job': 'Normaler Beruf',
'Not Applicable': 'Nicht zutreffend',
'Not Authorised!': 'Nicht berechtigt!',
'Not Possible': 'Nicht möglich',
'Not Set': 'Nicht festgelegt',
'Not Authorized': 'Nicht berechtigt',
'Not installed or incorrectly configured.': 'Nicht installiert oder nicht korrekt konfiguriert.',
'Not yet a Member of any Group': 'Bis jetzt kein Mitglied irgendeiner Gruppe',
'Note that this list only shows active volunteers. To see all people registered in the system, search from this screen instead': 'Beachten Sie, dass diese Liste nur aktive Freiwillige zeigt. Um alle registrierten Personen im System zu sehen, suchen sie statt dessen auf diesem Bildschirm',
'Notice to Airmen': 'Hinweis für Flieger',
'Notify': 'Benachrichtigen',
'Number': 'Anzahl',
'Number of Barges': 'Zahl der Lastschiffe',
'Number of Columns': 'Anzahl der Spalten',
'Number of Patients': 'Anzahl der Patienten',
'Number of People Required': 'Anzahl der benötigten Personen',
'Number of Rows': 'Anzahl der Reihen',
'Number of Tugboats': 'Zahl der Schleppkähne',
'Number of additional beds of that type expected to become available in this unit within the next 24 hours.': 'Anzahl von zusätzlichen Betten dieses Typs, die voraussichtlich in den nächsten 24 Stunden in dieser Einheit zur Verfügung stehen werden.',
'Number of alternative places for studying': 'Anzahl von alternativen Orten zum studieren.',
'Number of available/vacant beds of that type in this unit at the time of reporting.': 'Anzahl von verfügbaren/freien Betten dieses Typs in dieser Einheit zum Zeitpunkt des Berichtes.',
'Number of deaths during the past 24 hours.': 'Anzahl von Toten in den letzten 24 Stunden',
'Number of discharged patients during the past 24 hours.': 'Anzahl der entlassenen Patienten in den vergangen 24 Stunden',
'Number of doctors': 'Anzahl der Ärzte',
'Number of in-patients at the time of reporting.': 'Anzahl von in-Patienten zum Zeitpunkt der Berichterstellung',
'Number of newly admitted patients during the past 24 hours.': 'Anzahl der neu zugewiesenen Patienten innerhalb der letzten 24 Stunden',
'Number of non-medical staff': 'Anzahl des nicht-medizinischen Personals',
'Number of nurses': 'Anzahl der Krankenschwestern',
'Number of private schools': 'Anzahl der privaten Schulen',
'Number of public schools': 'Anzahl der öffentlichen Schulen',
'Number of religious schools': 'Anzahl der religiösen Schulen',
'Number of residential units not habitable': 'Anzahl der nicht bewohnbaren Wohneinheiten',
'Number of residential units': 'Anzahl der Wohneinheiten',
'Number of vacant/available beds in this hospital. Automatically updated from daily reports.': 'Anzahl der freien/verfügbaren Betten in diesem Krankenhaus. Automatisch aktualisiert aus täglichen Berichten.',
'Number of vacant/available units to which victims can be transported immediately.': 'Anzahl der freien/verfügbaren Einheiten zu denen die Opfer sofort transportiert werden können.',
'Number or Label on the identification tag this person is wearing (if any).': 'Nummer oder Beschriftung auf der Identifikationsmarke den diese Person trägt (falls vorhanden).',
'Number or code used to mark the place of find, e.g. flag code, grid coordinates, site reference number or similar (if available)': 'Nummer oder Code, der den Fundort markiert, z. B. Flaggencode, Koordinaten, Standortnummer oder Ähnliches (falls verfügbar)',
'Number': 'Nummer',
'Number/Percentage of affected population that is Female & Aged 0-5': 'Anzahl/Prozentsatz der betroffenen weiblichen Bevölkerung im Alter zwischen 0-5 Jahren',
'Number/Percentage of affected population that is Female & Aged 13-17': 'Anzahl/Prozentsatz der betroffenen weiblichen Bevölkerung im Alter zwischen 13-17 Jahren',
'Number/Percentage of affected population that is Female & Aged 18-25': 'Anzahl/Prozentsatz der betroffenen weiblichen Bevölkerung im Alter zwischen 18-25 Jahren',
'Number/Percentage of affected population that is Female & Aged 26-60': 'Anzahl/Prozentsatz der betroffenen weiblichen Bevölkerung im Alter zwischen 26-60 Jahren',
'Number/Percentage of affected population that is Female & Aged 6-12': 'Anzahl/Prozentsatz der betroffenen weiblichen Bevölkerung im Alter zwischen 6-12 Jahren',
'Number/Percentage of affected population that is Female & Aged 61+': 'Anzahl/Prozentsatz der betroffenen weiblichen Bevölkerung über 61',
'Number/Percentage of affected population that is Male & Aged 0-5': 'Anzahl/Prozentsatz der betroffenen männlichen Bevölkerung im Alter zwischen 0-5 Jahren',
'Number/Percentage of affected population that is Male & Aged 13-17': 'Anzahl/Prozentsatz der betroffenen männlichen Bevölkerung im Alter zwischen 13-17 Jahren',
'Number/Percentage of affected population that is Male & Aged 18-25': 'Anzahl/Prozentsatz der betroffenen männlichen Bevölkerung im Alter zwischen 18-25 Jahren',
'Number/Percentage of affected population that is Male & Aged 26-60': 'Anzahl/Prozentsatz der betroffenen männlichen Bevölkerung im Alter zwischen 26-60 Jahren',
'Number/Percentage of affected population that is Male & Aged 6-12': 'Anzahl/Prozentsatz der betroffenen männlichen Bevölkerung im Alter zwischen 6-12 Jahren',
'Number/Percentage of affected population that is Male & Aged 61+': 'Anzahl/Prozentsatz der betroffenen männlichen Bevölkerung über 61',
'Nursery Beds': 'Krankenhausbetten',
'Nutrition problems': 'Ernährungsprobleme',
'Nutrition': 'Nahrung',
'Opportunities to Volunteer On-Site?': 'Möglichkeiten für Freiwillige vor Ort?',
'OR Reason': 'oder Grund',
'OR Status Reason': 'oder Statusgrund',
'OR Status': 'oder Status',
'Observer': 'Beobachter',
'Obsolete': 'Veraltet',
'Obstetrics/Gynecology': 'Geburtshilfe/Gynäkologie',
'Office Address': 'Büroadresse',
'Office Details': 'Bürodetails',
'Office Phone': 'Telefon im Büro',
'Office Type': 'Bürotyp',
'Office Types': 'Bürotypen',
'Office added': 'Büro hinzugefügt',
'Office deleted': 'Büro gelöscht',
'Office updated': 'Büro aktualisiert',
'Office': 'Büro',
'Offices & Warehouses': 'Büros & Warenlager',
'Offices': 'Büros',
'Offline Sync (from USB/File Backup)': 'Offline-Synchronisation (von USB/Dateisicherung)',
'Offline Sync': 'Offline-Synchronisation',
'Oil Terminal Depth': 'Tiefe des Ölterminals',
'Older people as primary caregivers of children': 'Ältere Menschen als primäre Pfleger von Kindern',
'Older people in care homes': 'Ältere Menschen in Pflegeheimen',
'Older people participating in coping activities': 'Ältere Menschen die sich an Krisenbewältigungsaktivitäten beteiligen',
'Older person (>60 yrs)': 'Ältere Personen (> 60 Jahre)',
'On by default? (only applicable to Overlays)': 'Standardmäßig an? (gilt nur für Overlays)',
'On by default?': 'Standardmäßig an?',
'On Hold': 'Abwarten',
'One Time Cost': 'Einmalige Kosten',
'One time cost': 'Einmalige Kosten',
'One-time costs': 'Einmalige Kosten',
'One-time': 'Einmalig',
'Oops! Something went wrong...': 'Hoppla! Etwas ging schief...',
'Oops! something went wrong on our side.': 'Hoppla! Etwas ging auf unserer Seite schief.',
'Opacity (1 for opaque, 0 for fully-transparent)': 'Opazität (1 für opaque - undurchsichtig, 0 für vollständig transparent)',
'Opacity': 'Opazität (Undurchsichtigkeit)',
'Open area': 'Offener Bereich',
'Open recent': 'Kürzlich Bearbeitetes öffnen',
'Open': 'Öffnen',
'Opening Times': 'Öffnungszeiten',
'OpenStreetMap Tiles': 'OpenStreetMap Tiles',
'OpenWeatherMap data': 'OpenWeatherMap Daten',
'Operating Rooms': 'Betriebsräume',
'Optional link to an Incident which this Assessment was triggered by.': 'Optionaler Link zu einem Vorfall, der diese Beurteilung auslöste.',
'Optional': 'Optional',
'Optional. If you wish to style the features based on values of an attribute, select the attribute to use here.': 'Optional. Wenn Sie die Darstellung der Objekte auf der Basis von Werten eines Attributs festlegen möchten, wählen sie das zu verwendende Attribut hier aus.',
'Optional. In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'Optional. Bei GeoServer ist das die Namespace-URI des Arbeitsbereichs (nicht der Name!). Beim WFS getCapabilities ist dies der Namensteil des FeatureTypes vor dem Doppelpunkt(:).',
'Optional. The name of an element whose contents should be a URL of an Image file put into Popups.': 'Optional. Der Name eines Elements dessen Inhalt eine URL zu einer Bilddatei die im Dialogfenster angezeigt werden soll.',
'Optional. The name of an element whose contents should be put into Popups.': 'Optional. Name eines Elements, dessen Inhalt in Dialogfenstern angezeigt wird.',
'Optional. The name of the schema. In Geoserver this has the form http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.': 'Optional. Name des Schemas. Bei Geoserver wird das Format http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name verwendet.',
'Options': 'Optionen',
'Organization Details': 'Details zur Organisation',
'Organization Domains': 'Organisationsdomains',
'Organization Registry': 'Organisationsdatenbank',
'Organization Type': 'Organisationstyp',
'Organization Types': 'Organisationstypen',
'Organization added': 'Organisation hinzugefügt',
'Organization deleted': 'Organisation gelöscht',
'Organization updated': 'Organisation aktualisiert',
'Organization': 'Organisation',
'Organizations': 'Organisationen',
'Organization/Supplier': 'Organisation/Anbieter',
'Organized By': 'Organisiert durch',
'Origin of the separated children': 'Ursprung der getrennten Kinder',
'Origin': 'Ursprung',
'Other Address': 'Andere Adresse',
'Other (describe)': 'Andere (näher beschreiben)',
'Other (specify)': 'Sonstige (näher spezifizieren)',
'Other Evidence': 'Anderer Nachweis',
'Other Faucet/Piped Water': 'Andere Wasserrohre/-hähne',
'Other Isolation': 'Andere Isolierung',
'Other Name': 'Sonstiger Name',
'Other activities of boys 13-17yrs before disaster': 'Andere Aktivitäten von Jungen 13-17 Jahre vor der Katastrophe',
'Other activities of boys 13-17yrs': 'Andere Aktivitäten der Jungen 13-17 Jahre',
'Other activities of boys <12yrs before disaster': 'Andere Aktivitäten von Jungen <12 Jahre vor der Katastrophe',
'Other activities of boys <12yrs': 'Andere Aktivitäten von Jungen <12 Jahren',
'Other activities of girls 13-17yrs before disaster': 'Andere Aktivitäten von Mädchen 13-17 Jahre vor der Katastrophe',
'Other activities of girls 13-17yrs': 'Andere Aktivitäten von Mädchen 13-17 Jahre',
'Other activities of girls<12yrs before disaster': 'Andere Aktivitäten von Mädchen <12 Jahre vor der Katastrophe',
'Other activities of girls<12yrs': 'Andere Aktivitäten von Mädchen <12 Jahre',
'Other alternative infant nutrition in use': 'Andere alternative Kindernahrung die Verwendung findet.',
'Other alternative places for study': 'Andere alternative Orte zum Lernen',
'Other assistance needed': 'Andere Unterstützung benötigt',
'Other assistance, Rank': 'Andere Unterstützung, Rang',
'Other current health problems, adults': 'Andere aktuelle gesundheitliche Probleme, Erwachsene',
'Other current health problems, children': 'Andere aktuelle gesundheitliche Probleme, Kinder',
'Other events': 'Sonstige Ereignisse',
'Other factors affecting school attendance': 'Andere Faktoren mit Einfluss auf den Schulbesuch',
'Other major expenses': 'Andere große Ausgaben',
'Other non-food items': 'Andere non-food Posten',
'Other recommendations': 'Andere Empfehlungen',
'Other residential': 'Andere Bewohner/innen',
'Other school assistance received': 'Andere erhaltene Schulunterstützung',
'Other school assistance, details': 'Andere Schulhilfe, Einzelheiten',
'Other school assistance, source': 'Herkunft anderer Schulhilfen',
'Other settings can only be set by editing a file on the server': 'Andere Einstellungen können nur durch Bearbeiten einer Datei auf dem Server festgelegt werden',
'Other side dishes in stock': 'Andere Speisen auf Lager',
'Other types of water storage containers': 'Andere Arten von Wassertanks',
'Other ways to obtain food': 'Weitere Möglichkeiten um an Nahrungsmitteln zu gelangen',
'Other': 'Sonstige',
'Outbound Mail settings are configured in models/000_config.py.': 'Abgehende Mail-Einstellungen werden in der Datei models/000_config.py konfiguriert.',
'Outbox': 'Ausgang',
'Outgoing SMS Handler': 'SMS-Handler für ausgehende Informationen',
'Outgoing SMS handler': 'SMS-Handler für ausgehende Informationen',
'Overall Hazards': 'Gefahren insgesamt',
'Overhead falling hazard': 'Gefahr fallender Objekte',
'Overland Flow Flood': 'Überflutung',
'Owned By (Organization/Branch)': 'Gehört (Organisation/Niederlassung)',
'Owned Records': 'Eigene Datensätze',
'Owned Resources': 'Eigene Ressourcen',
'Ownership': 'Eigentum',
'Owning Organization': 'In Eigentum von',
'PIN number': 'PIN Nummer',
'PIN': 'PIN',
'PL Women': 'PL Frauen',
'Pack': 'Packung',
'Packs': 'Packungen',
'Paid': 'Bezahlt',
'Parameters': 'Parameter',
'Parapets, ornamentation': 'Geländer, Verzierung',
'Parent Office': 'Übergeordnetes Büro',
'Parent needs to be of the correct level': 'Übergeordnetes Element muss auf der richtigen Stufe sein',
'Parent needs to be set for locations of level': 'Ein übergeordnetes Element muss für Gebiete/Standorte dieser Stufe existieren',
'Parent needs to be set': 'Ein übergeordnetes Element muss definiert werden',
'Parent': 'Übergeordnetes Element',
'Parents/Caregivers missing children': 'Eltern/Pfleger vermissen Kinder',
'Parser Connections': 'Parser Verbindungen',
'Parsers': 'Parser',
'Partial': 'partiell',
'Participant': 'Teilnehmer',
'Pashto': 'Paschtu',
'Pass': 'Übergeben',
'Passport': 'Reisepass',
'Password': 'Passwort',
'Path': 'Pfad',
'Pathology': 'Pathologie',
'Patients': 'Patienten',
'Payload Height (m)': 'Ladekapazität Höhe (m)',
'Payload Length (m)': 'Ladekapazität Länge (m)',
'Payload Volume (m3)': 'Ladekapazität Volumen (m3)',
'Payload Weight (kg)': 'Ladekapazität Gewicht (kg)',
'Payload Width (m)': 'Ladekapazität Breite (m)',
'Pediatric ICU': 'Kinderklinik ICU',
'Pediatric Psychiatric': 'Kinderpsychiatrie',
'Pediatrics': 'Kinderheilkunde',
'Peer Details': 'Details zu Peers',
'Peer Registration Details': 'Details zur Peer-Registrierung',
'Peer Registration Request': 'Anfrage zu Peer-Registrierung',
'Peer Registration': 'Peer-Registrierung',
'Peer Type': 'Peer Typ',
'Peer UID': 'Peer UID',
'Peer added': 'Peer hinzugefügt',
'Peer deleted': 'Peer gelöscht',
'Peer not allowed to push': 'Peer ist nicht für das pushen von Daten zugelassen',
'Peer registration request added': 'Anfrage zu Peer-Registrierung hinzugefügt',
'Peer registration request deleted': 'Anfrage zu Peer-Registrierung gelöscht',
'Peer registration request updated': 'Anfrage zu Peer-Registrierung aktualisiert',
'Peer updated': 'Peer aktualisiert',
'Peer': 'Peer',
'Pending Requests': 'Anstehende Anfragen',
'Pending': 'Anstehend',
'People Needing Food': 'Personen die Nahrungsmittel brauchen',
'People Needing Shelter': 'Personen die Unterkünfte brauchen',
'People Needing Water': 'Personen die Wasser brauchen',
'People Reservation': 'Gruppe reservieren',
'People Registration': 'Person registrieren',
'People Trapped': 'Eingeschlossene Personen',
'People': 'Personen',
'Performance Rating': 'Ergebnisbeurteilung',
'Permanent Home Address': 'Dauerhafte Heimatadresse',
'Person 1, Person 2 are the potentially duplicate records': 'Person 1 und Person 2 sind möglicherweise Duplikate',
'Person De-duplicator': 'Dubletten in Personen auflösen',
'Person Details': 'Details zur Person',
'Person Registry': 'Personendatenbank',
'Person added to Group': 'Person einer Gruppe hinzugefügt',
'Person added to Team': 'Person einem Team hinzugefügt',
'Person added': 'Person hinzugefügt',
'Person deleted': 'Person gelöscht',
'Person details updated': 'Details zur Person aktualisiert',
'Person interviewed': 'Person befragt',
'Person or OU': 'Person oder Organisationseinheit',
'Person who has actually seen the person/group.': 'Person, die kürzlich die Person/Gruppe gesehen hat',
'Person/Group': 'Person/Gruppe',
'Personal Data': 'Persönliche Daten',
'Personal Effects Details': 'Details zur persönlichen Habe',
'Personal Effects': 'Persönliche Habe',
'Personal Map': 'Persönliche Karte',
'Personal Profile': 'Persönliches Profil',
'Personal impact of disaster': 'Persönliche Auswirkung der Katastrophe',
'Persons in institutions': 'Personen in Institutionen',
'Persons with disability (mental)': 'Personen mit Behinderungen (psychischen)',
'Persons with disability (physical)': 'Personen mit Behinderungen (körperlichen)',
'Person': 'Person',
'Persons by Age Group': 'Personen nach Altersgruppen',
'Persons by Gender': 'Personen nach Geschlecht',
'Persons': 'Personen',
'Phone 1': 'Telefon 1',
'Phone 2': 'Telefon 2',
'Phone #': 'Telefon #',
'Phone': 'Telefon',
'Phone/Business': 'Telefon/Geschäftlich',
'Phone/Emergency': 'Telefon/Notfall',
'Phone/Exchange (Switchboard)': 'Telefon/Exchange (Hauptschalttafel)',
'Photo Details': 'Foto Details',
'Photo Taken?': 'Foto gemacht?',
'Photo added': 'Foto hinzugefügt',
'Photo deleted': 'Foto gelöscht',
'Photo updated': 'Foto aktualisiert',
'Photo': 'Foto',
'Photograph': 'Fotografie',
'Photos': 'Fotos',
'Physical Description': 'Physische Beschreibung',
'Physical Safety': 'Physische Sicherheit',
'Picture upload and finger print upload facility': 'Einrichtung um Foto und Fingerabdruck hochzuladen',
'Picture': 'Bild',
'Place of Recovery': 'Ort der Wiederherstellung',
'Place on Map': 'Auf Karte plazieren',
'Places for defecation': 'Plätze für Kotablagerung',
'Places the children have been sent to': 'Orte an die Kinder geschickt wurden',
'Planned': 'Geplant',
'Planned on': 'Geplant am',
'Planning': 'In Planung',
'Playing': 'Wiedergabe',
'Please correct all errors.': 'Korrigieren Sie bitte alle Fehler.',
'Please enter a first name': 'Bitte geben Sie den Vornamen ein',
'Please enter a site OR a location': 'Bitte geben Sie eine Stelle oder einen Standort/Gebiet an',
'Please enter the first few letters of the Person/Group for the autocomplete.': 'Bitte geben sie die ersten Buchstaben der Person/Gruppe ein um die Autovervollständigung zu starten.',
'Please enter the recipient': 'Bitte geben sie den Empfänger ein',
'Please fill this!': 'Bitte hier einfüllen!',
'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened.': 'Bitte geben Sie die URL der Seite auf die sie sich beziehen, eine Beschreibung dessen, was sie erwartet haben & was wirklich passiert ist.',
'Please report here where you are:': 'Bitte hier angeben, wo sie sich befinden:',
'Please select another level': 'Bitte wählen Sie eine andere Ebene',
'Please select': 'Treffen Sie eine Auswahl',
'Please sign-up with your Cell Phone as this allows us to send you Text messages. Please include full Area code.': 'Bitte melden Sie sich unter Angabe Ihrer Mobilfunknummer an. Das erlaubt uns Ihnen Textnachrichten zu senden. Bitten verwenden Sie die internationale Nummer ein (Deutschland: 0049.... - ohne führende 0).',
'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved.': 'Bitte geben Sie alle Probleme und Hindernisse bei der korrekten Behandlung der Krankheit an, im Detail (in Zahlen, falls zutreffend). Sie können auch Vorschläge machen wie die Situation verbessert werden kann.',
'Please use this field to record any additional information, including a history of the record if it is updated.': 'Bitte dieses Feld verwenden um zusätzliche Informationen zu hinterlegen, einschließlich der Datensatzhistorie, falls dieser aktualisiert wurde.',
'Please use this field to record any additional information, including any Special Needs.': 'Bitte dieses Feld verwenden um zusätzliche Informationen, einschließlich besonderer Anforderungen, zu hinterlegen.',
'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.': 'Bitte dieses Feld verwenden um zusätzliche Informationen, wie die Ushahidi Vorgangs-ID, zu hinterlegen, einschließlich der Datensatzhistorie, falls dieser aktualisiert wurde.',
'Pledge Support': 'Zusage von Unterstützung',
'PO': 'PO',
'PO Number': 'PO Nummer',
'PoI Types': 'PoI Typen',
'POIS': 'PoIs',
'Point': 'Point',
'Points of Interest': 'Points of Interest',
'Poisoning': 'Vergiftung',
'Poisonous Gas': 'Gasvergiftung',
'Police': 'Polizei',
'Pollution and other environmental': 'Verschmutzung und andere Umwelt',
'Polygon reference of the rating unit': 'Polygonale Abgrenzung der Bewertungseinheit',
'Poor': 'Arm',
'Population Statistic Details': 'Details zur Bevölkerungsstatistik',
'Population Statistic added': 'Bevölkerungsstatistik hinzugefügt',
'Population Statistic deleted': 'Bevölkerungsstatistik gelöscht',
'Population Statistic updated': 'Bevölkerungsstatistik aktualisiert',
'Population Statistics': 'Bevölkerungsstatistiken',
'Population and number of households': 'Bevölkerungs- und Haushaltsanzahl',
'Population': 'Belegung',
'Popup Fields': 'Popup Felder',
'Popup Label': 'Popup Beschriftung',
'Porridge': 'Haferbrei',
'Port Closure': 'Hafenschließung',
'Port': 'Port',
'Portable App': 'Portable App',
'Position Catalog': 'Standpunktkatalog',
'Position added': 'Standpunkt hinzugefügt',
'Position deleted': 'Standpunkt gelöscht',
'Position updated': 'Standpunkt aktualisiert',
'Positions': 'Positionen',
'Postcode': 'PLZ',
'Posted on': 'Geposted auf',
'Posts can be either full pages, embedded within other pages or part of a series (for use as news items or blog posts)': 'Posts können entweder komplette Seiten, die in anderen Seiten eingebettet wurden oder Teile einer Serie sein (z.B. zur Nutzung als Newseintrag oder Blog Post)',
'Poultry restocking, Rank': 'Geflügel auffüllen, Rang',
'Poultry': 'Geflügel',
'Pounds': 'Pfund',
'Power Failure': 'Netzausfall',
'Power': 'Stromversorgung',
'Powered by Sahana Eden': 'Powered by Sahana Eden',
'Pre-cast connections': 'Beton Verbindungen',
'Preferred Name': 'Bevorzugter Name',
'Pregnant women': 'Schwangere Frauen',
'Preliminary': 'Vorläufig',
'Presence Condition': 'Anwesenheitsbedingung',
'Presence Log': 'Anwesenheitsprotokollierung',
'Presence in the shelter': 'Anwesend in Unterkunft',
'Presence': 'Anwesenheit',
'Previous': 'Vorherige',
'Primary Occupancy': 'Primäre Belegung',
'Priority from 1 to 9. 1 is most preferred.': 'Priorität von 1 bis 9. 1 ist die am meisten bevorzugte.',
'Priority': 'Priorität',
'Privacy': 'Datenschutz',
'Private': 'Privat',
'Problem Administration': 'Verwaltung von Problemen',
'Problem Details': 'Problemdetails',
'Problem Group': 'Problemgruppe',
'Problem Title': 'Problemtitel',
'Problem added': 'Problem hinzugefügt',
'Problem connecting to twitter.com - please refresh': 'Verbindungsproblem zu twitter.com - bitte neu laden',
'Problem deleted': 'Problem gelöscht',
'Problem updated': 'Problem aktualisiert',
'Problem': 'Problem',
'Problems': 'Probleme',
'Procedure': 'Vorgehensweise',
'Process Received Shipment': 'Bearbeiten der erhaltenen Lieferung',
'Process Shipment to Send': 'Vorbereiten der Lieferung zum Versenden',
'Procurement & Logistics cost': 'Kosten für Beschaffung & Logistik',
'Profession': 'Beruf',
'Profile': 'Profil',
'Profile Details': 'Details zum Profil',
'Profile Picture?': 'Profilbild?',
'Program Hours (Month)': 'Programmstunden (Monat)',
'Program Hours (Year)': 'Programmstunden (Jahr)',
'Program': 'Programm',
'Programs': 'Programme',
'Proj4js definition': 'Proj4js Definition',
'Project Details': 'Details zum Projekt',
'Project Name': 'Name des Projekts',
'Project Status': 'Projektstatus',
'Project added': 'Projekt hinzugefügt',
'Project deleted': 'Projekt gelöscht',
'Project has no Lat/Lon': 'Projekt hat keine Geographische Koordinate (lat/lon)',
'Project updated': 'Projekt aktualisiert',
'Project': 'Projekt',
'Projection Details': 'Details zur Kartenprojektion',
'Projection added': 'Kartenprojektion hinzugefügt',
'Projection deleted': 'Kartenprojektion gelöscht',
'Projection updated': 'Kartenprojektion aktualisiert',
'Projection': 'Kartenprojektion',
'Projections': 'Kartenprojektionen',
'Projects': 'Projekte',
'Property reference in the council system': 'Anlage im Behördensystem',
'Proposed': 'Vorgeschlagen',
'Protected resource': 'Geschützte Ressource',
'Protection': 'Schutz',
'Provide Metadata for your media files': 'Stellen Sie Metadaten für Ihre Mediadateien zur Verfügung.',
'Provide an optional sketch of the entire building or damage points. Indicate damage points.': 'Stellen Sie optional eine Skizze des gesamten Gebäudes oder der beschädigten Objekte bereit. Markieren Sie dabei die beschädigten Stellen.',
'Psychiatrics/Adult': 'Psychiatrie/Erwachsene',
'Psychiatrics/Pediatric': 'Psychiatrie/Kinder',
'Public Event': 'Öffentliches Ereignis',
'Public and private transportation': 'Öffentlicher und privater Transport',
'Public assembly': 'Öffentliche Versammlung',
'Public': 'Öffentlich',
'Publish': 'Veröffentlichen',
'Published On': 'Veröffentlicht am',
'Pull tickets from external feed': 'Tickets von externen Feeds laden',
'Purchase Date': 'Kaufdatum',
'Purchase Price': 'Kaufpreis',
'Purchase': 'Kauf',
'Purpose': 'Zweck',
'Push tickets to external system': 'Transferiere Tickets zu externen System',
'Pyroclastic Flow': 'Pyroklastischer Strom',
'Pyroclastic Surge': 'Pyroklastische Welle',
'Python Serial module not available within the running Python - this needs installing to activate the Modem': 'Python Serial-Modul ist innerhalb der aktiven Python Umgebung nicht verfügbar - dieses muss installiert werden um das Modem zu aktivieren.',
'Python needs the ReportLab module installed for PDF export': 'Python braucht das ReportLab-Modul für die PDF-Ausgabe. Dies ist derzeit nicht installiert!',
'Quality/Mode': 'Qualität/Modus',
'Quantity Committed': 'Menge bestätigt',
'Quantity Fulfilled': 'Menge erfüllt',
'Quantity range': 'Mengenumfang',
'Quantity Received': 'Erhaltene Menge',
'Quantity Returned': 'Zurückgegebene Menge',
'Quantity Sent': 'Gesendete Menge',
'Quantity in Transit': 'Menge in Transit',
'Quantity': 'Menge',
'Quarantine': 'Quarantäne',
'Queries': 'Abfragen',
'Query': 'Abfrage',
'Queryable?': 'Abfragbar?',
'RC frame with masonry infill': 'RC Rahmen mit Mauerwerkfüllung',
'RECORD A': 'DATENSATZ A',
'RECORD B': 'DATENSATZ B',
'Race': 'Rasse',
'Radio Callsign': 'Radio Rufzeichen',
'Radiological Hazard': 'Strahlungsgefahr',
'Radiology': 'Radiologie',
'Railway Accident': 'Eisenbahnunfall',
'Railway Hijacking': 'Eisenbahnentführung',
'Rain Fall': 'Regenfall',
'Rapid Assessment Details': 'Details zur Schnell-Beurteilung',
'Rapid Assessment added': 'Schnell-Beurteilung hinzugefügt',
'Rapid Assessment deleted': 'Schnell-Beurteilung gelöscht',
'Rapid Assessment updated': 'Schnell-Beurteilung aktualisiert',
'Rapid Assessment': 'Schnell-Beurteilung',
'Rapid Assessments & Flexible Impact Assessments': 'Schnell-Beurteilungen & flexible Abschätzungen der Auswirkungen',
'Rapid Assessments': 'Schnell-Beurteilungen',
'Rapid Close Lead': 'Schnell Führung schliessen',
'Rapid Data Entry': 'Schnelle Dateneingabe',
'Raw Database access': 'Direkter Datenbankzugriff',
'Ready for Transfer': 'Transferbereit',
'Receive New Shipment': 'Neue Lieferung erhalten',
'Receive Shipment': 'Lieferung erhalten',
'Receive this shipment?': 'Lieferung erhalten?',
'Receive': 'Erhalten',
'Received By Person': 'Erhalten von einer Person',
'Received By': 'Erhalten von',
'Received Item Details': 'Details zum erhaltenen Artikel',
'Received Item deleted': 'Erhaltener Artikel gelöscht',
'Received Item updated': 'Erhaltener Artikel aktualisiert',
'Received Shipment Details': 'Details zur erhaltenen Lieferung',
'Received Shipment canceled and items removed from Inventory': 'Erhaltene Lieferung abgebrochen und Artikel aus dem Bestand entfernt',
'Received Shipment canceled': 'Erhaltene Lieferung abgebrochen',
'Received Shipment updated': 'Erhaltene Lieferung aktualisiert',
'Received Shipments': 'Erhaltene Lieferung',
'Received': 'Erhalten',
'Received date': 'Eingangsdatum',
'Received/Incoming Shipments': 'Erhaltene/Einkommende Lieferungen',
'Receiving and Sending Items': 'Erhalten und Versenden von Artikeln',
'Recipient': 'Empfänger',
'Recipients': 'Empfänger',
'Recipient(s)': 'Empfänger',
'Recommendations for Repair and Reconstruction or Demolition': 'Empfehlungen für Reparatur und Wiederherstellung oder Abriß',
'Record Details': 'Details zum Datensatz',
'Record Saved': 'Datensatz gesichert',
'Record added': 'Datensatz hinzugefügt',
'Record any restriction on use or entry': 'Registrieren jeglicher Einschränkung bei der Nutzung oder Eintragung',
'Record deleted': 'Datensatz gelöscht',
'Record last updated': 'Datensatz zuletzt aktualisiert',
'Record not found!': 'Datensatz nicht gefunden!',
'Record not found': 'Datensatz nicht gefunden',
'Record updated': 'Datensatz aktualisiert',
'Record': 'Datensatz',
'Recording and Assigning Assets': 'Aufzeichnen und Zuweisen von Anlagen',
'Records': 'Datensätze',
'Recovery Request added': 'Bergungsanfrage hinzugefügt',
'Recovery Request deleted': 'Bergungsanfrage gelöscht',
'Recovery Request updated': 'Bergungsanfrage aktualisiert',
'Recovery Request': 'Bergungsanfrage',
'Recovery Requests': 'Bergungsanfragen',
'Recovery': 'Bergung',
'Recurring Cost': 'Wiederkehrende Kosten',
'Recurring Request?': 'Wiederkehrende Anfrage?',
'Recurring cost': 'Wiederkehrende Kosten',
'Recurring costs': 'Wiederkehrende Kosten',
'Recurring': 'Wiederkehrend',
'Red Cross / Red Crescent': 'Rotes Kreuz / Roter Halbmond',
'Red': 'Rot',
'Reference Document': 'Referenzdokument',
'Refresh Rate (seconds)': 'Aktualisierungsrate (Sekunden)',
'Refugees': 'Flüchtlinge',
'Refugee Support Database': 'Flüchtlingshilfe-Datenbank',
'Region': 'Regierungsbezirk',
'Region Location': 'Standort Region',
'Regional': 'Regional',
'Regions': 'Regionen',
'Register Person into this Camp': 'Registrieren der Person in dieses Camp',
'Register Person into this Shelter': 'Registrieren der Person in diese Unterkunft',
'Register Person': 'Registrieren einer Person',
'Register them as a volunteer': 'Als Freiwillige registrieren',
'Register': 'Registrieren',
'Registered People': 'Registrierte Personen',
'Registered users can': 'Registrierte Benutzer können',
'Registered on': 'Registriert am',
'Registration Date': 'Registrierungsdatum',
'Registration Details': 'Details zur Registrierung',
'Registration added': 'Registrierung hinzugefügt',
'Registration entry deleted': 'Anmeldungseintrag gelöscht',
'Registration is still pending approval from Approver (%s) - please wait until confirmation received.': 'Die Registrierung wartet noch auf die Genehmigung von der Qualifizierenden Stelle (%s) - bitte warten Sie bis Sie eine Bestätigung erhalten',
'Registration updated': 'Anmeldung aktualisiert',
'Registration': 'Registrierung',
'Rehabilitation/Long Term Care': 'Rehabilitation/Langfristige Pflege',
'Reinforced masonry': 'Mauerwerk verstärkt',
'Rejected': 'Zurückgewiesen',
'Relief Team': 'Unterstützungsteam',
'Relief': 'Unterstützung',
'Religious Leader': 'Religiöser Führer',
'Religious': 'Religiös',
'Relocate as instructed in the <instruction>': 'Verlagern wie in der <instruction> angewiesen',
'Remarks': 'Bemerkungen',
'Remove Asset from this event': 'Anlage von diesem Ereignis entfernen',
'Remove Asset from this scenario': 'Anlage von diesem Szenario entfernen',
'Remove Facility from this event': 'Einrichtung von diesem Ereignis entfernen',
'Remove Facility from this scenario': 'Einrichtung von diesem Szenario entfernen',
'Remove Human Resource from this event': 'Personelle Ressource von diesem Ereignis entfernen',
'Remove Human Resource from this scenario': 'Personelle Ressource von diesem Szenario entfernen',
'Remove Incident Type from this event': 'Vorfallstyp von diesem Ereignis entfernen',
'Remove Item from Inventory': 'Artikel aus Bestand entfernen',
'Remove Layer from Profile': 'Löschen der Kartenebene aus dem Profil',
'Remove Map Profile from this event': 'Kartenkonfiguration von diesem Ereignis entfernen',
'Remove Map Profile from this scenario': 'Kartenkonfiguration von diesem Szenario entfernen',
'Remove Person from Group': 'Person aus Gruppe entfernen',
'Remove Person from Team': 'Person aus Team entfernen',
'Remove existing data before import': 'Löschen der existierenden Daten vor dem Import',
'Remove this asset from this event': 'Diese Anlage vom Ereignis entfernen',
'Remove this asset from this scenario': 'Diese Anlage vom Szenario entfernen',
'Remove': 'Entfernen',
'Removed from Group': 'Aus Gruppe entfernt',
'Removed from Team': 'Aus Team entfernt',
'Repacked By': 'Umgepackt von',
'Repair': 'Reparieren',
'Repairs': 'Reparaturen',
'Repaired': 'Repariert',
'Repeat your password': 'Kennwort wiederholen',
'Replace if Master': 'Ersetzen wenn Master',
'Replace if Newer': 'Ersetze, falls neuer',
'Replace': 'Ersetzen',
'Report Another Assessment...': 'Melde andere Beurteilung...',
'Report Details': 'Details zum Bericht',
'Report Options': 'Optionen zum Bericht',
'Report Options:': 'Optionen zum Bericht:',
'Report Types Include': 'Berichtstypen beinhalten',
'Report added': 'Bericht hinzugefügt',
'Report deleted': 'Bericht gelöscht',
'Report my location': 'Meinen Standort melden',
'Report of': 'Bericht von',
'Report the contributing factors for the current EMS status.': 'Melde die beitragenen Faktoren für den aktuellen EMS Status',
'Report the contributing factors for the current OR status.': 'Melde die beitragenden Faktoren für den aktuellen OR Status.',
'Report them as found': 'Als gefunden melden',
'Report them missing': 'Als vermisst melden',
'Report updated': 'Bericht aktualisiert',
'Report': 'Bericht',
'Report To': 'Melden bei',
'Reported To': 'Gemeldet bei',
'Reporter Name': 'Name des Meldenden',
'Reporter': 'Meldender',
'Reporting on the projects in the region': 'Berichterstattung über die Projekte in der Region',
'Reports': 'Berichte',
'Repositories': 'Repositories',
'REQ': 'Anfrage',
'REQ Number': 'Anfragenummer',
'RSS Channels': 'RSS Kanäle',
'RSS Posts': 'RSS Posts',
'Request Added': 'Anfrage hinzugefügt',
'Request Canceled': 'Anfrage storniert',
'Request Details': 'Details zur Anfrage',
'Request Templates': 'Anfragevorlagen',
'Requested For Facility': 'Angefragt für Einrichtung',
'Request From': 'Anfrage von',
'Request Item Details': 'Details zur Anfrage nach Artikel',
'Request Item added': 'Anfrage nach Artikel hinzugefügt',
'Request Item deleted': 'Anfrage nach Artikel entfernt',
'Request Item from Available Inventory': 'Anfrage nach Artikel aus verfügbarem Bestand',
'Request Item updated': 'Anfrage nach Artikel aktualisiert',
'Request Item': 'Angefragter Artikel',
'Request Items': 'Angefragte Artikel',
'Request Status': 'Anfragestatus',
'Request Type': 'Anfragetyp',
'Request Updated': 'Anfrage aktualisiert',
'Request added': 'Anfrage hinzugefügt',
'Request deleted': 'Anfrage gelöscht',
'Request for Role Upgrade': 'Rollenupgrade anfordern',
'Request updated': 'Anfrage aktualisiert',
'Request': 'Anfrage',
'Requests': 'Anfragen',
'Request, Response & Session': 'Anfrage, Antwort & Sitzung',
'Requested By Facility': 'Angefragt von Einrichtung',
'Requested By': 'Angefragt durch',
'Requested From': 'Angefragt von',
'Requested Items': 'Angefragte Artikel',
'Requested Skills': 'Angefragte Fähigkeiten',
'Requested by': 'Angefragt durch',
'Requested on': 'Angefragt am',
'Requested': 'Angefragt',
'Requester': 'Anfragender',
'Requests Management': 'Anfragenverwaltung',
'Requests': 'Anfragen',
'Required Skills': 'Benötigte Fähigkeiten',
'Requires Login!': 'Anmeldung erforderlich!',
'Rescue and recovery': 'Rettung und Bergung (SAR)',
'Reset Password': 'Kennwort zurücksetzen',
'Reset': 'Zurücksetzen',
'Residents': 'Bewohner',
'Resolve Conflict': 'Konflikt lösen',
'Resolve link brings up a new screen which helps to resolve these duplicate records and update the database.': 'Das verfolgen des Links lässt eine neue Anzeige erscheinen die hilft doppelte Einträge aufzulösen und die Datenbank zu aktualisieren',
'Resolve': 'Auflösen',
'Resource Details': 'Details zur Ressource',
'Resource Inventory': 'Ressourcenbestand',
'Resource Type': 'Ressourcentyp',
'Resource added': 'Ressource hinzugefügt',
'Resource deleted': 'Ressource gelöscht',
'Resource updated': 'Ressource aktualisiert',
'Resource': 'Ressource',
'Resources': 'Ressourcen',
'Respiratory Infections': 'Atemwegsinfektionen',
'Response': 'Antwort',
'Restricted Access': 'Eingeschränkter Zugriff',
'Restricted Use': 'Eingeschränkte Verwendung',
'Result': 'Ergebnis',
'Results': 'Ergebnisse',
'Retail Crime': 'Einzelhandel Kriminalität',
'Retrieve Password': 'Kennwort abrufen',
'Return to Request': 'Zurück zur Anfrage',
'Return': 'Zurück',
'Returned From': 'Zurückgegeben von',
'Returned': 'Zurückgegeben',
'Review Incoming Shipment to Receive': 'Überprüfung der eingehenden Lieferung für die Annahme',
'Rice': 'Reis',
'Rich Text?': 'Rich Text?',
'Riot': 'Aufruhr',
'River Details': 'Details zum Fluss',
'River added': 'Fluss hinzugefügt',
'River deleted': 'Fluss gelöscht',
'River updated': 'Fluss aktualisiert',
'River': 'Fluss',
'Rivers': 'Flüsse',
'Road Accident': 'Verkehrsunfall',
'Road Closed': 'Straße gesperrt',
'Road Conditions': 'Zustand der Straßen',
'Road Delay': 'Verkehrsverzögerung',
'Road Hijacking': 'Straßenentführung',
'Road Usage Condition': 'Strassennutzungszustand',
'Role Details': 'Details zur Rolle',
'Role Name': 'Name der Rolle',
'Role Required': 'Erforderliche Rolle',
'Role Updated': 'Rolle aktualisiert',
'Role added': 'Rolle hinzugefügt',
'Role deleted': 'Rolle gelöscht',
'Role updated': 'Rolle aktualisiert',
'Role': 'Rolle',
'Role-based': 'Rollenbasiert',
'Roles Permitted': 'Zulässige Rollen',
'Roles': 'Rollen',
'Roll On Roll Off Berth': 'Fähranlegestelle',
'Roof tile': 'Dachziegel',
'Roofs, floors (vertical load)': 'Dächer, Böden (vertikale Belastung)',
'Room Details': 'Details zum Raum',
'Room added': 'Raum hinzugefügt',
'Room deleted': 'Raum gelöscht',
'Room updated': 'Raum aktualisiert',
'Room': 'Raum',
'Rooms': 'Räume',
'Rows in table': 'Zeilen in der Tabelle',
'Rows selected': 'Ausgewählte Zeilen',
'Run Interval': 'Intervall der Läufe',
'Runway Length (m)': 'Länge der Landebahn (m)',
'Runway Surface': 'Oberfläche der Landebahn',
'Runway Width (m)': 'Breite der Landebahn (m)',
'Running Cost': 'Laufzeitkosten',
'SMS Modem Channels': 'SMS Modem Kanäle',
'SMS Outbound Gateways': 'SMS Ausgangsgateaways',
'SMS SMTP Channels': 'SMS SMTP Kanäle',
'SMS WebAPI Channels': 'SMS WebAPI Kanäle',
'Safe environment for vulnerable groups': 'Sichere Umgebung für gefährdete Gruppen',
'Safety Assessment Form': 'Formular für Sicherheitsbeurteilung',
'Safety of children and women affected by disaster?': 'Ist die Sicherheit von Kindern und Frauen durch die Katastrophe (resp. das Unglück) beeinträchtigt?',
'Sahana Blue': 'Sahana Blau',
'Sahana Community Chat': 'Sahana Gemeinschaft Chat',
'Sahana Eden <=> Other': 'Sahana Eden <=> Andere',
'Sahana Eden Humanitarian Management Platform': 'Sahana Eden - OpenSource Management-Plattform für humanitäre Notsituationen',
'Sahana Eden Website': 'Sahana Eden Internetseite',
'Sahana Steel': 'Sahana Stahl',
'Sahana access granted': 'Sahana Zugriff gewährt',
'Salted Fish': 'Gesalzener Fisch',
'Sanitation problems': 'Sanitäre Probleme',
'Satellite': 'Satellit',
'Saturday': 'Samstag',
'Save: Default Lat, Lon & Zoom for the Viewport': 'Speichern: Standardmäßig Länge/Breite und Zoomfaktor',
'Save': 'Speichern',
'Saved.': 'Gespeichert.',
'Saved Filters': 'Gespeicherte Filter',
'Saving...': 'Wird gespeichert...',
'Scale of Results': 'Umfang der Ergebnisse',
'Scenario Details': 'Details zum Szenario',
'Scenario added': 'Szenario hinzugefügt',
'Scenario deleted': 'Szenario gelöscht',
'Scenario updated': 'Szenario aktualisiert',
'Scenario': 'Szenario',
'Scenarios': 'Szenarios',
'Schedule': 'Zeitplan',
'School Closure': 'Schulschließung',
'School Lockdown': 'Schule geschlossen',
'School Teacher': 'Schullehrer',
'School activities': 'Schulaktivitäten',
'School assistance': 'Schulunterstützung',
'School attendance': 'Schulbesuch',
'School destroyed': 'Schule zerstört',
'School heavily damaged': 'Schule stark beschädigt',
'School tents received': 'Schulzelte erhalten',
'School tents, source': 'Herkunft der Schulzelte',
'School used for other purpose': 'Schule wird für andere Zwecke verwendet',
'School': 'Schule',
'School/studying': 'Schule/lernen',
'Schools': 'Schulen',
'Seaports': 'Seehafen',
'Search Activities': 'Suchaktivitäten',
'Search Activity Report': 'Bericht über Suchaktivitäten',
'Search Addresses': 'Suche nach Adressen',
'Search All Requested Items': 'Alle angefordeten Artikel durchsuchen',
'Search All Requested Skills': 'Alle angefragten Fähigkeiten durchsuchen',
'Search Alternative Items': 'Suche nach alternativen Artikeln',
'Search Assessment Summaries': 'Suche Beurteilungszusammenfassungen',
'Search Assessments': 'Suche Beurteilungen',
'Search Asset Log': 'Suche Anlageprotokoll',
'Search Assets': 'Suche Anlagen',
'Search Baseline Type': 'Referenzdatumstyp suchen',
'Search Baselines': 'Referenzdatum suchen',
'Search Brands': 'Marken suchen',
'Search Budgets': 'Budgets suchen',
'Search Bundles': 'Produktpakete suchen',
'Search Camp Services': 'Camp Leistungen suchen',
'Search Camp Types': 'Camp Typen suchen',
'Search Camps': 'Camps suchen',
'Search Catalog Items': 'Katalog Einträge suchen',
'Search Catalogs': 'Kataloge suchen',
'Search Certificates': 'Zertifikate suchen',
'Search Certifications': 'Zertifizierungen suchen',
'Search Checklists': 'Checklisten suchen',
'Search Cluster Subsectors': 'Cluster Teilbereiche suchen',
'Search Clusters': 'Cluster suchen',
'Search Commitment Items': 'Zugesagte Artikel suchen',
'Search Commitments': 'Zusagen suchen',
'Search Competencies': 'Kompetenzen suchen',
'Search Competency Ratings': 'Kompetenzeinstufungen suchen',
'Search Contact Information': 'Nach Kontaktinformationen suchen',
'Search Contacts': 'Nach Kontakten suchen',
'Search Course Certificates': 'Suchen nach Kurszertifikaten',
'Search Courses': 'Kurse suchen',
'Search Credentials': 'Qualifikationen suchen',
'Search Documents': 'Dokumente suchen',
'Search Donors': 'Spender suchen',
'Search Entries': 'Einträge suchen',
'Search Events': 'Ereignisse suchen',
'Search Facilities': 'Einrichtungen suchen',
'Search Feature Layers': 'Objekt-Ebenen suchen',
'Search Flood Reports': 'Flutberichte suchen',
'Search Groups': 'Gruppen suchen',
'Search Human Resources': 'Personelle Ressourcen suchen',
'Search Identity': 'Identität suchen',
'Search Images': 'Bilder suchen',
'Search Impact Type': 'Auswirkungstypen suchen',
'Search Impacts': 'Auswirkungen suchen',
'Search Incident Reports': 'Vorfallberichte suchen',
'Search Inventory Items': 'Bestandsartikel suchen',
'Search Inventory items': 'Bestandsartikel suchen',
'Search Item Categories': 'Artikelkategorien suchen',
'Search Item Packs': 'Artikelpakete suchen',
'Search Items': 'Artikel suchen',
'Search Job Roles': 'Tätigkeiten suchen',
'Search Keys': 'Schlüssel suchen',
'Search Kits': 'Ausstattungen (Kits) suchen',
'Search Layers': 'Kartenebenen suchen',
'Search Level 1 Assessments': 'Suche Stufe 1 Beurteilungen',
'Search Level 2 Assessments': 'Suche Stufe 2 Beurteilungen',
'Search Locations': 'Gebiet/Standort suchen',
'Search Log Entry': 'Protokolleintrag suchen',
'Search Map Profiles': 'Kartenkonfiguration suchen',
'Search Markers': 'Marker/Symbol suchen',
'Search Members': 'Mitglied suchen',
'Search Membership': 'Mitgliedschaft suchen',
'Search Missions': 'Aufträge suchen',
'Search Need Type': 'Anforderungstyp suchen',
'Search Needs': 'Anforderungen suchen',
'Search Offices': 'Büros suchen',
'Search Organizations': 'Organisationen suchen',
'Search Peer': 'Peer Suchen',
'Search Personal Effects': 'Persönliche Habe suchen',
'Search Persons': 'Personen suchen',
'Search Photos': 'Fotos suchen',
'Search Population Statistics': 'Bevölkerungsstatistiken suchen',
'Search Positions': 'Positionen suchen',
'Search Problems': 'Probleme suchen',
'Search Projections': 'Kartenprojektionen suchen',
'Search Projects': 'Projekte suchen',
'Search Queries': 'Suchabfragen',
'Search Rapid Assessments': 'Schnell-Beurteilung suchen',
'Search Received Items': 'Erhaltene Artikel suchen',
'Search Received Shipments': 'Erhaltene Lieferungen suchen',
'Search Records': 'Datensätze suchen',
'Search Registrations': 'Registrierungen suchen',
'Search Registration Request': 'Registrierungsanfragen suchen',
'Search Report': 'Berichte suchen',
'Search Request Items': 'Angefragte Artikel suchen',
'Search Request': 'Anfrage suchen',
'Search Requested Items': 'Angefragte Artikel suchen',
'Search Requests': 'Anfragen suchen',
'Search Resources': 'Ressourcen suchen',
'Search Rivers': 'Flüsse suchen',
'Search Roles': 'Rollen suchen',
'Search Rooms': 'Räume suchen',
'Search Scenarios': 'Szenarien suchen',
'Search Sections': 'Abschnitte suchen',
'Search Sectors': 'Bereiche suchen',
'Search Sent Items': 'Gesendete Artikel suchen',
'Search Sent Shipments': 'Gesendete Lieferungen suchen',
'Search Service Profiles': 'Leistungsprofile suchen',
'Search Settings': 'Sucheinstellungen',
'Search Shelter Services': 'Unterkunftsleistungen suchen',
'Search Shelter Types': 'Unterkunftsarten suchen',
'Search Shelters': 'Unterkünfte suchen',
'Search Shipped Items': 'Suche über gelieferte Artikel',
'Search Skill Equivalences': 'Fähigkeits-Vergleichbarkeiten suchen',
'Search Skill Provisions': 'Fähigkeits-Bereitstellungen suchen',
'Search Skill Types': 'Fähigkeitstypen suchen',
'Search Skills': 'Fähigkeiten suchen',
'Search Solutions': 'Lösungen suchen',
'Search Staff Types': 'Mitarbeitertypen suchen',
'Search Staff or Volunteer': 'Suche Mitarbeiter oder Freiwillige',
'Search Status': 'Status suchen',
'Search Subscriptions': 'Abonnement suchen',
'Search Subsectors': 'Teilbereiche suchen',
'Search Support Requests': 'Unterstützungsanfragen suchen',
'Search Tasks': 'Aufgaben suchen',
'Search Teams': 'Teams suchen',
'Search Themes': 'Themen suchen',
'Search Tickets': 'Tickets suchen',
'Search Tracks': 'Tracks suchen',
'Search Training Participants': 'Suche Kursteilnehmer',
'Search Trainings': 'Schulung suchen',
'Search Twitter Tags': 'Twitter-Tags suchen',
'Search Units': 'Einheiten suchen',
'Search Users': 'Benutzer suchen',
'Search Volunteer Availability': 'Verfügbarkeit von Freiwilligen suchen',
'Search Volunteers': 'Freiwillige suchen',
'Search Warehouses': 'Warenlager suchen',
'Search and Edit Group': 'Suchen und Bearbeiten von Gruppen',
'Search and Edit Individual': 'Suchen und Bearbeiten von einzelnen Personen',
'Search by Skills': 'Suche nach Fähigkeiten',
'Search by skills': 'Suche nach Fähigkeiten',
'Search for Staff or Volunteers': 'Suche nach Mitarbeitern oder Freiwilligen',
'Search for a Location by name, including local names.': 'Suchen nach Standortnamen, einschließlich lokaler Namen.',
'Search for a Person': 'Suche nach einer Person',
'Search for a Project': 'Suche nach einem Projekt',
'Search for a shipment by looking for text in any field.': 'Suche nach einer Lieferung (Volltextsuche)',
'Search for a shipment received between these dates': 'Suche nach einer erhaltenen Lieferung im Zeitraum',
'Search for an Organization by name or acronym': 'Suche nach einer Organisation nach Namen oder Abkürzung',
'Search for an Organization by name or acronym.': 'Suche nach einer Organisation in Namen und Acronym.',
'Search for an asset by text.': 'Suche Anlage über Text.',
'Search for an item by category.': 'Suche Artikel nach Kategorie.',
'Search for an item by text.': 'Suche Artikel über Text.',
'Search for asset by country.': 'Suche Anlage nach Ländern.',
'Search for office by country.': 'Suche Büro nach Ländern.',
'Search for office by organization.': 'Suche Büro nach Organisation.',
'Search for office by text.': 'Suche Büro über Text',
'Search for Persons': 'Suche nach Personen',
'Search for warehouse by country.': 'Suche Warenlager nach Ländern',
'Search for warehouse by organization.': 'Suche Warenlager nach Organisation',
'Search for warehouse by text.': 'Suche Warenlager über Text',
'Search here for a person record in order to:': 'Hier nach einem Personendatensatz suchen, um zu:',
'Search location in Geonames': 'Ortssuche in Geonames',
'Search messages': 'Suche Nachrichten',
'Search': 'Suchen',
'Searching for different groups and individuals': 'Suche nach verschiedenen Gruppen und Einzelpersonen',
'Secondary Server (Optional)': 'Sekundärer Server (optional)',
'Seconds must be a number between 0 and 60': 'Sekunden müssen eine Zahl zwischen 0 und 60 sein',
'Section Details': 'Details zum Abschnitt',
'Section deleted': 'Abschnitt gelöscht',
'Section updated': 'Abschnitt aktualisiert',
'Sections': 'Abschnitte',
'Sector Details': 'Details zum Bereich ',
'Sector added': 'Bereich hinzugefügt',
'Sector deleted': 'Bereich gelöscht',
'Sector updated': 'Bereich aktualisiert',
'Sector': 'Bereich',
'Sector(s)': 'Bereich(e)',
'Sectors': 'Bereiche',
'Secure Storage Capacity': 'Sichere Lagerkapazität',
'Security Status': 'Sicherheitsstatus',
'Security problems': 'Sicherheitsprobleme',
'Security': 'Sicherheit',
'See All Entries': 'Siehe alle Einträge',
'See all': 'Alles anzeigen',
'See unassigned recovery requests': 'Siehe nicht zugeordnete Bergungsanfragen.',
'Select': 'Auswahl',
'Select All': 'Alles auswählen',
'Select Items from the Request': 'Wählen sie Artikel aus der Anfrage',
'Select Items from this Inventory': 'Wählen sie Artikel aus diesem Bestand',
'Select Land': 'Land auswählen',
'Select Modules for translation': 'Auswahl der Module zum Übersetzen',
'Select a location': 'Wählen Sie einen Ort aus',
'Select a question from the list': 'Wählen sie eine Frage aus der Liste aus',
'Select a range for the number of total beds': 'Wählen sie einen Bereich für die Gesamtanzahl von Betten',
'Select all that apply': 'Wählen Sie alles Zutreffende aus',
'Select an Organization to see a list of offices': 'Wählen Sie eine Organisation aus, um eine Liste der zugehörigen Büros anzuzeigen.',
'Select resources to import': 'Wählen Sie Ressourcen zum Importieren aus',
'Select the overlays for Assessments and Activities relating to each Need to identify the gap.': 'Wählen sie die overlays für die Beurteilungen und die zugehörigen Aktivitäten um die Differenz zu identifizieren.',
'Select the person assigned to this role for this project.': 'Wählen Sie die Person aus, die mit dieser Rolle dem Projekt zugeordnet werden soll.',
'Select to show this configuration in the Regions menu.': "Auswählen, um diese Konfiguration im Menü 'Regionen' anzuzeigen.",
'Selects whether to use a Modem, Tropo or other Gateway for sending out SMS': 'Auswahl ob ein Modem, Tropo oder eine andere Schnittstelle zum Versand von SMS verwendet werden soll.',
'Send Alerts using Email &/or SMS': 'Senden von Alarmen unter Nutzung von E-Mail und/oder SMS',
'Send Commitment as Shipment': 'Zusage als Lieferung senden',
'Send Message': 'Nachricht senden',
'Send New Shipment': 'Neue Lieferung senden',
'Send Notification': 'Benachrichtigung senden',
'Send Shipment': 'Lieferung senden',
'Send Task Notification': 'Auftragsbenachrichtigung senden',
'Send a message to this person': 'Dieser Person eine Nachricht senden',
'Send a message to this team': 'Diesem Team eine Nachricht senden',
'Send from %s': 'Senden von %s',
'Send message': 'Nachricht senden',
'Send new message': 'Neue Nachricht senden',
'Send': 'Senden',
'Sends & Receives Alerts via Email & SMS': 'Schickt & empfängt Benachrichtigungen über Email und SMS',
'Sent By Person': 'Gesendet von einer Person',
'Sent By': 'Gesendet von',
'Sent Emails': 'Gesendete E-Mails',
'Sent Item Details': 'Details zum versendeten Artikel',
'Sent Item deleted': 'Gesendeter Artikel gelöscht',
'Sent Item updated': 'Gesendeter Artikel aktualisiert',
'Sent Posts': 'Gesendete Posts',
'Sent Shipment Details': 'Details zur gesendeten Lieferung',
'Sent Shipment canceled and items returned to Inventory': 'Gesendete Lieferung storniert und Artikel zum Lager zurückgebracht',
'Sent Shipment canceled': 'Gesendete Lieferung storniert',
'Sent Shipment updated': 'Gesendete Lieferung aktualisiert',
'Sent Shipments': 'Gesendete Lieferungen',
'Sent SMS': 'Gesendete SMS',
'Sent date': 'Versanddatum',
'Sent': 'Gesendet',
'Separated children, caregiving arrangements': 'von Eltern getrennte Kinder, Pflegevereinbarungen',
'Serial Number': 'Seriennummer',
'Series': 'Serie',
'Server': 'Server',
'Service Catalog': 'Leistungskatalog',
'Service Record': 'Leistungseintrag',
'Service or Facility': 'Leistung oder Einrichtung',
'Service profile added': 'Leistungsprofil hinzugefügt',
'Service profile deleted': 'Leistungsprofil gelöscht',
'Service profile updated': 'Leistungsprofil aktualisiert',
'Service': 'Leistung',
'Services Available': 'Verfügbare Leistungen',
'Services': 'Leistungen',
'Set Base Site': 'Basisstandort festlegen',
'Set By': 'Definiert durch',
'Set True to allow editing this level of the location hierarchy by users who are not MapAdmins.': "Wählen sie 'Wahr' um Benutzern, die nicht Karten-Admins sind, zu erlauben dieses Level der Gebietshierachie zu verändern.",
'Setting Details': 'Details konfigurieren',
'Setting added': 'Einstellung hinzugefügt',
'Setting deleted': 'Einstellungen gelöscht',
'Setting updated': 'Einstellung aktualisiert',
'Settings updated': 'Einstellungen aktualisiert',
'Settings were reset because authenticating with Twitter failed': 'Einstellungen wurden zurückgesetzt da die Authentifizierung mit Twitter fehlgeschlagen ist',
'Settings which can be configured through the web interface are available here.': 'Die Einstellungen, die über das Webinterface konfiguriert werden können, sind hier verfügbar.',
'Settings': 'Einstellungen',
'Severe': 'Ernsthaft',
'Severity': 'Wertigkeit',
'Sex': 'Geschlecht',
'Share a common Marker (unless over-ridden at the Feature level)': 'Definiere einen allgemeinen Marker/Symbol (kann auf Objekt-Ebene überschrieben werden)',
'Shelter & Essential NFIs': 'Unterkünfte & Essentielle NFIs',
'Shelter Details': 'Details zur Unterkunft',
'Shelter Name': 'Name der Unterkunft',
'Shelter Registration Status': 'Registrierungsstatus',
'Shelter Registry': 'Unterkunft Register',
'Shelter Service Details': 'Details zur Unterkunftsleistung',
'Shelter Service added': 'Unterkunftsleistung hinzugefügt',
'Shelter Service deleted': 'Unterkunftsleistung gelöscht',
'Shelter Service updated': 'Unterkunftsleistung aktualisiert',
'Shelter Service': 'Unterkunftsleistung',
'Shelter Services': 'Unterkunftsleistungen',
'Shelter Settings': 'Eigenschaften der Unterkunft',
'Shelter Type Details': 'Details zum Unterkunftstyp',
'Shelter Type added': 'Unterkunftstyp hinzugefügt',
'Shelter Type deleted': 'Unterkunftstyp gelöscht',
'Shelter Type updated': 'Unterkunftstyp aktualisiert',
'Shelter Type': 'Unterkunftstyp',
'Shelter Types and Services': 'Unterkunftstypen und -leistungen',
'Shelter Types': 'Unterkunftstypen',
'Shelter added': 'Unterkunft hinzugefügt',
'Shelter deleted': 'Unterkunft gelöscht',
'Shelter updated': 'Unterkunft aktualisiert',
'Shelter': 'Unterkunft',
'Shelter/NFI Assistance': 'Unterkunft/ NFI Hilfe',
'Shelters': 'Unterkünfte',
'Shipment Created': 'Lieferung erstellt',
'Shipment Items received by Inventory': 'Lieferungsartikel aus Bestand empfangen',
'Shipment Items sent from Inventory': 'Lieferungsartikel von Bestand gesendet',
'Shipment Items': 'Lieferungsartikel',
'Shipment Type': 'Typ der Lieferung',
'Shipment to Send': 'Zu sendende Lieferung',
'Shipments To': 'Lieferungen nach',
'Shipments': 'Lieferungen',
'Shipping cost': 'Lieferkosten',
'Shooting': 'Schießerei',
'Short Assessment': 'Kurz Beurteilung',
'Short Description': 'Kurzbeschreibung',
'Show %(number)s entries': 'Zeige %(number)s Einträge',
'Show Checklist': 'Checkliste anzeigen',
'Show Details': 'Details anzeigen',
'Show Location?': 'Gebiet/Standort anzeigen?',
'Show Map': 'Karte anzeigen',
'Show Region in Menu?': 'Region im Menu anzeigen?',
'Show author picture?': 'Bild des Autors anzeigen?',
'Show on Map': 'Auf Karte anzeigen',
'Show on map': 'Auf Karte anzeigen',
'Show totals': 'Summen anzeigen',
'Show': 'Zeige',
'Showing _START_ to _END_ of _TOTAL_ entries': 'Einträge _START_ bis _END_ von _TOTAL_',
'Showing 0 to 0 of 0 entries': 'Keine Einträge',
'Sign-up as a volunteer': 'Als Freiwilliger anmelden',
'Sign-up for Account': 'Für Benutzerkennung anmelden',
'Sign-up succesful - you should hear from us soon!': 'Registrierung erfolgreich - Sie werden in Kürze von uns hören!',
'simplified/slow': 'vereinfacht/langsam',
'Site Administration': 'Administration der Seite',
'Site': 'Standort',
'Site Needs': 'Standortbedarf',
'Add Site Needs': 'Standortbedarf hinzufügen',
'Edit Site Needs': 'Standortbedarf ändern',
'Delete Site Needs': 'Standortbedarf löschen',
'Site Needs added': 'Standortbedarf hinzugefügt',
'Site Needs updated': 'Standortbedarf aktualisiert',
'Site Needs deleted': 'Standortbedarf gelöscht',
'Situation Awareness & Geospatial Analysis': 'Situationseinschätzung & Räumliche Analyse',
'Sketch': 'Skizze',
'Skill Catalog': 'Fähigkeitskatalog',
'Skill Details': 'Details zur Fähigkeit',
'Skill Equivalence Details': 'Details zur Fähigkeits-Vergleichbarkeit',
'Skill Equivalence added': 'Fähigkeits-Vergleichbarkeit hinzugefügt',
'Skill Equivalence deleted': 'Fähigkeits-Vergleichbarkeit gelöscht',
'Skill Equivalence updated': 'Fähigkeits-Vergleichbarkeit aktualisiert',
'Skill Equivalence': 'Fähigkeits-Vergleichbarkeit',
'Skill Equivalences': 'Fähigkeits-Vergleichbarkeiten',
'Skill Provision Catalog': 'Fähigkeiten Bestimmungskatalog',
'Skill Provision Details': 'Fähigkeiten Bestimmung Details',
'Skill Provision added': 'Geschick Bestimmung hinzugefügt',
'Skill Provision deleted': 'Fähigkeitenbestimmung gelöscht',
'Skill Provision updated': 'Fähigkeiten Bestimmung aktualisiert',
'Skill Provision': 'Geschick Bestimmung',
'Skill Provisions': 'Fähigkeits-Bereitstellungen',
'Skill Status': 'Fähigkeitsstatus',
'Skill TYpe': 'Art der Fähigkeit',
'Skill Type Catalog': 'Fähigkeitstypen-Katalog',
'Skill Type Details': 'Details zum Fähigkeitstyp',
'Skill Type added': 'Fähigkeitstyp hinzugefuegt',
'Skill Type deleted': 'Fähigkeitstyp gelöscht',
'Skill Type updated': 'Fähigkeitstyp aktualisiert',
'Skill Types': 'Fähigkeitstypen',
'Skill added': 'Fähigkeit hinzugefügt',
'Skill deleted': 'Fähigkeit gelöscht',
'Skill updated': 'Fähigkeit aktualisiert',
'Skill': 'Kenntnisse',
'Skills Catalog': 'Fähigkeiten Katalog',
'Skills Management': 'Fähigkeiten Management',
'Skills': 'Fähigkeiten',
'Skype ID': 'Skype ID',
'Slope failure, debris': 'Abhang Bruch, Schutt',
'Small Trade': 'Kleiner Handel',
'Smoke': 'Rauch',
'Snapshot Report': 'Bericht zur aktuellen Lage',
'Snapshot': 'Momentaufnahme',
'Snow Fall': 'Schneefall',
'Snow Squall': 'Schneeschauer',
'Soil bulging, liquefaction': 'Boden aufgequollen, Verflüssigung',
'Solid waste': 'Feste Abfälle',
'Solution Details': 'Details zur Lösung',
'Solution Item': 'Lösungselement',
'Solution added': 'Lösung hinzugefügt',
'Solution deleted': 'Lösung gelöscht',
'Solution updated': 'Lösung aktualisiert',
'Solution': 'Lösung',
'Solutions': 'Lösungen',
'Some': 'Einige',
'Sorry that location appears to be outside the area of the Parent.': 'Entschuldigung, diese Position scheint ausserhalb des Bereichs des übergeordneten Elements zu liegen.',
'Sorry that location appears to be outside the area supported by this deployment.': 'Entschuldigung, diese Position scheint ausserhalb des Bereichs zu liegen, der von dieser Anwendung unterstützt wird.',
'Sorry, I could not understand your request': 'Entschuldigung, leider konnte ich ihre Anfrage nicht verstehen',
'Sorry, only users with the MapAdmin role are allowed to create location groups.': 'Entschuldigung, nur Benutzer mit der Kartenadministrator-Rolle sind berechtigt Gruppen von Standorten/Gebieten zu erstellen.',
'Sorry, only users with the MapAdmin role are allowed to edit these locations': 'Entschuldigung, nur Benutzer mit der Kartenadministrator-Rolle sind berechtigt diese Standorte/Gebiete zu bearbeiten',
'Sorry, something went wrong.': 'Entschuldigung, leider is etwas schief gelaufen.',
'Sorry, that page is forbidden for some reason.': 'Entschuldigung, leider ist der Besuch dieser Seite aus einem bestimmten Grund nicht zulässig.',
'Sorry, that service is temporary unavailable.': 'Entschuldigung, leider steht dieser Dienst vorübergehend nicht zur Verfügung.',
'Sorry, there are no addresses to display': 'Entschuldigung, leider sind keine Adressen vorhanden um angezeigt zu werden.',
'Sought': 'Gesucht',
'Source ID': 'Quellen ID',
'Source Time': 'Zeit der Quelle',
'Source': 'Quelle',
'Sources of income': 'Einkommensquellen',
'Space Debris': 'Weltraumschrott',
'Spanish': 'Spanisch',
'Special Ice': 'Besonderes Eis',
'Special Marine': 'Spezielles Wasserfahrzeug',
'Specialized Hospital': 'Spezialisiertes Krankenhaus',
'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.': 'Bestimmter Bereich (z.B. Gebäude/Raum) innerhalb eines Ortes in der diese Person/Gruppe gefunden werden kann.',
'Specific locations need to have a parent of level': 'Bestimmte Orte benötigen ein übergeordnetes Element der Stufe',
'Specify a descriptive title for the image.': 'Geben Sie einen beschreibenden Titel für das Bild an.',
'Specify the bed type of this unit.': 'Geben Sie den Bettentyp für diese Einheit an.',
'Specify the number of available sets': 'Geben Sie die Anzahl der verfügbaren Sätze an',
'Specify the number of available units (adult doses)': 'Geben Sie die Anzahl der verfügbaren Einheiten ein (Dosis für Erwachsene)',
'Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions': 'Geben Sie die Anzahl der verfügbaren Einheiten (in Liter) von Ringer-Lactat oder gleichwertige Lösungen ein',
'Specify the number of sets needed per 24h': 'Geben Sie die Anzahl der erforderlichen Sätze pro 24h ein',
'Specify the number of units (Erwachsenendosen) needed per 24h': 'Geben Sie die Anzahl der Einheiten ein (Dosis für Erwachsene) die pro 24h benötigt werden.',
'Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h': 'Geben Sie die Anzahl der Einheiten (in Liter) von Ringer-Lactat oder gleichwertigen Lösungen ein, die man pro 24h braucht.',
'Spherical Mercator?': 'Sphärischer Mercator?',
'Spreadsheet Importer': 'Import von Tabellendokumenten',
'Spreadsheet uploaded': 'Tabellendokument hochgeladen',
'Squall': 'Sturmschauer',
'Staff & Volunteers': 'Mitarbeiter & Freiwillige',
'Staff & Volunteers (Combined)': 'Mitarbeiter & Freiwillige (kombiniert)',
'Staff ID': 'Mitarbeiter-ID',
'Staff Management': 'Mitarbeitermanagement',
'Staff Member Details': 'Details zum Mitarbeiter',
'Staff Member added': 'Mitarbeiter hinzugefügt',
'Staff Members': 'Mitarbeiter',
'Staff Record': 'Mitarbeiterakte',
'Staff Report': 'Mitarbeiterbericht',
'Staff Type Details': 'Details zum Mitarbeitertyp',
'Staff Type added': 'Mitarbeitertyp hinzugefügt.',
'Staff Type deleted': 'Mitarbeitertyp gelöscht',
'Staff Type updated': 'Mitarbeitertyp aktualisiert',
'Staff Types': 'Mitarbeitertypen',
'Staff and Volunteers': 'Mitarbeiter und Freiwillige',
'Staff & Volunteers (combined)': 'Mitarbeiter & Freiwillige (kombiniert)',
'Staff member added': 'Mitarbeiter hinzugefügt',
'Staff present and caring for residents': 'Mitarbeiter ist anwesend und versorgt die Anwohner.',
'Staff with Contracts Expiring in the next Month': 'Mitarbeiter deren Verträge im Laufe des nächsten Monats ablaufen',
'Staff': 'Mitarbeiter',
'Staffing': 'Mitarbeiterausstattung',
'Stairs': 'Treppen',
'Start Date': 'Startdatum',
'Start date': 'Startdatum',
'Start of Period': 'Beginn einer Periode',
'State': 'Bundesland',
'State / Province': 'Staat / Bundesland',
'State /Province': 'Staat / Bundesland',
'Stationery': 'Büromaterial',
'Status Report': 'Statusbericht',
'Status Reports': 'Statusberichte',
'Status Updated': 'Status aktualisiert',
'Status added': 'Status hinzugefügt',
'Status deleted': 'Status gelöscht',
'Status of clinical operation of the facility.': 'Status von klinischen Möglichkeiten dieser Einrichtung.',
'Status of general operation of the facility.': 'Status von allgemeinen Möglichkeiten dieser Einrichtung.',
'Status of morgue capacity.': 'Status der Leichenhallenkapazität',
'Status of operations of the emergency department of this hospital.': 'Status von Möglichkeiten der Notaufnahme dieses Krankenhauses.',
'Status of security procedures/access restrictions in the hospital.': 'Status von Sicherheitsverfahren/Zugriffsbeschränkung in diesem Krankenhaus.',
'Status of the operating rooms of this hospital.': 'Der Status der Operationssäle dieses Krankenhauses.',
'Status updated': 'Status aktualisiert',
'Status': 'Status',
'Steel frame': 'Stahlrahmen',
'Stock': 'Bestand',
'Stock Counts': 'Bestandszahlen',
'Stock in Warehouse': 'Bestand im Warenlager',
'Stolen': 'Gestohlen',
'Store spreadsheets in the Eden database': 'Speichere Tabellendokument in die Eden Datenbank',
'Storeys at and above ground level': 'Stockwerke auf und über der Erdoberfläche',
'Storm Force Wind': 'Sturm Kraft Wind',
'Storm Surge': 'Sturmflut',
'Stowaway': 'Blinder Passagier',
'Street Address': 'Adresse',
'Strong Wind': 'Starker Wind',
'Structural Hazards': 'Strukturelle Gefahren',
'Structural': 'Strukturell',
'Styles': 'Styles/Symbolisierungen',
'Style Field': 'Style-Feld',
'Style Values': 'Style-Werte',
'Sub-type': 'Unterart',
'Subject': 'Betreff',
'Submission successful - please wait': 'Absenden erfolgreich - bitte warten',
'Submission successful - please wait...': 'Absenden erfolgreich - bitte warten ...',
'Submit New (full form)': 'Daten erneut absenden (vollständiges Formular)',
'Submit New (triage)': 'Daten erneut absenden (Auswahl)',
'Submit New': 'Daten erneut absenden',
'Submit a request for recovery': 'Registrieren einer Bergungsanfrage',
'Submit new Level 1 assessment (full form)': 'Absenden einer neuen Stufe 1 Beurteilung (vollständiges Formular)',
'Submit new Level 1 assessment (triage)': 'Absenden einer neuen Stufe 1 Beurteilung (Auswahl)',
'Submit new Level 2 assessment': 'Absenden einer neuen Stufe 2 Beurteilung',
'Submit': 'Abschicken',
'Subscription Details': 'Details zum Abo',
'Subscription added': 'Abo hinzugefügt',
'Subscription deleted': 'Abo gelöscht',
'Subscription updated': 'Abo aktualisiert',
'Subscriptions': 'Abonnements',
'Subsector Details': 'Details zum Teilbereich',
'Subsector added': 'Teilbereich hinzugefügt',
'Subsector deleted': 'Teilbereich gelöscht',
'Subsector updated': 'Teilbereich aktualisiert',
'Subsector': 'Teilbereich',
'Subsectors': 'Teilbereich',
'Subsistence Cost': 'Verpflegungskosten',
'Suburb': 'Vorort',
'Suggest not changing this field unless you know what you are doing.': 'Bitte ändern Sie dieses Feld nur, wenn Sie genau wissen, was Sie tun.',
'Summary by Administration Level': 'Zusammenfassung nach Verwaltungsstufe',
'Summary of Incoming Supplies': 'Zusammenfassung der eingehenden Vorräte',
'Summary of Releases': 'Zusammenfassung der Releases',
'Summary': 'Zusammenfassung',
'Sunday': 'Sonntag',
'Supplier/Donor': 'Lieferant/Spender',
'Suppliers': 'Lieferanten',
'Supply Chain Management': 'Versorgungsketten-Management',
'Support provided': 'Durchgeführte Massnahmen',
'Support Request': 'Unterstützungsanforderung',
'Support Requests': 'Unterstützungsanforderungen',
'Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.': 'Unterstützt den Entscheidungsprozess von großen Gruppen von Krisenmanagementexperten indem man den Gruppen ermöglicht Prioritätenlisten aufzustellen.',
'Surgery': 'Chirurgie',
'Survey Answer Details': 'Details zur Umfrage-Antwort',
'Survey Answer added': 'Umfrage-Antwort hinzugefügt',
'Survey Answer deleted': 'Umfrage-Antwort gelöscht',
'Survey Answer updated': 'Umfrage-Antwort aktualisiert',
'Survey Answer': 'Umfrage-Antwort',
'Survey Module': 'Umfrage Modul',
'Survey Name': 'Name der Umfrage',
'Survey Question Details': 'Details zur Umfrage-Frage',
'Survey Question Display Name': 'Angezeigter Name der Umfrage-Frage',
'Survey Question added': 'Umfrage-Frage hinzugefügt',
'Survey Question deleted': 'Umfrage-Frage gelöscht',
'Survey Question updated': 'Umfrage-Frage aktualisiert',
'Survey Question': 'Umfrage-Frage',
'Survey Series Details': 'Details zur Umfragenserie',
'Survey Series Name': 'Angezeigter Name der Umfrageserie',
'Survey Series added': 'Umfrageserie hinzugefügt',
'Survey Series deleted': 'Umfrageserie gelöscht',
'Survey Series updated': 'Umfrageserie aktualisiert',
'Survey Series': 'Umfrageserien',
'Survey Template Details': 'Details zur Umfragenvorlage',
'Survey Template added': 'Umfragenvorlage hinzugefügt',
'Survey Template deleted': 'Umfragenvorlage gelöscht',
'Survey Template updated': 'Umfragevorlage aktualisiert',
'Survey Template': 'Umfragenvorlage',
'Survey Templates': 'Umfragenvorlagen',
'Surveys': 'Umfragen',
'Suspended': 'Gesperrt',
'Suspended Cases': 'Gesperrte Fälle',
'Switch to 3D': 'In Google Earth anzeigen',
'Symbology': 'Symbolisierung',
'Sync Conflicts': 'Synchronisierungskonflikte',
'Sync History': 'Synchronisierungshistorie',
'Sync Now': 'Jetzt synchronisieren',
'Sync Partners are instances or peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) that you want to sync information with. Click on the link on the right to go the page where you can add sync partners, search for sync partners and modify them.': 'Partner für die Synchronisation sind Instanzen von Peers (SahanaEden, SahanaAgasti, Ushahidi, etc. ) mit denen die aktuelle Instanz synchronisiert werden soll. Ein Klick auf den Link rechts bringt Sie zur Seite auf der Sie diese hinzufügen, suchen und ändern können.',
'Sync Partners': 'Partner für die Synchronisation',
'Sync Pools': 'Synchronisierungspools',
'Sync Schedule': 'Synchronisierungszeitplan',
'Sync Settings': 'Synchronisierungseinstellungen',
'Sync process already started on': 'Sync-Prozess bereits gestartet am',
'Synchronisation': 'Synchronisierung',
'Synchronization Conflicts': 'Synchronisierungskonflikte',
'Synchronization Details': 'Synchronisierung - Details',
'Synchronization History': 'Synchronisierungsgeschichte',
'Synchronization Peers': 'Synchronisierung von Peers',
'Synchronization Settings': 'Synchronisierungseinstellungen',
'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the synchronization features of Sahana Eden': 'Die Synchronisation erlaubt ihnen Daten gemeinsam zu nutzen, indem ihre eigene Datenbank mit aktuellen Daten anderer aktualisieren oder umgekehrt. Diese Seite informiert sie darüber wie sie das automatische Synchronisationsfeature von Sahana Eden verwenden.',
'Synchronization not configured.': 'Synchronisierung nicht konfiguriert.',
'Synchronization settings updated': 'Synchronisierungseinstellungen wurden aktualisiert',
'Synchronization': 'Synchronisierung',
'Syncronisation History': 'Synchronisierungshistorie',
'Table': 'Tabelle',
'Tags': 'Tags',
'Take shelter in place or per <instruction>': 'Unterkunft aufsuchen oder <instruction>',
'Task Details': 'Details zur Aufgabe',
'Task List': 'Aufgabenliste',
'Task Status': 'Aufgabenstatus',
'Task added': 'Aufgabe hinzugefügt',
'Task deleted': 'Aufgabe gelöscht',
'Task updated': 'Aufgabe aktualisiert',
'Tasks': 'Aufgaben',
'Team Description': 'Teambeschreibung',
'Team Details': 'Details zum Team',
'Team Id': 'Team ID',
'Team Leader': 'Teamleiter',
'Team Member added': 'Teammitglied hinzugefügt',
'Team Members': 'Teammitglieder',
'Team Name': 'Name des Teams',
'Team Type': 'Typ des Teams',
'Team added': 'Team hinzugefügt',
'Team deleted': 'Team gelöscht',
'Team updated': 'Team aktualisiert',
'Technical testing only, all recipients disregard': 'Diese Benachrichtung ist ein technischer Test, bitte ignorieren',
'Telecommunications': 'Telekommunikation',
'Telephone': 'Telefon',
'Telephony': 'Telefonie',
'Temp folder %s not writable - unable to apply theme!': 'Temporärer Ordner %s nicht beschreibbar - Layout (theme) kann nicht angewandt werden!',
'Template Name': 'Name der Vorlage',
'Template file %s not readable - unable to apply theme!': 'Template Datei %s nicht lesbar - Layout (theme) kann nicht angewandt werden!',
'Templates': 'Vorlagen',
'Term for the fifth-level within-country administrative division (e.g. a voting or postcode subdivision). This level is not often used.': 'Begriff für die 5. Ebene der Verwaltungshierarchie eines Landes (z.B. eine Wahl- oder Postleitzahlenbereich). Diese Stufe wird nicht oft verwendet.',
'Term for the fourth-level within-country administrative division (e.g. Village, Neighborhood or Precinct).': 'Begriff für die 4. Ebene der Verwaltungshierarchie eines Landes (z.B. Dorf, Stadtteil).',
'Term for the primary within-country administrative division (e.g. State or Province).': 'Begriff für die 1. Ebene der Verwaltungshierarchie eines Landes (z. B. Staat oder Bundesland).',
'Term for the secondary within-country administrative division (e.g. District or County).': 'Begriff für die 2. Ebene der Verwaltungshierarchie eines Landes (z. B. Regierungsbezirk oder Landkreis).',
'Term for the third-level within-country administrative division (e.g. City or Town).': 'Begriff für die 3. Ebene der Verwaltungshierarchie eines Landes (z. B. Ort oder Stadt).',
'Term for the top-level administrative division (i.e. Country).': 'Begriff für die Verwaltung der höchsten Ebene (d. h. Land).',
'Test Results': 'Testergebnisse',
'Territorial Authority': 'Territoriale Behörde',
'Terrorism': 'Terrorismus',
'Tertiary Server (Optional)': 'Tertiärer Server (Optional)',
'Text Color for Text blocks': 'Text Farbe für Text Blöcke',
'Thank you for validating your email. Your user account is still pending for approval by the system administator (%s).You will get a notification by email when your account is activated.': 'Danke für die Validierung Ihrer E-Mail. Ihr Benutzeraccount wurde vom Systemadministrator noch nicht genehmigt (%s). Sie werden eine Benachrichtigung per E-Mail erhalten wenn ihr Account aktiviert wurde.',
'Thanks for your assistance': 'Danke für Ihre Hilfe',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.': 'Die "query" ist eine Bedingung für "db.table1.field1==\'value\'". Irgendetwas wie "db.table1.field1 == db.table2.field2" führt zu einem SQL JOIN.',
'The Area which this Site is located within.': 'Der Bereich, in dem sich dieser Ort befindet.',
'The Assessments module allows field workers to send in assessments.': 'Das Beurteilungsmodul erlaubt allen Aussendienstmitarbeitern ihre Beurteilungen einzusenden.',
'The Assessment Module stores assessment templates and allows responses to assessments for specific events to be collected and analyze': 'Das Beurteilungsmodul speichert Beurteilungsvorlagen und erlaubt Antworten auf Beurteilungen spezieller Ereignisse zu sammeln und auszuwerten',
'The Assessment Module stores assessment templates and allows responses to assessments for specific events to be collected and analyzed': 'Das Beurteilungsmodul speichert Beurteilungsvorlagen und erlaubt es Antworten zu speziellen Ereignissen zu sammeln und zu analysieren',
'The Author of this Document (optional)': 'Der Autor dieses Dokuments (optional)',
'The Building Asssesments module allows building safety to be assessed, e.g. after an Earthquake.': 'Das Gebäudebeurteilungsmodul erlaubt die Sicherheit eines Gebäudes zu beurteilen, z. B. nach einem Erdbeben.',
'The Camp this Request is from': 'Das Camp von dem diese Anfrage stammt',
'The Camp this person is checking into.': 'Das Camp, in das diese Person überführt wird',
'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'Die aktuelle Position der Person/Gruppe, welche ungenau (für die Berichterstellung) oder genau (zur Anzeige von auf einer Karte) sein kann. Geben Sie einige Zeichen ein um aus verfügbaren Standorten auszuwählen.',
'The Email Address to which approval requests are sent (normally this would be a Group mail rather than an individual). If the field is blank then requests are approved automatically if the domain matches.': 'Die E-mail Adresse an welche die Genehmigungen gesendet werden (normalerweise ist das eine Gruppen-Mail, keine Adresse einer Einzelperson) Wenn das Feld leer ist, dann werden Anforderungen automatisch genehmigt, wenn die Domänennamen übereinstimmen.',
'The Incident Reporting System allows the General Public to Report Incidents & have these Tracked.': 'Das Vorfall Berichtssystem ermöglicht der Allgemeinheit Vorfälle zu melden und diese verfolgen zu lassen.',
'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'Der Herkunftsort der Person kann ungenau (für die Berichterstellung) oder genau (zur anzeige auf einer Karte ) sein. Geben Sie einige Zeichen ein um aus verfügbaren Standorten auszuwählen.',
'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'Der Ort, zu dem die Person gehen wird, welcher ungenau (für Berichte) oder genau (für die Darstellung auf einer Karte) sein kann. Geben Sie einige Zeichen ein um aus verfügbaren Standorten auszuwählen.',
'The Media Library provides a catalog of digital media.': 'Das Medienverzeichnis bietet einen Katalog digitaler Medien',
'The Messaging Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.': 'Das Nachrichtenmodul ist der Hauptknotenpunkt der Kommunikation des Sahana Systems. Es wird verwendet, um Warnungen und/oder andere Nachrichten mit Hilfe von SMS & E-Mail an unterschiedliche Gruppen und Einzelpersonen während und nach einem Katastrophenfall zu schicken.',
'The Organization Registry keeps track of all the relief organizations working in the area.': 'Das Organisationsregister gibt einen Überblick über alle Hilfsorganisationen, die in der Region arbeiten.',
'The Project Tracking module allows the creation of Activities to meet Gaps in Needs Assessments.': 'Das Projektüberwachungsmodul ermöglicht die Erstellung von Aktivitäten um Lücken in Anforderungsbewertungen zu füllen.',
'The Role this person plays within this hospital.': 'Die Rolle die diese Person im Krankenhaus übernimmt.',
'The Shelter Registry tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': 'Das Unterkunftsregister protokolliert alle Unterkünfte und speichert allgemeine Details. Es arbeitet mit anderen Modulen zusammen, um Menschen die sich in einer Unterkunft befinden, sowie die dort zur Verfügung stehenden Leistungen etc. zu dokumentieren.',
'The Shelter this Request is from': 'Die Unterkunft aus welcher diese Anforderung stammt',
'The Shelter this person is checking into.': 'Die Unterkunft in die diese Person eincheckt.',
'The URL for the GetCapabilities page of a Web Map Service (WMS) whose layers you want available via the Browser panel on the Map.': 'Die URL zur "GetCapabilities" Operation eines MapWebService (WMS), dessen Kartenbenen über die Anzeige verfügbar sein sollen.',
'The URL of your web gateway without the post parameters': 'Die URL ihres Web gateways ohne die POST parameter.',
'The URL to access the service.': 'Die URL für den Zugriff zum Service.',
'The Unique Identifier (UUID) as assigned to this facility by the government.': 'Die eindeutige Kennung (UUID) die dieser Einrichtung von der Regierung zugeordnet wurde.',
'The asset must be assigned to a site OR location.': 'Die Anlage muss einem Standort oder einem Gelände zugeordnet werden',
'The attribute which is used for the title of popups.': 'Das Attribut welches für den Titel von Dialogfenstern verwendet wird',
'The attribute within the KML which is used for the title of popups.': 'Das Attribut in der KML das für den Titel der Dialogfenster verwendet wird.',
'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)': 'Die Attribute innerhalb der KML, die für den body des Dialogfenster verwendet werden sollen. (Verwenden Sie ein Leerzeichen zwischen Attributen)',
'The body height (crown to heel) in cm.': 'Die Körpergrösse (Kopf bis Fuss) in cm.',
'The country the person usually lives in.': 'Das Land, in dem die Person normalerweise lebt.',
'The default Organization for whom this person is acting.': 'Die Standardorganisation, für die diese Person agiert',
'The default Organization for whom you are acting.': 'Die Standardorganisation für welche Sie agieren',
'The duplicate record will be deleted': 'Der doppelte Datensatz wird gelöscht.',
'The first or only name of the person (mandatory).': 'Der erste oder einzige Name der Person (erforderlich)',
'The form of the URL is http://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service stands for the URL path to the WMS.': 'Das Format der URL ist http://your/web/map/service?service=WMS&request=GetCapabilities wobei your/web/map/service für den Pfad der URL zum WMS steht',
'The language you wish the site to be displayed in.': 'Die Sprache in der die Seite angezeigt werden soll.',
'The list of Brands are maintained by the Administrators.': 'Die Liste der Marken wird von den Administratoren verwaltet.',
'The list of Catalogs are maintained by the Administrators.': 'Die Liste der Kataloge wird vom Administrator verwaltet.',
'The map will be displayed initially with this latitude at the center.': 'Die Karte wird zunächst auf diese Geographische Breite zentriert.',
'The map will be displayed initially with this longitude at the center.': 'Die Karte wird zunächst auf diese Geographische Länge zentriert.',
'The minimum number of features to form a cluster.': 'Die minimale Anzahl von Objekten, die als Cluster angezeigt werden.',
'The name to be used when calling for or directly addressing the person (optional).': 'Der zu verwendende Name beim Anfragen oder direkten Ansprechen der Person (optional).',
'The next screen will allow you to detail the number of people here & their needs.': 'Der nächste Bildschirm erlaubt es, nähere Angaben zur Anzahl Menschen hier & ihrer Bedürfnisse zu machen.',
'The number of Units of Measure of the Alternative Items which is equal to One Unit of Measure of the Item': 'Die Anzahl der Maßeinheiten eines alternativen Artikels, welcher einer Maßeinheit von diesem Artikel entspricht',
'The number of pixels apart that features need to be before they are clustered.': 'Mindestanzahl erforderlicher Pixel, damit sie nicht in Clustern zusammengefasst dargestellt werden.',
'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': 'Die Anzahl der Teilbilder rund um den sichtbaren Kartenausschnitt die heruntergeladen werden. Null bedeutet, dass die erste Seite schneller geladen wird, höhere Zahlen bedeuten dass nachfolgendes Schwenken schneller ist.',
'The person at the location who is reporting this incident (optional)': 'Die Person vor Ort welche das Ereignis meldet (optional)',
'The post variable containing the phone number': 'Der POST Parameter, der die Telefonnummer beinhaltet',
'The post variable on the URL used for sending messages': 'Der POST Parameter, der die Nachricht beinhaltet.',
'The post variables other than the ones containing the message and the phone number': 'Die POST Parameter, die nicht die Nachricht oder Telefonnummer beinhalten',
'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows': 'Der serielle Anschluss mit dem das Modem verbunden ist - /dev/ttyUSB0, etc unter linux und com1, com2, etc unter Windows',
'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.': 'Der Server hat keine rechtzeitige Antwort von einem anderen Server erhalten, um die Anfrage des Clients beantworten zu können.',
'The server received an incorrect response from another server that it was accessing to fill the request by the browser.': 'Der Server hat eine ungültige Antwort von einem anderen Server erhalten, auf den er zugegriffen hat, um die Anfrage des Browsers zu erfüllen.',
'The site where this position is based.': 'Das Gelände auf dem dieser Standort/Gebiet liegt.',
'The staff responsibile for Facilities can make Requests for assistance. Commitments can be made against these Requests however the requests remain open until the requestor confirms that the request is complete.': 'Die zuständigen Mitarbeiter für Anlagen können Hilfe anfordern. Bezüglich dieser Anfragen können Zusagen gemacht werden. Diese bleiben solange offen, bis der Anforderer bestätigt, dass die Anfrage erfüllt ist.',
'The subject event no longer poses a threat or concern and any follow on action is described in <instruction>': 'Das genannte Ereignis stellt keine Bedrohung oder Sorge mehr dar und jede nachfolgende Aktion ist unter <instruction> beschrieben.',
'The time at which the Event started.': 'Die Zeit zu der das Ereignis startete.',
'The token associated with this application on': 'Das token welches mit dieser Anwendung verbunden ist',
'The unique identifier which identifies this instance to other instances.': 'Die eindeutige Kennung (UUID), die diese Instanz bei der Kommunikation mit anderen Instanzen identifiziert.',
"The volunteer's role": "Rolle des Freiwilligen",
'The way in which an item is normally distributed': 'Die Art in der ein Artikel normalerweise verteilt wird.',
'The weight in kg.': 'Das Gewicht in kg.',
'The': 'Das',
'Thematic Mapping': 'Thematische Kartendarstellung',
'Theme Details': 'Details zum Thema',
'Theme added': 'Thema hinzugefügt',
'Theme deleted': 'Thema gelöscht',
'Theme updated': 'Thema aktualisiert',
'Theme': 'Thema',
'Themes': 'Themen',
'There are errors': 'Es sind Fehler aufgetreten',
'There are insufficient items in the Inventory to send this shipment': 'Es sind nicht genügend Artikel im Bestand um diese Lieferung abzusenden.',
'There are multiple records at this location': 'An dieser Stelle gibt es mehrere Datensätze',
'There is no address for this person yet. Add new address.': 'Für diese Person gibt es noch keine Adresse. Fügen Sie eine neue Adresse hinzu.',
'These are settings for Inbound Mail.': 'Dies sind Einstellungen für eingehende Mail.',
'These are the Incident Categories visible to normal End-Users': 'Dies sind die für alle Endbenutzer sichtbaren Kategorien von Vorfällen',
'These need to be added in Decimal Degrees.': 'Diese müssen in Dezimalgrad hinzugefügt werden.',
'They': 'Sie',
'This Group has no Members yet': 'Diese Gruppe hat noch keine Mitglieder',
'This Team has no Members yet': 'Dieses Team hat noch keine Mitglieder',
'This appears to be a duplicate of': 'Dies scheint ein Duplikat zu sein von',
'This file already exists on the server as': 'Diese Datei existiert bereits auf dem Server als',
'This is appropriate if this level is under construction. To prevent accidental modification after this level is complete, this can be set to False.': "Dies ist zulässig, wenn sich die Stufe noch im Aufbau befindet. Um unbeabsichtige Änderungen zu verhindern, nachdem dieses Level abgeschlossen ist, kann dies auf 'False' gesetzt werden.",
'This is the way to transfer data between machines as it maintains referential integrity.': 'Auf diese Weise werden Daten zwischen Maschinen übertragen um die referenzielle Integrität aufrecht zu erhalten.',
'This is the way to transfer data between machines as it maintains referential integrity...duplicate data should be removed manually 1st!': 'Auf diese Weise werden Daten zwischen Maschinen übertragen, um die referenzielle Integrität aufrechtzuerhalten. Doppelte Daten sollten vorher manuell entfernt werden.',
'This level is not open for editing.': 'Diese Stufe ist nicht zum Bearbeiten freigegeben.',
'This might be due to a temporary overloading or maintenance of the server.': 'Dies wurde möglicherweise durch eine vorübergehende Überlastung oder Wartung des Servers ausgelöst.',
'This module allows Inventory Items to be Requested & Shipped between the Inventories of Facilities.': 'Dieses Modul ermöglicht es, Bestandsartikel zwischen Beständen verschiedener Anlagen Anzufragen und zu liefern.',
'This module allows the editing of page content using a web browser.': 'Dieses Modul ermöglicht das Editieren der Webseite unter Verwendung des Browsers.',
'This module allows you to plan scenarios for both Exercises & Events. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.': 'Mit diesem Modul können Szenarien sowohl für Übungen als auch für Ereignisse planen. Sie können geeignete Ressourcen (Menschen, Anlagen & Einrichtungen) zuordnen, damit diese leicht mobilisiert werden können.',
'This page shows you logs of past syncs. Click on the link below to go to this page.': 'Diese Seite zeigt ihnen die Protokolle von vorherigen Syncs. Klicken Sie auf den Link unten um auf diese Seite zu gelangen.',
'This screen allows you to upload a collection of photos to the server.': 'Diese Seite ermöglicht ihnen eine Sammlung von Fotos zum Server hochzuladen.',
'This setting can only be controlled by the Administrator.': 'Diese Einstellung kann nur vom Systemverwalter vorgenommen werden.',
'This shipment has already been received.': 'Diese Lieferung wurde bereits empfangen.',
'This shipment has already been sent.': 'Diese Lieferung wurde bereits abgeschickt.',
'This shipment has not been received - it has NOT been canceled because it can still be edited.': 'Diese Lieferung wurde noch nicht empfangen - sie ist nicht abgebrochen worden weil sie immer noch editiert werden kann.',
'This shipment has not been sent - it has NOT been canceled because it can still be edited.': 'Diese Lieferung wurde noch nicht abgeschickt - sie ist NICHT abgebrochen worden, weil sie immer noch bearbeitet werden kann.',
'This shipment will be confirmed as received.': 'Der Empfang dieser Lieferung wurde bestätigt.',
'Thunderstorm': 'Gewitter',
'Thursday': 'Donnerstag',
'Ticket Details': 'Details zum Ticket',
'Ticket ID': 'Ticket-ID',
'Ticket added': 'Ticket hinzugefügt',
'Ticket deleted': 'Ticket gelöscht',
'Ticket updated': 'Ticket aktualisiert',
'Ticketing Module': 'Ticket Modul',
'Tile Mapping Service': 'TileMapService',
'Tilt-up concrete': 'Konkrete Neigung',
'Timber frame': 'Holzrahmen',
'Timeline Report': 'Bericht zum Zeitplan',
'Timeline': 'Zeitplan',
'Time Out': 'Ausgangszeit',
'Time Question': 'Zeit Frage',
'Title': 'Titel',
'Title to show for the Web Map Service panel in the Tools panel.': 'Titel, mit der die WebMapService-Leiste in der Werkzeugleiste angezeigt wird',
'To Location': 'Zum Standort',
'To Organization': 'Zur Organisation',
'To Person': 'Zu Händen von',
'To begin the sync process, click the button on the right =>': 'Zum Starten der Synchronisierung, klicken Sie auf die Schaltfläche auf der rechten Seite =>',
'To begin the sync process, click this button =>': 'Um den Synchronisierungsprozess zu starten, klicken Sie diese Schaltfläche =>',
'To create a personal map configuration, click': 'Um eine persönliche Kartenkonfiguration zu erstellen, klicken Sie auf',
'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in models/000_config.py': 'Zum Bearbeiten von OpenStreetMap, müssen Sie die Einstellungen in models/000_config. py anpassen',
'To move the Timeline: use the mouse scroll wheel, the arrow keys or grab and drag the Timeline.': "Um die Zeitachse zu verschieben nutzen Sie bitte das Mausrad, die Pfeiltasten oder verschieben Sie sie per Drag'n Drop",
'To search by job title, enter any portion of the title. You may use % as wildcard.': 'Um nach einer Jobbezeichnung zu suchen, geben sie einen beliebigen Teil des Namens ein. Sie können % als Wildcard verwenden.',
'To variable': 'zu variieren',
'To': 'Bis',
'To Address': 'Empfängeradresse',
'Tools': 'Arbeitsmittel',
'Tornado': 'Wirbelsturm',
'Total # of Target Beneficiaries': 'Gesamtzahl der Nutznießer',
'Total # of households of site visited': 'Gesamtzahl der Haushalte des besuchten Geländes',
'Total Beds': 'Betten insgesamt',
'Total Beneficiaries': 'Gesamtzahl Nutznießer',
'Total Budget': 'Gesamtbudget',
'Total Capacity (Night)': 'Gesamtkapazität (Nacht)',
'Total Cost per Megabyte': 'Gesamtkosten pro Megabyte',
'Total Cost per Minute': 'Gesamtkosten pro Minute',
'Total Cost': 'Gesamtkosten',
'Total Monthly Cost': 'Gesamte monatliche Kosten',
'Total Monthly': 'Insgesamt Monatlich',
'Total One-time Costs': 'Summe einmaliger Kosten',
'Total Persons': 'Gesamtzahl an Personen',
'Total Records: %(numrows)s': 'Gesamtzahl an Datensätzen %(numrows)s',
'Total Recurring Costs': 'Gesamte wiederkehrende Kosten',
'Total Unit Cost': 'Gesamtstückkosten',
'Total Units': 'Summe Einheiten',
'Total Value': 'Gesamtwert',
'Total Volume (m3)': 'Gesamtvolumen (m3)',
'Total Weight (kg)': 'Gesamtgewicht (kg)',
'Total gross floor area (square meters)': 'Gesamtgröße der Fläche (Quadratmeter)',
'Total number of beds in this hospital. Automatically updated from daily reports.': 'Gesamtzahl der Betten in diesem Krankenhaus. Automatisch aktualisiert über die täglichen Berichte.',
'Total number of houses in the area': 'Gesamtzahl der Häuser im Gebiet',
'Total number of schools in affected area': 'Gesamtzahl der Schulen im betroffenen Gebiet',
'Total population of site visited': 'Gesamtzahl der Bevölkerung des besuchten Gebietes',
'Total': 'Summe',
'Tourist Group': 'Touristengruppe',
'Town': 'Stadt',
'Town / Municipality': 'Ort / Stadtbezirk',
'Traces internally displaced people (IDPs) and their needs': 'Verfolgung von Binnenflüchtlingen (IDP) und deren Bedürfnisse',
'Tracing': 'Verfolgung',
'Track Details': 'Details zum Track',
'Track deleted': 'Track gelöscht',
'Track updated': 'Track aktualisiert',
'Track uploaded': 'Track hochgeladen',
'Track with this Person?': 'Diese Person verfolgen?',
'Track': 'Track',
'Tracking of Projects, Activities and Tasks': 'Verfolgen von Projekten, Aktivitäten und Aufgaben',
'Tracking of basic information on the location, facilities and size of the Shelters': 'Verfolgung von Basisinformationen über Ort, Einrichtungen und Größe von Unterkünften',
'Tracks the location, distibution, capacity and breakdown of victims in Shelters': 'Verfolgung der Position, Verteilung, Kapazität und Aufteilung der Opfer auf Unterkünfte',
'Tracks': 'Verfolgungen',
'Traffic Report': 'Datenverkehrsbericht',
'Training Course Catalog': 'Schulungskurs-Katalog',
'Training Details': 'Details zur Schulung',
'Training Event': 'Schulungskurs',
'Training Events': 'Schulungskurse',
'Training Facility': 'Schulungseinrichtung',
'Training Hours (Month)': 'Trainingsstunden (Monat)',
'Training Hours (Year)': 'Trainingsstunden (Jahr)',
'Training Report': 'Schulungsbericht',
'Training added': 'Schulung hinzugefügt',
'Training deleted': 'Schulung gelöscht',
'Training updated': 'Schulung aktualisiert',
'Training': 'Schulung',
'Trainings': 'Weiterbildungen / Übungen',
'Transition Effect': 'Übergangseffekt',
'Transit Status': 'Transitstatus',
'Translation': 'Übersetzung',
'Transportation assistance, Rank': 'Transport-Unterstützung, Rank',
'Trauma Center': 'Trauma Zentrum',
'Travel Cost': 'Reisekosten',
'Tropical Storm': 'Tropischer Sturm',
'Tropo Messaging Token': 'Tropo Nachrichten Token',
'Tropo Settings': 'Tropo Einstellungen',
'Tropo settings updated': 'Tropo Einstellungen aktualisiert',
'Truck': 'Lastwagen',
'Try checking the URL for errors, maybe it was mistyped.': 'Untersuchen Sie die URL auf Fehler, vielleicht war sie falsch geschrieben.',
'Try hitting refresh/reload button or trying the URL from the address bar again.': "Versuchen Sie den Knopf 'Aktualisieren/Erneut Laden' oder versuchen Sie nochmals die URL aus der Adresszeile.",
'Try refreshing the page or hitting the back button on your browser.': "Versuchen Sie die Seite zu aktualisieren oder den 'Zurück'-Knopf im Browser zu nutzen.",
'Tuesday': 'Dienstag',
'Tugboat Capacity': 'Schleppkahnkapazitäten',
'Tweeted by': 'Getwittert von',
'Tweeted on': 'Getwittert auf',
'Twilio Channels': 'Twilio Kanäle',
'Twitter Channels': 'Twitter Kanäle',
'Twitter ID or #hashtag': 'Twitter-ID oder #hashtag',
'Twitter InBox': 'Twitter Eingang',
'Twitter Search': 'Twitter Suche',
'Twitter Search Results': 'Twitter Suchergebnisse',
'Twitter Settings': 'Einstellungen für Twitter',
'Type of Construction': 'Bautyp',
'Type of water source before the disaster': 'Typ der Wasserquelle vor der Katastrophe',
'Type': 'Typ',
'Types': 'Typen',
'UN': 'UN',
'Un-Repairable': 'Nicht zu reparieren',
'Unable to parse CSV file!': 'CSV Datei kann nicht analysiert werden!',
'Understaffed': 'Unterbesetzt',
'Unidentified': 'Nicht identifiziert',
'Unit Cost': 'Kosten für Einheit',
'Unit Value': 'Einheitswert',
'Unit added': 'Einheit hinzugefügt',
'Unit deleted': 'Einheit gelöscht',
'Unit of Measure': 'Maßeinheit',
'Unit updated': 'Einheit aktualisiert',
'Unit': 'Einheit',
'Units': 'Einheiten',
'Unknown Peer': 'Unbekannter Peer',
'Unknown type of facility': 'Unbekannter Einrichtungstyp',
'Unknown': 'unbekannt',
'Unmark as duplicate': 'Duplikatsmeldung zurückziehen',
'Unreinforced masonry': 'Nicht verstärktes Mauerwerk',
'Unresolved Conflicts': 'Ungelöste Konflikte',
'Unsafe': 'Unsicher',
'Unselect to disable the modem': 'Abwählen um das Modem zu deaktivieren',
'Unsent': 'Nicht gesendet',
'Unsupported data format!': 'Nicht unterstütztes Datenformat!',
'Unsupported method!': 'Nicht unterstützte Methode!',
'Update Activity Report': 'Aktivitätsbericht aktualisieren',
'Update Cholera Treatment Capability Information': 'Aktualisieren der Informationen zu den Cholera Behandlungsmöglichkeiten',
'Update Request': 'Anfrage Aktualisieren',
'Update Service Profile': 'Leistungsprofil aktualisieren',
'Update Status': 'Status aktualisieren',
'Update Task Status': 'Status der Aufgabe aktualisieren',
'Update Unit': 'Enheit Aktualisieren',
'Update if Master': 'Aktualisiere wenn Master',
'Update if Newer': 'Aktualisiere falls neuer',
'Update your current ordered list': 'Aktualisieren Sie ihre aktuell bestellte Liste',
'Update': 'Aktualisierung',
'Updated By': 'Aktualisiert von',
'Upload Photos': 'Fotos hochladen',
'Upload Spreadsheet': 'Tabellendokument hochladen',
'Upload Track': 'Verfolgung hochladen',
'Upload a Spreadsheet': 'Ein Tabellendokument hochladen',
'Upload a file formatted according to the Template.': 'Laden Sie eine entsprechend der Vorlage formatierte Datei hoch.',
'Upload an Assessment Template import file': 'Upload einer Beurteilungsvorlage',
'Upload an image file (bmp, gif, jpeg or png), max. 300x300 pixels!': 'Grafikdatei hochladen (bmp, gif, jpeg-oder png), max. 300x300 Pixel!',
'Upload an image file here.': 'Laden Sie hier die Grafikdatei hoch.',
'Upload an image, such as a photo': 'Laden Sie eine Grafikdatei hoch, wie beispielsweise ein Foto',
'Uploaded Image': 'Hochgeladenes Bild',
'Upload translated files': 'Übersetzte Dateien hochladen',
'Upon Request': 'Eingehende Anfrage',
'Urban Fire': 'Siedlungsfeuer',
'Urban area': 'Stadtgebiet / Ballungsgebiet',
'Urgent': 'Dringend',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Verwende (...)&(...) für UND, (...)|(...) für ODER und ~(...) für NICHT um komplexere Abfragen zu erstellen.',
'Use Geocoder for address lookups?': "Verwendung von 'Geocoder' für Adressenüberprüfung?",
'Use deg, min, sec': 'Nutze Grad, Minuten, Sekunden',
'Use decimal': 'Nutze Dezimalgrad',
'Use default': 'Standardwert verwenden',
'Use for Login?': 'Für Login verwenden?',
'Use these links to download data that is currently in the database.': 'Verwenden Sie diese Links um Daten, die derzeit in der Datenbank liegen herunterzuladen.',
'Used by IRS & Assess': 'Verwendet vom IRS & Assess',
'Used in onHover Tooltip & Cluster Popups to differentiate between types.': 'Verwendet in onHover Tooltip & Cluster Popups um verschiedene Typen zu unterscheiden.',
'Used to build onHover Tooltip & 1st field also used in Cluster Popups to differentiate between records.': 'Verwendet um onHover Tooltip zu erstellen & das 1. Feld wird ebenfalls im Cluster Dialogfeld benutzt um zwischen verschiedenen Datensätzen zu unterscheiden.',
'Used to check that latitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'Wird zur Überprüfung genutzt, dass die eingegebene Geographische Länge für den Ort sinnvoll ist. Kann verwendet werden um Resources zu filtern die Standorte haben.',
'Used to check that longitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'Wird zur Überprüfung genutzt, dass die eingegebene Geographische Breite für den Ort sinnvoll ist. Kann verwendet werden um Resources zu filtern die Standorte haben.',
'Used to import data from spreadsheets into the database': 'Dient dazu Daten aus Tabellendokumenten in die Datenbank zu übertragen.',
'Used within Inventory Management, Request Management and Asset Management': 'Verwendung beim der Bestands-, Anfrage- und Anlagenverwaltung',
'User Account has been Disabled': 'Das Benutzerkonto wurde deaktiviert',
'User Details': 'Details zum Benutzer',
'User Management': 'Benutzerverwaltung',
'User Profile': 'Benutzerprofil',
'User Requests': 'Benutzeranfragen',
'User Updated': 'Benutzer aktualisiert',
'User added': 'Benutzer hinzugefügt',
'User already has this role': 'Der Benutzer hat bereits diese Rolle',
'User deleted': 'Benutzer gelöscht',
'User updated': 'Benutzer aktualisiert',
'User': 'Benutzer',
'Username': 'Benutzername',
'Users removed': 'Benutzer entfernt',
'Users': 'Benutzer',
'Uses the REST Query Format defined in': 'Verwendet das REST-Abfrageformat das definiert ist in',
'Utilities': 'Dienstprogramme',
'Utility, telecommunication, other non-transport infrastructure': 'Dienstprogramm, Telekommunikation, andere nicht-Verkehrsinfrastruktur',
'Utilization Report': 'Verwendungsbericht',
'Valid until': 'Gültig bis',
'Value per Pack': 'Wert pro Packet',
'Value': 'wert',
'Various Reporting functionalities': 'Verschiedene Funktionalitäten für das Berichtswesen',
'Vehicle Categories': 'Fahrzeugkategorien',
'Vehicle Crime': 'Fahrzeug Kriminalität',
'Vehicle Height (m)': 'Höhe des Fahrzeugs (m)',
'Vehicle Management': 'Fahrzeugmanagement',
'Vehicle Plate Number': 'Fahrzeugnummernschild',
'Vehicle Type': 'Fahrzeugtyp',
'Vehicle Types': 'Fahrzeugtypen',
'Vehicle Weight (kg)': 'Gewicht des Fahrzeugs (kg)',
'Vehicle': 'Fahrzeug',
'Vehicles': 'Fahrzeuge',
'Vehicles are assets with some extra details.': 'Fahrzeuge sind Anlagen, die mit einigen speziellen Funktionen ausgestattet sind',
'Venue': 'Örtlichkeit',
'Verification Status': 'Prüfstatus',
'Verified?': 'Geprüft?',
'Verify password': 'Passwortprüfung',
'Very Good': 'Sehr gut',
'Very High': 'Sehr hoch',
'Vessel Max Length': 'Wasserfahrzeug maximale Länge',
'View Alerts received using either Email or SMS': 'Empfangene Warnungen über E-Mail oder SMS',
'View All': 'Alles anzeigen',
'View Error Tickets': 'Fehler Tickets ansehen',
'View Fullscreen Map': 'Vollbild Karte anzeigen',
'View Image': 'Bild anzeigen',
'View Items': 'Artikel anzeigen',
'View On Map': 'Auf Karte anzeigen',
'View Outbox': 'Postausgang anzeigen',
'View Picture': 'Bild anzeigen',
'View Settings': 'Einstellungen anzeigen',
'View Test Result Reports': 'Zeige Berichte der Testergebnisse',
'View Tickets': 'Tickets anzeigen',
'View Translation Percentage': 'Zeige Übersetzungsstatistik',
'View and/or update their details': 'Anzeige und/oder Aktualisieren Ihrer Detailinformationen',
'View as Pages': 'Anzeige als Seiten',
'View or update the status of a hospital.': 'Anzeige oder Aktualisieren des Status eines Krankenhauses.',
'View pending requests and pledge support.': 'Anstehende Anforderungen anzeigen und Zusageunterstützung.',
'View the hospitals on a map.': 'Krankenhäuser auf einer Karte anzeigen',
'View/Edit the Database directly': 'Die Datenbank direkt anzeigen/bearbeiten',
'Village Leader': 'Dorfvorsteher',
'Village / Suburb': 'Ortschaft / Vorort',
'Village': 'Dorf',
'Visible?': 'Sichtbar?',
'Visual Recognition': 'Visuelle Erkennung',
'Volcanic Ash Cloud': 'Wolke vulkanischer Asche',
'Volcanic Event': 'Vulkanischen Ereignis',
'Volume (m3)': 'Volumen (m3)',
'Volunteer Availability': 'Verfügbarkeit von Freiwilligen',
'Volunteer Contact': 'Kontaktdaten des Freiwilligen',
'Volunteer Details': 'Details zu Freiwilligen',
'Volunteer Information': 'Freiwilligeninformation',
'Volunteer Management': 'Management von Freiwilligen',
'Volunteer Project': 'Freiwilligen Projekt',
'Volunteer Record': 'Freiwilligen Datensatz',
'Volunteer Report': 'Freiwilligen Bericht',
'Volunteer Request': 'Freiwilligen Anforderung',
'Volunteer Role': 'Rolle des Freiwilligen',
'Volunteer Role Catalog': 'Rollenkatalog für Freiwillige',
'Volunteer added': 'Freiwilliger hinzugefügt',
'Volunteer availability added': 'Freiwilligen Verfügbarkeit hinzugefügt',
'Volunteer availability deleted': 'Freiwilligen Verfügbarkeit geöscht',
'Volunteer availability updated': 'Freiwilligen Verfügbarkeit aktualisiert',
'Volunteer deleted': 'Freiwilliger gelöscht',
'Volunteer details updated': 'Details zu Freiwilligen aktualisiert',
'Volunteers were notified!': 'Freiwillige wurden unterrichtet!',
'Volunteers': 'Freiwillige',
'Volunteer': 'Freiwilliger',
'Vote': 'Abstimmung',
'Votes': 'Abstimmungen',
'WASH': 'WASH',
'Walking Only': 'Nur laufen',
'Wall or other structural damage': 'Wand oder andere Gebäudeschäden',
'Warehouse Details': 'Details zu Warenlager',
'Warehouse Stock': 'Lagerbestand',
'Warehousing Storage Capacity': 'Warenlager Ablagekapazität',
'Warehouse Type': 'Warenlagertyp',
'Warehouse Types': 'Warenlagertypen',
'Warehouse added': 'Warenlager hinzugefügt',
'Warehouse deleted': 'Warenlager gelöscht',
'Warehouse updated': 'Warenlager aktualisiert',
'Warehouse': 'Warenlager',
'Warehouses': 'Warenlager',
'Water Sanitation Hygiene': 'Wasser Abwasserentsorgung Hygiene',
'Water collection': 'Wassersammlung',
'Water gallon': 'Wasser Gallonen',
'Water storage containers in households': 'Wasser-Behälter in Haushalten',
'Water supply': 'Wasserversorgung',
'Waybill Number': 'Frachtbriefnummer',
'WB': 'Frachtbriefnr.',
'Web Feature Service': 'WebFeatureService',
'Web Map Service': 'WebMapService',
'Web Map Service Browser Name': 'WebMapService Browser Name',
'Web Map Service Browser URL': 'WebMapService Browser URL',
'Website': 'Webseite',
'Wednesday': 'Mittwoch',
'Weight (kg)': 'Gewicht (kg)',
'Weight': 'Gewicht',
'Welcome to the Sahana Portal at': 'Willkommen beim Sahana Portal',
'Well-Known Text': 'WellKnownText (OGC-WKT)',
'What the Items will be used for': 'Beabsichtigte Verwendung der Artikel',
'Wheat': 'Weizen',
'When reports were entered': 'Wann die Berichte eingegeben wurden',
'Whiskers': 'Barthaare',
'Who is doing what and where': 'Wer macht was und wo',
'Who usually collects water for the family?': 'Wer sammelt normalerweise Wasser für die Familie?',
'Width': 'Breite',
'Width (m)': 'Breite (m)',
'Wild Fire': 'Wildfeuer',
'Wind Chill': 'Kälte vom Wind',
'Window frame': 'Fensterrahmen',
'Winter Storm': 'Wintersturm',
'Women of Child Bearing Age': 'Frauen im gebärfähigen Alter',
'Women participating in coping activities': 'Frauen die sich an den Hilfsaktivitäten beteiligen',
'Women who are Pregnant or in Labour': 'Frauen die schwanger sind oder in den Wehen',
'Womens Focus Groups': 'Focus Gruppen für Frauen',
'Wooden plank': 'Hölzerne Planke',
'Wooden poles': 'Holzmasten',
'Working hours end': 'Arbeitszeit Ende',
'Working hours start': 'Arbeitszeit Beginn',
'Working or other to provide money/food': 'Arbeiten oder etwas anderes um Geld/Lebensmittel zur Verfügung zu stellen.',
'written-only': 'nur schriftlich',
'XYZ Tiles': 'XYZ Tiles',
'X-Ray': 'Röntgen',
'X-Ray Done': 'Röntgen erledigt',
'YES': 'JA',
'Year built': 'Baujahr',
'Year of Manufacture': 'Herstellungsjahr',
'Year': 'Jahr',
'Yellow': 'Gelb',
'Yes': 'Ja',
'yes': 'ja',
'You are a recovery team?': 'Sind Sie ein Bergungsteam?',
'You are attempting to delete your own account - are you sure you want to proceed?': 'Sie versuchen Ihr eigenes Konto zu löschen - sind Sie sicher, dass Sie fortfahren möchten?',
'You are currently reported missing!': 'Sie sind derzeit als vermisst gemeldet!',
'You can change the configuration of synchronization module in the Settings section. This configuration includes your UUID (unique identification number), sync schedules, beacon service and so on. Click the following link to go to the Sync Settings page.': 'Sie können die Konfiguration des Synchronisierungsmodules unter Einstellungen anpassen. Diese Konfiguration enthält ihre UUID (unique identification number), Synchronisierungszeitpläne, Beacon-Service, usw. . Klicken sie auf den folgenden Link um zu den Einstellungen für die Synchronisierung zu gelangen.',
'You can click on the map below to select the Lat/Lon fields': 'Sie können auf die untere Karte klicken um Geographische und Geographische Breiten abzugreifen.',
'You can select the Draw tool': 'Sie können das Zeichen Tool verwenden',
'You can set the modem settings for SMS here.': 'Sie können die Modemeinstellungen für SMS hier festlegen.',
'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.': 'Sie können das Konvertierungprogamm verwenden von GPS-Koordinatenoder Grad/Minuten/Sekunden umzuwandeln.',
'You do not have permission for any facility to make a commitment.': 'Sie haben keine Berechtigung für irgendeine Einrichtung eine Zusage zu machen.',
'You do not have permission for any facility to make a request.': 'Sie haben keine Berechtigung für irgendeine Einrichtung eine Anfrage zu starten.',
'You do not have permission for any site to add an inventory item.': 'Sie haben keine Berechtigung für irgendein Gelände einen Bestandsartikel hinzuzufügen.',
'You do not have permission for any site to receive a shipment.': 'Sie haben keine Berechtigung für irgendein Gelände eine Lieferung anzunehmen.',
'You do not have permission for any site to send a shipment.': 'Sie haben keine Berechtigung für irgendein Gelände eine Lieferung abzusenden.',
'You do not have permission to cancel this received shipment.': 'Sie haben keine Berechtigung diese erhaltene Lieferung zu löschen.',
'You do not have permission to cancel this sent shipment.': 'Sie haben keine Berechtigung diese gesendete Lieferung zu löschen.',
'You do not have permission to make this commitment.': 'Sie haben keine Berechtigung diese Zusage zu machen.',
'You do not have permission to receive this shipment.': 'Sie haben keine Berechtigung diese Lieferung entgegenzunehmen.',
'You do not have permission to send a shipment from this site.': 'Sie haben keine Berechtigung Lieferungen von diesem Gelände zu senden.',
'You do not have permission to send this shipment.': 'Sie haben keine Berechtigung diese Lieferung zu senden.',
'You have a personal map configuration. To change your personal configuration, click': 'Sie haben eine persönliche Kartenkonfiguration. Um ihre persönliche Konfiguration zu ändern, klicken Sie hier',
'You have found a dead body?': 'Sie haben eine Leiche gefunden?',
'You must be logged in to register volunteers.': 'Sie müssen angemeldet sein, um Freiwillige zu registrieren.',
'You must be logged in to report persons missing or found.': 'Sie müssen angemeldet sein, um fehlende oder gefundene Personen zu melden.',
'You must provide a series id to proceed.': 'Sie müssen eine serien-id vorweisen, um fortzufahren.',
'You should edit Twitter settings in models/000_config.py': 'Sie sollten die Twitter Einstellungen unter models/000_config.py bearbeiten',
'Your current ordered list of solution items is shown below. You can change it by voting again.': 'Ihre aktuelle, geordnete Liste der Lösungselemente wird unten angezeigt. Sie können es durch Abstimmen erneut verändern.',
'Your post was added successfully.': 'Der Eintrag wurde erfolgreich hinzugefügt.',
'Your system has been assigned a unique identification (UUID), which other computers around you can use to identify you. To view your UUID, you may go to Synchronization -> Sync Settings. You can also see other settings on that page.': 'Ihr System verfügt über eine eindeutige ID (UUID), die andere Computer nützen können um Sie zu identifizieren. Zum Anzeigen Ihrer UUID, können Sie zur Synchronisierung gehen --> Sync Einstellungen Sie könnem auch andere Einstellungen auf dieser Seite einsehen.',
'Zero Hour': 'Stunde null',
'Zinc roof': 'Zinkdach',
'Zoom Levels': 'Zoomebenen',
'Zoom in': 'Hineinzoomen',
'Zoom to Current Location': 'Auf aktuelles Gebiet/Standort fokussieren',
'Zoom to maximum map extent': 'Auf maximale Kartenausdehung fokussieren',
'Zoom': 'Zoomen',
'active': 'aktiv',
'added': 'hinzugefügt',
'all records': 'Alle Datensätze',
'allows a budget to be developed based on staff & equipment costs, including any admin overheads.': 'Ermöglicht ein Budget zu entwickeln, basierend auf Mitarbeiter- und Gerätekosten, einschließlich aller administrativen Gemeinkosten.',
'allows for creation and management of surveys to assess the damage following a natural disaster.': 'Ermöglicht die Erstellung und Verwaltung von Umfragen zur Beurteilung von Schäden nach einer Naturkatastrophe.',
'an individual/team to do in 1-2 days': 'Eine Aufwand von 1-2 Tagen für ein einzelnes Team',
'assigned': 'zugewiesen',
'average': 'Durchschnitt',
'black': 'schwarz',
'blue': 'blau',
'brown': 'braun',
'business_damaged': 'Business_beschädigt',
'by': 'durch',
'can be used to extract data from spreadsheets and put them into database tables.': 'Kann verwendet werden um Daten von einer Tabelle zu extrahieren und diese in Datenbanktabellen einzutragen.',
'check all': 'Alles markieren',
'click for more details': 'hier klicken, um mehr Details zu erhalten',
'consider': 'Berücksichtigen',
'curly': 'lockig',
'currently registered': 'derzeitig registriert',
'daily': 'täglich',
'dark': 'dunkel',
'data uploaded': 'hochgeladene Daten',
'database %s select': 'Datenbank%s gewählt',
'database': 'Datenbank',
'deceased': 'Verstorbene',
'delete all checked': 'Alle Ausgewählten löschen',
'deleted': 'gelöscht',
'design': 'Design',
'diseased': 'erkrankt',
'displaced': 'vertrieben',
'divorced': 'geschieden',
'done!': 'fertig!',
'duplicate': 'Dublette',
'eg. gas, electricity, water': 'zum Beispiel Gas, Strom, Wasser',
'enclosed area': 'eingeschlossener Bereich',
'export as csv file': 'Exportieren als CSV-Datei',
'fat': 'fett',
'feedback': 'Rückmeldung',
'female': 'weiblich',
'flush latrine with septic tank': 'die provisorische Toilette mit dem fauligen Tank spülen',
'food_sources': 'lebensmittel_quellen',
'forehead': 'Stirn',
'found': 'gefunden',
'from Twitter': 'aus Twitter',
'green': 'Grün',
'grey': 'grau',
'here': 'hier',
'high': 'hoch',
'hourly': 'stündlich',
'households': 'Haushalte',
'identified': 'identifiziert',
'ignore': 'ignorieren',
'in Deg Min Sec format': 'im Format Grad Minuten Sekunden',
'inactive': 'inaktiv',
'injured': 'verletzt',
'insert new %s': 'neue %en hinzufügen',
'insert new': 'neu einfügen',
'invalid request': 'Ungültige Anfrage',
'invalid': 'ungültig',
'is a central online repository where information on all the disaster victims and families, especially identified casualties, evacuees and displaced people can be stored. Information like name, age, contact number, identity card number, displaced location, and other details are captured. Picture and finger print details of the people can be uploaded to the system. People can also be captured by group for efficiency and convenience.': 'ist ein zentrales online Verzeichnis, in dem Informationen zu allen Opfern und Familien der Katastrophe gespeichert werden können, insbesondere identifizierte Verluste, Evakuierte, Flüchtlinge, Heimatlose. Informationen wie Name, Alter, Kontaktnummer, Ausweisnummer, Vertriebenen-Ort und andere Details werden erfasst. Fotos und Fingerabdrücke der Leute können auf das System hochgeladen werden. Personen können zum Zweck der Effizienz und Einfachheit auch in Gruppen zusammengefasst werden',
'is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities': 'ist so konzipiert, dass es aus mehreren Untermodulen zu besteht. Diese arbeiten zusammen, um Organisationen komplexe Funktionalitäten zur Unterstützung von Hilfen und Durchführung von Projekten zur Verfügung zu stellen. Dies beinhaltet ein Aufnahmesystem, ein Warenlager Management System, Produkt-Tracking, Versorgungsketten-Management, Fahrzeugbestand Management, Beschaffungswesen, Finanz-Tracking und andere Bestands- und Resource Management Einsatzmöglichkeiten.',
'keeps track of all incoming tickets allowing them to be categorised & routed to the appropriate place for actioning.': 'Überwacht alle eingehenden Tickets, so dass diese entsprechend eingestuft und an die entsprechende Stelle zur Bearbeitung geleitet werden können.',
'latrines': 'Toiletten',
'leave empty to detach account': 'Leerlassen um das Konto zu entfernen/aufzuheben.',
'legend URL': 'URL zur Legende',
'light': 'lichtquelle',
'login': 'Anmeldung',
'long': 'lang',
'long>12cm': 'lang > 12cm',
'low': 'niedrig',
'male': 'männlich',
'manual': 'manuell',
'married': 'verheiratet',
'medium': 'mittel',
'medium<12cm': 'mittel < 12 cm',
'meters': 'meter',
'missing': 'fehlend',
'module allows the site administrator to configure various options.': 'Modul das dem Seitenadministrator ermöglicht verschiedene Optionen zu konfigurieren.',
'module helps monitoring the status of hospitals.': 'Modul das hilft den Status von Krankenhäusern zu überwachen',
'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS).': 'Modul das gemeinschaftlich einen Mechanismus bietet einen GIS-gestützen Überblick über die sich entwickelnde Lage zu erhalten.',
'more': 'mehr',
'n/a': 'nicht zutreffend',
'negroid': 'Negroid',
'never': 'nie',
'new record inserted': 'Neuen Datensatz eingefügt',
'new': 'neu',
'next 100 rows': 'Nächste 100 Zeilen',
'no': 'nein',
'none': 'nichts',
'not accessible - no cached version available!': 'Nicht verfügbar - keine zwischengespeicherte Version verfügbar!',
'not accessible - using cached version from': 'Nicht verfügbar - benutze zwischengespeicherte Version von',
'not specified': 'nicht angegeben',
'obsolete': 'obsolet',
'on': 'ein',
'once': 'einmal',
'open defecation': 'Verrichtung der Bedürfnisse im Freien',
'or import from csv file': 'oder aus CSV-Datei importieren',
'other': 'sonstige',
'over one hour': 'über eine Stunde',
'or drop here': "oder hier per Drag'n Drop ablegen",
'people': 'Personen',
'piece': 'Stück',
'pit latrine': 'Grubenlatrine',
'pit': 'Grube',
'postponed': 'zurückgestellt',
'preliminary template or draft, not actionable in its current form': 'vorläufige Vorlage oder Entwurf, nicht aussagekräftig in seiner jetzigen Form',
'previous 100 rows': 'Vorherige 100 Zeilen',
'record does not exist': 'Datensatz ist nicht vorhanden',
'record id': 'Datensatz ID',
'red': 'rot',
'reports successfully imported.': 'Berichte erfolgreich importiert.',
'representation of the Polygon/Line.': 'Darstellung der Fläche/Linie.',
'retired': 'Außer Dienst',
'river': 'Fluss',
'see comment': 'siehe Kommentar',
'selected': 'ausgewählt',
'separated from family': 'von Familie getrennt',
'separated': 'getrennt',
'shaved': 'rasiert',
'short': 'kurz',
'short<6cm': 'kurz < 6cm',
'sides': 'Seiten',
'sign-up now': 'Jetzt Registrieren',
'single': 'alleinstehend',
'slim': 'dünn',
'specify': 'genauer beschreiben',
'staff members': 'Mitarbeiter',
'staff': 'Personal',
'state location': 'Beschaffenheit des Standort',
'state': 'Zustand',
'straight': 'gerade',
'suffered financial losses': 'Finanzielle Verluste erlitten',
'table': 'Tabelle',
'tall': 'groß',
'this': 'Dieses',
'to access the system': 'um auf das System zuzugreifen',
'tonsure': 'Tonsur',
'total': 'Summe',
'tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!': 'Tweepy Modul nicht verfügbar in der aktuellen Python Umgebung läuft - das benötigt die Installation einer none-Tropo Twitter Unterstützung!',
'unable to parse csv file': 'CSV Datei kann nicht analysiert werden',
'uncheck all': 'Alles deselektieren',
'unidentified': 'nicht identifiziert',
'unknown': 'unbekannt',
'unspecified': 'unspezifiziert',
'unverified': 'ungeprüft',
'updated': 'aktualisiert',
'updates only': 'nur Aktualisierungen',
'verified': 'verifiziert',
'volunteer': 'Freiwilliger',
'volunteers': 'Freiwillige',
'wavy': 'wellenförmige Lücke',
'weekly': 'wöchentlich',
'white': 'weiß',
'wider area, longer term, usually contain multiple Activities': 'Größerer Bereich, längere Sicht, enthält normalerweise mehrere Aktivitäten',
'widowed': 'verwitwet',
'within human habitat': 'In menschlichen Lebensraum',
'xlwt module not available within the running Python - this needs installing for XLS output!': 'xlwt Modul nicht verfügbar im Rahmen der laufenden Python Umgebung - das muss installiert werden für XLS Ausgabe!'
}
|
nck0405/ChennaiEden
|
languages/de.py
|
Python
|
mit
| 287,136
|
[
"VisIt"
] |
1439ebb507da1883fa85c30b3c8ab2f16973c631ffa58920e49f515139a67ca7
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import unittest
from builtins import str
from singa import singa_wrap as singa_api
from singa import tensor
from singa import singa_wrap as singa
from singa import autograd
from singa import sonnx
from singa import opt
import onnx
from onnx import (defs, checker, helper, numpy_helper, mapping, ModelProto,
GraphProto, NodeProto, AttributeProto, TensorProto,
OperatorSetIdProto)
from onnx.helper import make_tensor, make_tensor_value_info, make_node, make_graph
from cuda_helper import gpu_dev, cpu_dev
import numpy as np
# Put autograd into training mode for the whole module.
# NOTE(review): presumably required so autograd records the computation
# graph that sonnx.to_onnx later exports — confirm against sonnx docs.
autograd.training = True
def _tuple_to_string(t):
lt = [str(x) for x in t]
return '(' + ', '.join(lt) + ')'
class TestPythonOnnx(unittest.TestCase):
def check_shape(self, actual, expect):
self.assertEqual(
actual, expect, 'shape mismatch, actual shape is %s'
' exepcted is %s' %
(_tuple_to_string(actual), _tuple_to_string(expect)))
def _conv2d_helper(self, dev):
x = tensor.Tensor(shape=(2, 3, 3, 3), device=dev)
x.gaussian(0.0, 1.0)
y = autograd.Conv2d(3, 1, 2)(x)
# frontend
model = sonnx.to_onnx([x], [y])
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
    def test_conv2d_cpu(self):
        # ONNX round-trip of Conv2d on the CPU device.
        self._conv2d_helper(cpu_dev)
    @unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
    def test_conv2d_gpu(self):
        # ONNX round-trip of Conv2d on the GPU device (skipped without CUDA).
        self._conv2d_helper(gpu_dev)
def _relu_helper(self, dev):
X = np.array([0.8, -1.2, 3.3, -3.6, -0.5,
0.5]).reshape(3, 2).astype(np.float32)
XT = np.array([0.8, 0, 3.3, 0, 0, 0.5]).reshape(3, 2).astype(np.float32)
x = tensor.from_numpy(X)
x.to_device(dev)
y = autograd.ReLU()(x)[0]
# frontend
model = sonnx.to_onnx([x], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
    def test_relu_cpu(self):
        # ONNX round-trip of ReLU on the CPU device.
        self._relu_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_relu_gpu(self):
self._relu_helper(gpu_dev)
def _avg_pool_helper(self, dev):
x = tensor.Tensor(shape=(2, 3, 3, 3), device=dev)
x.gaussian(0.0, 1.0)
y = autograd.AvgPool2d(3, 1, 2)(x)
# frontend
model = sonnx.to_onnx([x], [y])
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_avg_pool_cpu(self):
self._avg_pool_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_avg_pool_gpu(self):
self._avg_pool_helper(gpu_dev)
def _softmax_helper(self, dev):
X = np.array([[-1, 0, 1]]).astype(np.float32)
x = tensor.from_numpy(X)
x.to_device(dev)
y = autograd.SoftMax()(x)[0]
# frontend
model = sonnx.to_onnx([x], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_softmax_cpu(self):
self._softmax_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_softmax_gpu(self):
self._softmax_helper(gpu_dev)
def _sigmoid_helper(self, dev):
X = np.array([[-1, 0, 1]]).astype(np.float32)
x = tensor.from_numpy(X)
x.to_device(dev)
y = autograd.Sigmoid()(x)[0]
# frontend
model = sonnx.to_onnx([x], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_sigmoid_cpu(self):
self._sigmoid_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_sigmoid_gpu(self):
self._sigmoid_helper(gpu_dev)
def _add_helper(self, dev):
X1 = np.random.randn(3, 4, 5).astype(np.float32)
X2 = np.random.randn(3, 4, 5).astype(np.float32)
x1 = tensor.from_numpy(X1)
x2 = tensor.from_numpy(X2)
x1.to_device(dev)
x2.to_device(dev)
y = autograd.Add()(x1, x2)[0]
# frontend
model = sonnx.to_onnx([x1, x2], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x1, x2])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_add_cpu(self):
self._add_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_add_gpu(self):
self._add_helper(gpu_dev)
def _concat_helper(self, dev):
X1 = np.random.randn(3, 4, 5).astype(np.float32)
X2 = np.random.randn(3, 4, 5).astype(np.float32)
x1 = tensor.from_numpy(X1)
x2 = tensor.from_numpy(X2)
x1.to_device(dev)
x2.to_device(dev)
y = autograd.Concat()(x1, x2)[0]
# frontend
model = sonnx.to_onnx([x1, x2], [y])
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x1, x2])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_concat_cpu(self):
self._concat_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_concat_gpu(self):
self._concat_helper(gpu_dev)
def _matmul_helper(self, dev):
X1 = np.random.randn(4, 5).astype(np.float32)
X2 = np.random.randn(5, 4).astype(np.float32)
x1 = tensor.from_numpy(X1)
x2 = tensor.from_numpy(X2)
x1.to_device(dev)
x2.to_device(dev)
y = autograd.Matmul()(x1, x2)[0]
# frontend
model = sonnx.to_onnx([x1, x2], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x1, x2])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_matmul_cpu(self):
self._matmul_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_matmul_gpu(self):
self._matmul_helper(gpu_dev)
def _max_pool_helper(self, dev):
x = tensor.Tensor(shape=(2, 3, 4, 4), device=dev)
x.gaussian(0.0, 1.0)
y = autograd.MaxPool2d(2, 2, 0)(x)
# frontend
model = sonnx.to_onnx([x], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_max_pool_cpu(self):
self._max_pool_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_max_pool_gpu(self):
self._max_pool_helper(gpu_dev)
def _batch_norm_helper(self, dev):
x = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32)
s = np.array([1.0, 1.5]).astype(np.float32)
bias = np.array([0, 1]).astype(np.float32)
mean = np.array([0, 3]).astype(np.float32)
var = np.array([1, 1.5]).astype(np.float32)
x = tensor.from_numpy(x)
x.to_device(dev)
s = tensor.from_numpy(s)
s.to_device(dev)
bias = tensor.from_numpy(bias)
mean = tensor.from_numpy(mean)
var = tensor.from_numpy(var)
bias.to_device(dev)
mean.to_device(dev)
var.to_device(dev)
if dev == cpu_dev:
handle = singa.BatchNormHandle(0.9, x.data)
else:
handle = singa.CudnnBatchNormHandle(0.9, x.data)
y = autograd.batchnorm_2d(handle, x, s, bias, mean, var)
# frontend
model = sonnx.to_onnx([x, s, bias, mean, var], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x, s, bias]) # mean and var has been stored in graph
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_batch_norm_cpu(self):
self._batch_norm_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_batch_norm_gpu(self):
self._batch_norm_helper(gpu_dev)
def _linear_helper(self, dev):
x = tensor.Tensor(shape=(2, 20), device=dev)
x.gaussian(0.0, 1.0)
x1 = x.clone()
y = autograd.Linear(20, 1, bias=False)(x)
# frontend
model = sonnx.to_onnx([x], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x1])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_linear_cpu(self):
self._linear_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_linear_gpu(self):
self._linear_helper(gpu_dev)
def _gemm_helper(self, dev):
A = np.random.randn(2, 3).astype(np.float32)
B = np.random.rand(3, 4).astype(np.float32)
C = np.random.rand(2, 4).astype(np.float32)
alpha = 1.0
beta = 2.0
tA = tensor.from_numpy(A)
tB = tensor.from_numpy(B)
tC = tensor.from_numpy(C)
tA.to_device(dev)
tB.to_device(dev)
tC.to_device(dev)
y = autograd.Gemm(alpha, beta, 0, 0)(tA, tB, tC)[0]
# frontend
model = sonnx.to_onnx([tA, tB, tC], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([tA, tB, tC])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_gemm_cpu(self):
self._gemm_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_gemm_gpu(self):
self._gemm_helper(gpu_dev)
def _reshape_helper(self, dev):
x = np.array([0.1, -1.0, 0.4, 4.0, -0.9,
9.0]).reshape(3, 2).astype(np.float32)
x = tensor.from_numpy(x)
x.to_device(dev)
y = autograd.Reshape((2, 3))(x)[0]
# frontend
model = sonnx.to_onnx([x, (2, 3)], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x]) # shape has been stored in graph
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_reshape_cpu(self):
self._reshape_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_reshape_gpu(self):
self._reshape_helper(gpu_dev)
def _sum_helper(self, dev):
x = np.array([0.1, -1.0, 0.4, 4.0, -0.9,
9.0]).reshape(3, 2).astype(np.float32)
x1 = np.array([0.1, 1.0, 0.4, 4.0, 0.9,
9.0]).reshape(3, 2).astype(np.float32)
x = tensor.from_numpy(x)
x1 = tensor.from_numpy(x1)
y = autograd.Sum()(x, x1)[0]
# frontend
model = sonnx.to_onnx([x, x1], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x, x1])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_sum_cpu(self):
self._sum_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_sum_gpu(self):
self._sum_helper(gpu_dev)
def _Cos_helper(self, dev):
x = np.array([0.1, -1.0, 0.4, 4.0, -0.9,
9.0]).reshape(3, 2).astype(np.float32)
x = tensor.from_numpy(x)
y = autograd.Cos()(x)[0]
# frontend
model = sonnx.to_onnx([x], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_Cos_cpu(self):
self._Cos_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_Cos_gpu(self):
self._Cos_helper(gpu_dev)
def _Cosh_helper(self, dev):
x = np.array([0.1, -1.0, 0.4, 4.0, -0.9,
9.0]).reshape(3, 2).astype(np.float32)
x = tensor.from_numpy(x)
y = autograd.Cosh()(x)[0]
# frontend
model = sonnx.to_onnx([x], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_Cosh_cpu(self):
self._Cosh_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_Cosh_gpu(self):
self._Cosh_helper(gpu_dev)
def _Sin_helper(self, dev):
x = np.array([0.1, -1.0, 0.4, 4.0, -0.9,
9.0]).reshape(3, 2).astype(np.float32)
x = tensor.from_numpy(x)
y = autograd.Sin()(x)[0]
# frontend
model = sonnx.to_onnx([x], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_Sin_cpu(self):
self._Sin_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_Sin_gpu(self):
self._Sin_helper(gpu_dev)
def _Sinh_helper(self, dev):
x = np.array([0.1, -1.0, 0.4, 4.0, -0.9,
9.0]).reshape(3, 2).astype(np.float32)
x = tensor.from_numpy(x)
y = autograd.Sinh()(x)[0]
# frontend
model = sonnx.to_onnx([x], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_Sinh_cpu(self):
self._Sinh_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_Sinh_gpu(self):
self._Sinh_helper(gpu_dev)
def _Tan_helper(self, dev):
x = np.array([0.1, -1.0, 0.4, 4.0, -0.9,
9.0]).reshape(3, 2).astype(np.float32)
x = tensor.from_numpy(x)
y = autograd.Tan()(x)[0]
# frontend
model = sonnx.to_onnx([x], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_Tan_cpu(self):
self._Tan_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_Tan_gpu(self):
self._Tan_helper(gpu_dev)
def _Tanh_helper(self, dev):
x = np.array([0.1, -1.0, 0.4, 4.0, -0.9,
9.0]).reshape(3, 2).astype(np.float32)
x = tensor.from_numpy(x)
y = autograd.Tanh()(x)[0]
# frontend
model = sonnx.to_onnx([x], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_Tanh_cpu(self):
self._Tanh_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_Tanh_gpu(self):
self._Tanh_helper(gpu_dev)
def _Acos_helper(self, dev):
x = np.array([0.1, -1.0, 0.4, 4.0, -0.9,
9.0]).reshape(3, 2).astype(np.float32)
x = tensor.from_numpy(x)
y = autograd.Acos()(x)[0]
# frontend
model = sonnx.to_onnx([x], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_Acos_cpu(self):
self._Acos_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_Acos_gpu(self):
self._Acos_helper(gpu_dev)
def _Acosh_helper(self, dev):
x = np.array([0.1, -1.0, 0.4, 4.0, -0.9,
9.0]).reshape(3, 2).astype(np.float32)
x = tensor.from_numpy(x)
y = autograd.Acosh()(x)[0]
# frontend
model = sonnx.to_onnx([x], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_Acosh_cpu(self):
self._Acosh_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_Acosh_gpu(self):
self._Acosh_helper(gpu_dev)
def _Asin_helper(self, dev):
x = np.array([0.1, -1.0, 0.4, 4.0, -0.9,
9.0]).reshape(3, 2).astype(np.float32)
x = tensor.from_numpy(x)
y = autograd.Asin()(x)[0]
# frontend
model = sonnx.to_onnx([x], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_Asin_cpu(self):
self._Asin_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_Asin_gpu(self):
self._Asin_helper(gpu_dev)
def _Asinh_helper(self, dev):
x = np.array([0.1, -1.0, 0.4, 4.0, -0.9,
9.0]).reshape(3, 2).astype(np.float32)
x = tensor.from_numpy(x)
y = autograd.Asinh()(x)[0]
# frontend
model = sonnx.to_onnx([x], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_Asinh_cpu(self):
self._Asinh_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_Asinh_gpu(self):
self._Asinh_helper(gpu_dev)
def _Atan_helper(self, dev):
x = np.array([0.1, -1.0, 0.4, 4.0, -0.9,
9.0]).reshape(3, 2).astype(np.float32)
x = tensor.from_numpy(x)
y = autograd.Atan()(x)[0]
# frontend
model = sonnx.to_onnx([x], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_Atan_cpu(self):
self._Atan_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_Atan_gpu(self):
self._Atan_helper(gpu_dev)
def _Atanh_helper(self, dev):
x = np.array([0.1, -1.0, 0.4, 4.0, -0.9,
9.0]).reshape(3, 2).astype(np.float32)
x = tensor.from_numpy(x)
y = autograd.Atanh()(x)[0]
# frontend
model = sonnx.to_onnx([x], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_Atanh_cpu(self):
self._Atanh_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_Atanh_gpu(self):
self._Atanh_helper(gpu_dev)
def _SeLu_helper(self, dev):
x = np.array([-0.9, -0.3, -0.1, 0.1, 0.5,
0.9]).reshape(3, 2).astype(np.float32)
#y = gamma * (alpha * e^x - alpha) for x <= 0, y = gamma * x for x > 0
a = 1.67326
g = 1.0507
x = tensor.from_numpy(x)
x.to_device(dev)
y = autograd.selu(x, a, g)
# frontend
model = sonnx.to_onnx([x], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_SeLu_cpu(self):
self._SeLu_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_SeLu_gpu(self):
self._SeLu_helper(gpu_dev)
def _ELu_helper(self, dev):
x = np.array([-0.9, -0.3, -0.1, 0.1, 0.5,
0.9]).reshape(3, 2).astype(np.float32)
#y = gamma * (alpha * e^x - alpha) for x <= 0, y = gamma * x for x > 0
a = 1.
x = tensor.from_numpy(x)
x.to_device(dev)
y = autograd.elu(x, a)
# frontend
model = sonnx.to_onnx([x], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_ELu_cpu(self):
self._ELu_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_ELu_gpu(self):
self._ELu_helper(gpu_dev)
# No Op registered for equal with domain_version of 11
# def _Equal_helper(self, dev):
# x0 = np.array([-0.9, -0.3, -0.1, 0.1, 0.5,
# 0.9]).reshape(3, 2).astype(np.float32)
# x1 = np.array([0, -0.3, 0, 0.1, 0, 0.9]).reshape(3,
# 2).astype(np.float32)
# x0 = tensor.from_numpy(x0)
# x1 = tensor.from_numpy(x1)
# x0.to_device(dev)
# x1.to_device(dev)
# y = autograd.equal(x0, x1)
# # frontend
# model = sonnx.to_onnx([x0, x1], [y])
# # print('The model is:\n{}'.format(model))
# # backend
# sg_ir = sonnx.prepare(model, device=dev)
# y_t = sg_ir.run([x0, x1])
# np.testing.assert_array_almost_equal(tensor.to_numpy(y),
# tensor.to_numpy(y_t[0]),
# decimal=5)
# def test_Equal_cpu(self):
# self._Equal_helper(cpu_dev)
# @unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
# def test_Equal_gpu(self):
# self._Equal_helper(gpu_dev)
def _Less_helper(self, dev):
x0 = np.array([-0.9, -0.3, -0.1, 0.1, 0.5,
0.9]).reshape(3, 2).astype(np.float32)
x1 = np.array([0, -0.3, 0, 0.1, 0, 0.9]).reshape(3,
2).astype(np.float32)
x0 = tensor.from_numpy(x0)
x1 = tensor.from_numpy(x1)
x0.to_device(dev)
x1.to_device(dev)
y = autograd.less(x0, x1)
# frontend
model = sonnx.to_onnx([x0, x1], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x0, x1])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_Less_cpu(self):
self._Less_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_Less_gpu(self):
self._Less_helper(gpu_dev)
def _Sign_helper(self, dev):
x = np.array([0.8, -1.2, 3.3, -3.6, -0.5,
0.5]).reshape(3, 2).astype(np.float32)
x = tensor.from_numpy(x)
y = autograd.sign(x)
# frontend
model = sonnx.to_onnx([x], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_Sign_cpu(self):
self._Sign_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_Sign_gpu(self):
self._Sign_helper(gpu_dev)
def _Div_helper(self, dev):
x0 = np.array([-0.9, -0.3, -0.1, 0.1, 0.5,
0.9]).reshape(3, 2).astype(np.float32)
x1 = np.array([0, -0.3, 0, 0.1, 0, 0.9]).reshape(3,
2).astype(np.float32)
x0 = tensor.from_numpy(x0)
x1 = tensor.from_numpy(x1)
x0.to_device(dev)
x1.to_device(dev)
y = autograd.div(x0, x1)
# frontend
model = sonnx.to_onnx([x0, x1], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x0, x1])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_Div_cpu(self):
self._Div_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_Div_gpu(self):
self._Div_helper(gpu_dev)
def _Sub_helper(self, dev):
x0 = np.array([-0.9, -0.3, -0.1, 0.1, 0.5,
0.9]).reshape(3, 2).astype(np.float32)
x1 = np.array([0, -0.3, 0, 0.1, 0, 0.9]).reshape(3,
2).astype(np.float32)
x0 = tensor.from_numpy(x0)
x1 = tensor.from_numpy(x1)
x0.to_device(dev)
x1.to_device(dev)
y = autograd.sub(x0, x1)
# frontend
model = sonnx.to_onnx([x0, x1], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x0, x1])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_Sub_cpu(self):
self._Sub_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_Sub_gpu(self):
self._Sub_helper(gpu_dev)
def _Sqrt_helper(self, dev):
X = np.array([0.1, 1.0, 0.4, 4.0, 0.9,
9.0]).reshape(3, 2).astype(np.float32)
x = tensor.from_numpy(X)
x.to_device(dev)
y = autograd.sqrt(x)
# frontend
model = sonnx.to_onnx([x], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev, init_inputs=X)
y_t = sg_ir.run([x])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_Sqrt_cpu(self):
self._Sqrt_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_Sqrt_gpu(self):
self._Sqrt_helper(gpu_dev)
def _Greater_helper(self, dev):
x0 = np.array([-0.9, -0.3, -0.1, 0.1, 0.5,
0.9]).reshape(3, 2).astype(np.float32)
x1 = np.array([0, -0.3, 0, 0.1, 0, 0.9]).reshape(3,
2).astype(np.float32)
x0 = tensor.from_numpy(x0)
x1 = tensor.from_numpy(x1)
x0.to_device(cpu_dev)
x1.to_device(cpu_dev)
y = autograd.greater(x0, x1)
# frontend
model = sonnx.to_onnx([x0, x1], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x0, x1])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_Greater_cpu(self):
self._Greater_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_Greater_gpu(self):
self._Greater_helper(gpu_dev)
def _HardSigmoid_helper(self, dev):
x = np.array([-0.9, -0.3, -0.1, 0.1, 0.5,
0.9]).reshape(3, 2).astype(np.float32)
a = 0.2
g = 0.5
x = tensor.from_numpy(x)
x.to_device(dev)
y = autograd.hardsigmoid(x, a, g)
# frontend
model = sonnx.to_onnx([x], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_HardSigmoid_cpu(self):
self._HardSigmoid_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_HardSigmoid_gpu(self):
self._HardSigmoid_helper(gpu_dev)
def _identity_helper(self, dev):
x = np.array([-0.9, -0.3, -0.1, 0.1, 0.5,
0.9]).reshape(3, 2).astype(np.float32)
x = tensor.from_numpy(x)
x.to_device(dev)
y = autograd.identity(x)
# frontend
model = sonnx.to_onnx([x], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_identity_cpu(self):
self._identity_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_identity_gpu(self):
self._identity_helper(gpu_dev)
def _softplus_helper(self, dev):
x = np.array([-0.9, -0.3, -0.1, 0.1, 0.5,
0.9]).reshape(3, 2).astype(np.float32)
x = tensor.from_numpy(x)
x.to_device(dev)
y = autograd.softplus(x)
# frontend
model = sonnx.to_onnx([x], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_softplus_cpu(self):
self._softplus_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_softplus_gpu(self):
self._softplus_helper(gpu_dev)
def _softsign_helper(self, dev):
x = np.array([-0.9, -0.3, -0.1, 0.1, 0.5,
0.9]).reshape(3, 2).astype(np.float32)
x = tensor.from_numpy(x)
x.to_device(dev)
y = autograd.softsign(x)
# frontend
model = sonnx.to_onnx([x], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_softsign_cpu(self):
self._softsign_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_softsign_gpu(self):
self._softsign_helper(gpu_dev)
def _mean_helper(self, dev):
x0 = np.array([-0.9, -0.3, -0.1, 0.1, 0.5,
0.9]).reshape(3, 2).astype(np.float32)
x1 = np.array([0, -0.3, 0, 0.1, 0, 0.9]).reshape(3,
2).astype(np.float32)
x0 = tensor.from_numpy(x0)
x1 = tensor.from_numpy(x1)
x0.to_device(dev)
x1.to_device(dev)
y = autograd.mean(x0, x1)
# frontend
model = sonnx.to_onnx([x0, x1], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x0, x1])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_mean_cpu(self):
self._mean_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_mean_gpu(self):
self._mean_helper(gpu_dev)
def _pow_helper(self, dev):
x0 = np.array([7, 5, 0.2, 0.1, 0.3, 4]).reshape(3, 2).astype(np.float32)
x1 = np.array([-1.0, 2.0, -1.0, -2.1, 1.0,
-2.0]).reshape(3, 2).astype(np.float32)
x0 = tensor.from_numpy(x0)
x1 = tensor.from_numpy(x1)
x0.to_device(dev)
x1.to_device(dev)
y = autograd.mean(x0, x1)
# frontend
model = sonnx.to_onnx([x0, x1], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x0, x1])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_pow_cpu(self):
self._pow_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_pow_gpu(self):
self._pow_helper(gpu_dev)
def _clip_helper(self, dev):
x = np.array([-0.9, -0.3, -0.1, 0.1, 0.5,
0.9]).reshape(3, 2).astype(np.float32)
x = tensor.from_numpy(x)
min = -0.5
max = 0.5
x.to_device(dev)
y = autograd.clip(x, min, max)
# frontend
model = sonnx.to_onnx([x, min, max], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x]) # min, max has been stored in model
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_clip_cpu(self):
self._clip_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_clip_gpu(self):
self._clip_helper(gpu_dev)
def _prelu_helper(self, dev):
x = np.array([0.1, -1.0, -0.4, 4.0, -0.9,
9.0]).reshape(3, 2).astype(np.float32)
slope = np.array([0.1, 1.0, 0.4, 4.0, 0.9,
9.0]).reshape(3, 2).astype(np.float32)
x = tensor.from_numpy(x)
slope = tensor.from_numpy(slope)
x.to_device(dev)
slope.to_device(dev)
y = autograd.prelu(x, slope)
# frontend
model = sonnx.to_onnx([x, slope], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x, slope])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_prelu_cpu(self):
self._prelu_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_prelu_gpu(self):
self._prelu_helper(gpu_dev)
def _mul_helper(self, dev):
x = np.array([0.1, -1.0, 0.4, 4.0, -0.9,
9.0]).reshape(3, 2).astype(np.float32)
x1 = np.array([0.1, 1.0, 0.4, 4.0, 0.9,
9.0]).reshape(3, 2).astype(np.float32)
x = tensor.from_numpy(x)
x1 = tensor.from_numpy(x1)
x.to_device(dev)
x1.to_device(dev)
y = autograd.mul(x, x1)
# frontend
model = sonnx.to_onnx([x, x1], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x, x1])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_mul_cpu(self):
self._mul_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_mul_gpu(self):
self._mul_helper(gpu_dev)
def _transpose_helper(self, dev):
x = np.random.randn(3, 2, 1)
y = x.transpose(1, 2, 0)
x = tensor.from_numpy(x)
x.to_device(cpu_dev)
y = autograd.transpose(x, (1, 2, 0))
# frontend
model = sonnx.to_onnx([x], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_transpose_cpu(self):
self._transpose_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_transpose_gpu(self):
self._transpose_helper(gpu_dev)
def _max_helper(self, dev):
X0 = np.array([0.1, 0.2, 2.0, 0.0, 0.1,
0.2]).reshape(3, 2).astype(np.float32)
X1 = np.array([1.0, 2.0, 1.0, 2.1, 0.0,
2.0]).reshape(3, 2).astype(np.float32)
x0 = tensor.from_numpy(X0)
x1 = tensor.from_numpy(X1)
x0.to_device(dev)
x1.to_device(dev)
y = autograd.max(x0, x1)
# frontend
model = sonnx.to_onnx([x0, x1], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x0, x1])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_max_cpu(self):
self._max_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_max_gpu(self):
self._max_helper(gpu_dev)
def _min_helper(self, dev):
X0 = np.array([0.1, 0.2, 2.0, 0.0, 0.1,
0.2]).reshape(3, 2).astype(np.float32)
X1 = np.array([1.0, 2.0, 1.0, 2.1, 0.0,
2.0]).reshape(3, 2).astype(np.float32)
x0 = tensor.from_numpy(X0)
x1 = tensor.from_numpy(X1)
x0.to_device(dev)
x1.to_device(dev)
y = autograd.min(x0, x1)
# frontend
model = sonnx.to_onnx([x0, x1], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x0, x1])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_min_cpu(self):
self._min_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_min_gpu(self):
self._min_helper(gpu_dev)
def _shape_helper(self, dev):
x = np.array([0.1, -1.0, 0.4, 4.0, -0.9,
9.0]).reshape(3, 2).astype(np.float32)
x = tensor.from_numpy(x)
x.to_device(dev)
y = autograd.shape(x)
# frontend
model = sonnx.to_onnx([x], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_shape_cpu(self):
self._shape_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_shape_gpu(self):
self._shape_helper(gpu_dev)
def _and_helper(self, dev):
x0 = np.array([0, -0.3, -0.1, 0.1, 0.5,
0.9]).reshape(3, 2).astype(np.float32)
x1 = np.array([0, -0.3, 0, 0.1, 0.5, 0.9]).reshape(3,
2).astype(np.float32)
x0 = tensor.from_numpy(x0)
x1 = tensor.from_numpy(x1)
x0.to_device(dev)
x1.to_device(dev)
y = autograd._and(x0, x1)
# frontend
model = sonnx.to_onnx([x0, x1], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x0, x1])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_and_cpu(self):
self._and_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_and_gpu(self):
self._and_helper(gpu_dev)
def _or_helper(self, dev):
x0 = np.array([1.0, 1.0, 2.0, -3.0, 0,
-7.0]).reshape(3, 2).astype(np.float32)
x1 = np.array([-1.0, 0, 2.0, 4.0, 0,
-7.0]).reshape(3, 2).astype(np.float32)
x0 = tensor.from_numpy(x0)
x1 = tensor.from_numpy(x1)
x0.to_device(dev)
x1.to_device(dev)
y = autograd._or(x0, x1)
# frontend
model = sonnx.to_onnx([x0, x1], [y])
# print('The model is:\n{}'.format(model))
# backend
sg_ir = sonnx.prepare(model, device=dev)
y_t = sg_ir.run([x0, x1])
np.testing.assert_array_almost_equal(tensor.to_numpy(y),
tensor.to_numpy(y_t[0]),
decimal=5)
def test_or_cpu(self):
self._or_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_or_gpu(self):
self._or_helper(gpu_dev)
def _xor_helper(self, dev):
    """Export autograd._xor to ONNX, re-import it, and verify the
    backend reproduces the frontend result on *dev*."""
    lhs_np = np.array([0, -0.3, -0.1, 0.1, 0.5, 9.0],
                      dtype=np.float32).reshape(3, 2)
    rhs_np = np.array([0, -0.3, 0, 0.1, 0, 0.9],
                      dtype=np.float32).reshape(3, 2)
    lhs = tensor.from_numpy(lhs_np)
    rhs = tensor.from_numpy(rhs_np)
    lhs.to_device(dev)
    rhs.to_device(dev)
    expected = autograd._xor(lhs, rhs)
    # frontend: trace the graph into an ONNX model
    model = sonnx.to_onnx([lhs, rhs], [expected])
    # backend: execute the re-imported model
    backend = sonnx.prepare(model, device=dev)
    actual = backend.run([lhs, rhs])
    np.testing.assert_array_almost_equal(tensor.to_numpy(expected),
                                         tensor.to_numpy(actual[0]),
                                         decimal=5)
def test_xor_cpu(self):
    """Logical-xor ONNX round trip on CPU."""
    self._xor_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_xor_gpu(self):
    """Logical-xor ONNX round trip on GPU."""
    self._xor_helper(gpu_dev)
def _not_helper(self, dev):
    """Export autograd._not to ONNX, re-import it, and verify the
    backend reproduces the frontend result on *dev*."""
    data = np.array([1.0, -1.0, 0, -0.1, 0, -7.0],
                    dtype=np.float32).reshape(3, 2)
    inp = tensor.from_numpy(data)
    inp.to_device(dev)
    expected = autograd._not(inp)
    # frontend: trace the graph into an ONNX model
    model = sonnx.to_onnx([inp], [expected])
    # backend: execute the re-imported model
    backend = sonnx.prepare(model, device=dev)
    actual = backend.run([inp])
    np.testing.assert_array_almost_equal(tensor.to_numpy(expected),
                                         tensor.to_numpy(actual[0]),
                                         decimal=5)
def test_not_cpu(self):
    """Logical-not ONNX round trip on CPU."""
    self._not_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_not_gpu(self):
    """Logical-not ONNX round trip on GPU."""
    self._not_helper(gpu_dev)
def _negative_helper(self, dev):
    """Export autograd.negative to ONNX, re-import it, and verify the
    backend reproduces the frontend result on *dev*."""
    # 1. - 4 evaluates to -3.0; expression kept from the original data
    data = np.array([0.1, 0, 0.4, 1. - 4, 0.9, -2.0],
                    dtype=np.float32).reshape(3, 2)
    inp = tensor.from_numpy(data)
    inp.to_device(dev)
    expected = autograd.negative(inp)
    # frontend: trace the graph into an ONNX model
    model = sonnx.to_onnx([inp], [expected])
    # backend: execute the re-imported model
    backend = sonnx.prepare(model, device=dev)
    actual = backend.run([inp])
    np.testing.assert_array_almost_equal(tensor.to_numpy(expected),
                                         tensor.to_numpy(actual[0]),
                                         decimal=5)
def test_negative_cpu(self):
    """Negation ONNX round trip on CPU."""
    self._negative_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_negative_gpu(self):
    """Negation ONNX round trip on GPU."""
    self._negative_helper(gpu_dev)
def _reciprocal_helper(self, dev):
    """Export autograd.reciprocal to ONNX, re-import it, and verify the
    backend reproduces the frontend result on *dev*.

    Bug fix: the input was previously moved with ``x.to_device(cpu_dev)``,
    ignoring the ``dev`` argument — the GPU variant of this test silently
    executed the forward pass on the CPU while the backend ran on the GPU.
    """
    X = np.array([0.1, 0, 0.4, 1. - 4, 0.9,
                  -2.0]).reshape(3, 2).astype(np.float32)
    x = tensor.from_numpy(X)
    x.to_device(dev)  # was cpu_dev: ignored the requested device
    y = autograd.reciprocal(x)
    # frontend
    model = sonnx.to_onnx([x], [y])
    # backend
    sg_ir = sonnx.prepare(model, device=dev)
    y_t = sg_ir.run([x])
    np.testing.assert_array_almost_equal(tensor.to_numpy(y),
                                         tensor.to_numpy(y_t[0]),
                                         decimal=5)
def test_reciprocal_cpu(self):
    self._reciprocal_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_reciprocal_gpu(self):
    self._reciprocal_helper(gpu_dev)
def _constantOfShape_helper(self, dev):
    """Export autograd.constant_of_shape to ONNX, re-import it, and
    verify the backend reproduces the frontend result on *dev*.

    Bug fix: the input was previously moved with ``x.to_device(cpu_dev)``,
    ignoring the ``dev`` argument — the GPU variant of this test silently
    kept the input on the CPU.
    """
    X = np.array([4, 3, 2]).astype(np.int64)
    x = tensor.from_numpy(X)
    x.to_device(dev)  # was cpu_dev: ignored the requested device
    y = autograd.constant_of_shape(x, 1.)
    # frontend
    model = sonnx.to_onnx([x], [y])
    # backend: the shape input must be known at import time
    sg_ir = sonnx.prepare(model, device=dev, init_inputs=[X])
    y_t = sg_ir.run([x])
    np.testing.assert_array_almost_equal(tensor.to_numpy(y),
                                         tensor.to_numpy(y_t[0]),
                                         decimal=5)
def test_constantOfShape_cpu(self):
    self._constantOfShape_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_constantOfShape_gpu(self):
    self._constantOfShape_helper(gpu_dev)
def _dropout_helper(self, dev):
    """Round-trip autograd.dropout through ONNX on *dev*.

    Dropout is stochastic, so only the output shapes are compared.
    """
    data = np.random.randn(3, 4, 5).astype(np.float32)
    inp = tensor.from_numpy(data)
    inp.to_device(dev)
    expected = autograd.dropout(inp, 0.5)
    # frontend: trace the graph into an ONNX model
    model = sonnx.to_onnx([inp], [expected])
    # backend: execute the re-imported model
    backend = sonnx.prepare(model, device=dev)
    actual = backend.run([inp])
    self.check_shape(
        tensor.to_numpy(expected).shape,
        tensor.to_numpy(actual[0]).shape)
def test_dropout_cpu(self):
    """Dropout ONNX round trip on CPU."""
    self._dropout_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_dropout_gpu(self):
    """Dropout ONNX round trip on GPU."""
    self._dropout_helper(gpu_dev)
def _reduceSum_helper(self, dev):
    """Round-trip autograd.reduce_sum through ONNX on *dev*,
    comparing only output shapes."""
    data = np.random.randn(3, 4, 5).astype(np.float32)
    inp = tensor.from_numpy(data)
    inp.to_device(dev)
    # reduce over all axes, keepdims=1
    expected = autograd.reduce_sum(inp, None, 1)
    # frontend: trace the graph into an ONNX model
    model = sonnx.to_onnx([inp], [expected])
    # backend: execute the re-imported model
    backend = sonnx.prepare(model, device=dev)
    actual = backend.run([inp])
    np.testing.assert_array_almost_equal(
        tensor.to_numpy(expected).shape,
        tensor.to_numpy(actual[0]).shape)
def test_reduceSum_cpu(self):
    """ReduceSum ONNX round trip on CPU."""
    self._reduceSum_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_reduceSum_gpu(self):
    """ReduceSum ONNX round trip on GPU."""
    self._reduceSum_helper(gpu_dev)
def _reduceMean_helper(self, dev):
    """Round-trip autograd.reduce_mean through ONNX on *dev*,
    comparing only output shapes."""
    data = np.random.randn(3, 4, 5).astype(np.float32)
    inp = tensor.from_numpy(data)
    inp.to_device(dev)
    # reduce over all axes, keepdims=1
    expected = autograd.reduce_mean(inp, None, 1)
    # frontend: trace the graph into an ONNX model
    model = sonnx.to_onnx([inp], [expected])
    # backend: execute the re-imported model
    backend = sonnx.prepare(model, device=dev)
    actual = backend.run([inp])
    np.testing.assert_array_almost_equal(
        tensor.to_numpy(expected).shape,
        tensor.to_numpy(actual[0]).shape)
def test_reduceMean_cpu(self):
    """ReduceMean ONNX round trip on CPU."""
    self._reduceMean_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_reduceMean_gpu(self):
    """ReduceMean ONNX round trip on GPU."""
    self._reduceMean_helper(gpu_dev)
def _squeeze_helper(self, dev):
    """Round-trip autograd.squeeze through ONNX on *dev*,
    comparing only output shapes."""
    data = np.random.randn(3, 1, 2, 1, 1)
    inp = tensor.from_numpy(data)
    inp.to_device(dev)
    # drop the three singleton axes
    expected = autograd.squeeze(inp, [1, 3, 4])
    # frontend: trace the graph into an ONNX model
    model = sonnx.to_onnx([inp], [expected])
    # backend: execute the re-imported model
    backend = sonnx.prepare(model, device=dev)
    actual = backend.run([inp])
    np.testing.assert_array_almost_equal(
        tensor.to_numpy(expected).shape,
        tensor.to_numpy(actual[0]).shape)
def test_squeeze_cpu(self):
    """Squeeze ONNX round trip on CPU."""
    self._squeeze_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_squeeze_gpu(self):
    """Squeeze ONNX round trip on GPU."""
    self._squeeze_helper(gpu_dev)
def _unsqueeze_helper(self, dev):
    """Round-trip autograd.unsqueeze through ONNX on *dev*,
    comparing only output shapes."""
    data = np.random.randn(3, 2)
    inp = tensor.from_numpy(data)
    inp.to_device(dev)
    # insert singleton axes at positions 2, 4 and 5
    expected = autograd.unsqueeze(inp, [2, 4, 5])
    # frontend: trace the graph into an ONNX model
    model = sonnx.to_onnx([inp], [expected])
    # backend: execute the re-imported model
    backend = sonnx.prepare(model, device=dev)
    actual = backend.run([inp])
    np.testing.assert_array_almost_equal(
        tensor.to_numpy(expected).shape,
        tensor.to_numpy(actual[0]).shape)
def test_unsqueeze_cpu(self):
    """Unsqueeze ONNX round trip on CPU."""
    self._unsqueeze_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_unsqueeze_gpu(self):
    """Unsqueeze ONNX round trip on GPU."""
    self._unsqueeze_helper(gpu_dev)
def _slice_helper(self, dev):
    """Round-trip autograd.slice through ONNX on *dev*,
    comparing only output shapes."""
    data = np.random.randn(20, 10, 5).astype(np.float32)
    starts, ends, axes, steps = [0, 0], [3, 10], [0, 1], [1, 1]
    inp = tensor.from_numpy(data)
    inp.to_device(dev)
    expected = autograd.slice(inp, starts, ends, axes, steps)
    # frontend: trace the graph into an ONNX model
    model = sonnx.to_onnx([inp], [expected])
    # backend: execute the re-imported model
    backend = sonnx.prepare(model, device=dev)
    actual = backend.run([inp])
    np.testing.assert_array_almost_equal(
        tensor.to_numpy(expected).shape,
        tensor.to_numpy(actual[0]).shape)
def test_slice_cpu(self):
    """Slice ONNX round trip on CPU."""
    self._slice_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_slice_gpu(self):
    """Slice ONNX round trip on GPU."""
    self._slice_helper(gpu_dev)
# # TODO: multi-output operators are not supported yet
# def _split_helper(self, dev):
# X = np.array([1., 2., 3., 4., 5., 6.]).astype(np.float32)
# x = tensor.from_numpy(X)
# x.to_device(dev)
# y = autograd.split(x, 0, (2, 4))
# # frontend
# model = sonnx.to_onnx([x], [*y])
# # print('The model is:\n{}'.format(model))
# # backend
# sg_ir = sonnx.prepare(model, device=dev)
# y_t = sg_ir.run([x])[0]
# np.testing.assert_array_almost_equal(tensor.to_numpy(y).shape, tensor.to_numpy(y_t).shape)
# def test_split_cpu(self):
# self._split_helper(cpu_dev)
# @unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
# def test_split_gpu(self):
# self._split_helper(gpu_dev)
def _gather_helper(self, dev):
    """Round-trip autograd.gather through ONNX on *dev*,
    comparing only output shapes."""
    data = np.array([0, 1, 2]).astype(np.float32)
    inp = tensor.from_numpy(data)
    inp.to_device(dev)
    # gather indices [0, 1, 3] along axis 0 (index 3 kept from original)
    expected = autograd.gather(inp, 0, [0, 1, 3])
    # frontend: trace the graph into an ONNX model
    model = sonnx.to_onnx([inp], [expected])
    # backend: execute the re-imported model
    backend = sonnx.prepare(model, device=dev)
    actual = backend.run([inp])
    np.testing.assert_array_almost_equal(
        tensor.to_numpy(expected).shape,
        tensor.to_numpy(actual[0]).shape)
def test_gather_cpu(self):
    """Gather ONNX round trip on CPU."""
    self._gather_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_gather_gpu(self):
    """Gather ONNX round trip on GPU."""
    self._gather_helper(gpu_dev)
def _tile_helper(self, dev):
    """Round-trip autograd.tile through ONNX on *dev*,
    comparing only output shapes."""
    data = np.array([0, 1, 2]).astype(np.float32)
    inp = tensor.from_numpy(data)
    inp.to_device(dev)
    expected = autograd.tile(inp, [2, 2])
    # frontend: trace the graph into an ONNX model
    model = sonnx.to_onnx([inp], [expected])
    # backend: execute the re-imported model
    backend = sonnx.prepare(model, device=dev)
    actual = backend.run([inp])
    np.testing.assert_array_almost_equal(
        tensor.to_numpy(expected).shape,
        tensor.to_numpy(actual[0]).shape)
def test_tile_cpu(self):
    """Tile ONNX round trip on CPU."""
    self._tile_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_tile_gpu(self):
    """Tile ONNX round trip on GPU."""
    self._tile_helper(gpu_dev)
def _nonzero_helper(self, dev):
    """Round-trip autograd.nonzero through ONNX on *dev*,
    comparing only output shapes."""
    data = np.array([[1, 0], [1, 1]]).astype(np.float32)
    inp = tensor.from_numpy(data)
    inp.to_device(dev)
    expected = autograd.nonzero(inp)
    # frontend: trace the graph into an ONNX model
    model = sonnx.to_onnx([inp], [expected])
    # backend: execute the re-imported model
    backend = sonnx.prepare(model, device=dev)
    actual = backend.run([inp])
    np.testing.assert_array_almost_equal(
        tensor.to_numpy(expected).shape,
        tensor.to_numpy(actual[0]).shape)
def test_nonzero_cpu(self):
    """NonZero ONNX round trip on CPU."""
    self._nonzero_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_nonzero_gpu(self):
    """NonZero ONNX round trip on GPU."""
    self._nonzero_helper(gpu_dev)
def _cast_helper(self, dev):
    """Round-trip autograd.cast (float32 -> int32) through ONNX on
    *dev*, comparing only output shapes."""
    data = np.array([[1, 0], [1, 1]]).astype(np.float32)
    inp = tensor.from_numpy(data)
    inp.to_device(dev)
    expected = autograd.cast(inp, tensor.int32)
    # frontend: trace the graph into an ONNX model
    model = sonnx.to_onnx([inp], [expected])
    # backend: execute the re-imported model
    backend = sonnx.prepare(model, device=dev)
    actual = backend.run([inp])
    np.testing.assert_array_almost_equal(
        tensor.to_numpy(expected).shape,
        tensor.to_numpy(actual[0]).shape)
def test_cast_cpu(self):
    """Cast ONNX round trip on CPU."""
    self._cast_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_cast_gpu(self):
    """Cast ONNX round trip on GPU."""
    self._cast_helper(gpu_dev)
def _onehot_helper(self, dev):
    """Round-trip autograd.onehot through ONNX on *dev*,
    comparing only output shapes."""
    axis = 1
    on_value = 3
    off_value = 1
    output_type = np.float32
    indices = np.array([[1, 9], [2, 4]], dtype=np.float32)
    depth = np.array([10], dtype=np.float32)
    # ONNX OneHot convention: values = [off_value, on_value]
    values = np.array([off_value, on_value], dtype=output_type)
    inp = tensor.from_numpy(indices)
    inp.to_device(dev)
    expected = autograd.onehot(axis, inp, depth, values)
    # frontend: trace the graph into an ONNX model
    model = sonnx.to_onnx([inp], [expected])
    # backend: execute the re-imported model
    backend = sonnx.prepare(model, device=dev)
    actual = backend.run([inp])
    self.check_shape(
        tensor.to_numpy(expected).shape,
        tensor.to_numpy(actual[0]).shape)
def test_onehot_cpu(self):
    """OneHot ONNX round trip on CPU."""
    self._onehot_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_onehot_gpu(self):
    """OneHot ONNX round trip on GPU."""
    self._onehot_helper(gpu_dev)
def _inference_helper(self, dev):
    """Check partial execution of an imported ONNX graph: running all
    but the last layer must reproduce the first conv's output."""
    inp = tensor.Tensor(shape=(2, 3, 3, 3), device=dev)
    inp.gaussian(0.0, 1.0)
    hidden = autograd.Conv2d(3, 1, 2)(inp)
    out = autograd.Conv2d(1, 1, 2)(hidden)
    # frontend: trace the two-conv graph into an ONNX model
    model = sonnx.to_onnx([inp], [out])
    # backend: run the imported graph but stop before the last layer
    backend = sonnx.prepare(model, device=dev)
    actual = backend.run([inp], last_layers=-1)
    np.testing.assert_array_almost_equal(tensor.to_numpy(hidden),
                                         tensor.to_numpy(actual[0]),
                                         decimal=5)
def test_inference_cpu(self):
    """Partial-graph inference on CPU."""
    self._inference_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_inference_gpu(self):
    """Partial-graph inference on GPU."""
    self._inference_helper(gpu_dev)
def _retraining_helper(self, dev):
    """Train a small conv net, export it to ONNX, re-import it, mark
    every imported tensor trainable and run one more training step.

    Exercises the full train -> export -> import -> retrain cycle.
    """
    # forward
    x = tensor.Tensor(shape=(2, 3, 3, 3), device=dev)
    x.gaussian(0.0, 1.0)
    x1 = autograd.Conv2d(3, 1, 2)(x)
    x2 = autograd.Conv2d(1, 1, 2)(x1)
    y = autograd.Flatten()(x2)[0]
    y_t = tensor.Tensor(shape=(2, 1), device=dev)
    y_t.gaussian(0.0, 1.0)
    loss = autograd.MeanSquareError()(y, y_t)[0]
    # backward
    sgd = opt.SGD(lr=0.01)
    for p, gp in autograd.backward(loss):
        sgd.update(p, gp)
    sgd.step()
    # frontend
    model = sonnx.to_onnx([x], [y])
    # print('The model is:\n{}'.format(model))
    # backend
    sg_ir = sonnx.prepare(model, device=dev)
    for idx, tens in sg_ir.tensor_map.items():
        # allow the imported tensors to receive and store gradients
        tens.requires_grad = True
        tens.stores_grad = True
        sg_ir.tensor_map[idx] = tens
    # forward
    y_o = sg_ir.run([x])[0]
    # backward: one further SGD step on the imported graph
    loss = autograd.MeanSquareError()(y_o, y_t)[0]
    sgd = opt.SGD(lr=0.01)
    for p, gp in autograd.backward(loss):
        sgd.update(p, gp)
    sgd.step()
def test_retraining_cpu(self):
    # retraining round trip on CPU
    self._retraining_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_retraining_gpu(self):
    # retraining round trip on GPU
    self._retraining_helper(gpu_dev)
def _transfer_learning_helper(self, dev):
    """Use an imported ONNX graph as a frozen feature extractor and
    train a freshly attached conv layer on top of it."""
    # forward
    x = tensor.Tensor(shape=(2, 3, 3, 3), device=dev)
    x.gaussian(0.0, 1.0)
    x1 = autograd.Conv2d(3, 1, 2)(x)
    y = autograd.Flatten()(x1)[0]
    y_t = tensor.Tensor(shape=(2, 4), device=dev)
    y_t.gaussian(0.0, 1.0)
    loss = autograd.MeanSquareError()(y, y_t)[0]
    # backward
    sgd = opt.SGD(lr=0.01)
    for p, gp in autograd.backward(loss):
        sgd.update(p, gp)
    sgd.step()
    # frontend
    model = sonnx.to_onnx([x], [y])
    # print('The model is:\n{}'.format(model))
    # backend
    sg_ir = sonnx.prepare(model, device=dev)
    # forward: run the imported graph up to (but excluding) its last layer,
    # then stack a new conv layer on the intermediate output
    x1 = sg_ir.run([x], last_layers=-1)[0]
    x2 = autograd.Conv2d(1, 1, 2)(x1)
    y_o = autograd.Flatten()(x2)[0]
    # backward: only the new layer's parameters are updated
    y_ot = tensor.Tensor(shape=(2, 1), device=dev)
    y_ot.gaussian(0.0, 1.0)
    loss = autograd.MeanSquareError()(y_o, y_ot)[0]
    sgd = opt.SGD(lr=0.01)
    for p, gp in autograd.backward(loss):
        sgd.update(p, gp)
    sgd.step()
def test_transfer_learning_cpu(self):
    # transfer-learning round trip on CPU
    self._transfer_learning_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_transfer_learning_gpu(self):
    # transfer-learning round trip on GPU
    self._transfer_learning_helper(gpu_dev)
if __name__ == '__main__':
    # run the whole ONNX frontend/backend round-trip suite
    unittest.main()
|
nudles/incubator-singa
|
test/python/test_onnx.py
|
Python
|
apache-2.0
| 64,684
|
[
"Gaussian"
] |
7b3a2bfb10397071844334266179f68e450b1ad20e71c08228e9c5b6490115f5
|
# -*- coding: utf-8 -*-
"""
sphinx.writers.texinfo
~~~~~~~~~~~~~~~~~~~~~~
Custom docutils writer for Texinfo.
:copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import string
import textwrap
from os import path
from docutils import nodes, writers
from sphinx import addnodes, __version__
from sphinx.locale import versionlabels, _
from sphinx.util import ustrftime
from sphinx.writers.latex import collected_footnote
COPYING = """\
@quotation
%(project)s %(release)s, %(date)s
%(author)s
Copyright @copyright{} %(copyright)s
@end quotation
"""
TEMPLATE = """\
\\input texinfo @c -*-texinfo-*-
@c %%**start of header
@setfilename %(filename)s
@documentencoding UTF-8
@ifinfo
@*Generated by Sphinx """ + __version__ + """.@*
@end ifinfo
@settitle %(title)s
@defindex ge
@paragraphindent %(paragraphindent)s
@exampleindent %(exampleindent)s
@afourlatex
%(direntry)s
@c %%**end of header
@copying
%(copying)s
@end copying
@titlepage
@title %(title)s
@insertcopying
@end titlepage
@contents
@c %%** start of user preamble
%(preamble)s
@c %%** end of user preamble
@ifnottex
@node Top
@top %(title)s
@insertcopying
@end ifnottex
@c %%**start of body
%(body)s
@c %%**end of body
@bye
"""
def find_subsections(section):
    """Return a list of subsections for the given ``section``."""
    subsections = []
    for child in section.children:
        if isinstance(child, nodes.section):
            # a direct subsection: collect it, do not descend further
            subsections.append(child)
        else:
            # non-section child: its descendants may still be sections
            subsections.extend(find_subsections(child))
    return subsections
class TexinfoWriter(writers.Writer):
    """Texinfo writer for generating Texinfo documents."""
    # output formats this writer can produce
    supported = ('texinfo', 'texi')
    # docutils command-line settings exposed by this writer
    settings_spec = (
        'Texinfo Specific Options', None, (
            ("Name of the Info file", ['--texinfo-filename'], {'default': ''}),
            ('Dir entry', ['--texinfo-dir-entry'], {'default': ''}),
            ('Description', ['--texinfo-dir-description'], {'default': ''}),
            ('Category', ['--texinfo-dir-category'], {'default':
                                                          'Miscellaneous'})))
    settings_defaults = {}
    output = None
    # attributes copied from the translator after the tree walk
    visitor_attributes = ('output', 'fragment')

    def __init__(self, builder):
        writers.Writer.__init__(self)
        self.builder = builder

    def translate(self):
        # walk the doctree with the translator, then mirror its results
        self.visitor = visitor = TexinfoTranslator(self.document, self.builder)
        self.document.walkabout(visitor)
        visitor.finish()
        for attr in self.visitor_attributes:
            setattr(self, attr, getattr(visitor, attr))
class TexinfoTranslator(nodes.NodeVisitor):
    """Doctree visitor that renders a document as Texinfo source."""
    ignore_missing_images = False
    # default substitutions for the TEMPLATE document skeleton;
    # init_settings() overrides these from builder config/settings
    default_elements = {
        'author': '',
        'body': '',
        'copying': '',
        'date': '',
        'direntry': '',
        'exampleindent': 4,
        'filename': '',
        'paragraphindent': 2,
        'preamble': '',
        'project': '',
        'release': '',
        'title': '',
    }
def __init__(self, document, builder):
    """Set up bookkeeping state and pre-scan the document for node
    names, menus and relative links before the actual tree walk."""
    nodes.NodeVisitor.__init__(self, document)
    self.builder = builder
    self.init_settings()
    self.written_ids = set()        # node names and anchors in output
    self.referenced_ids = set()     # node names and anchors that should
                                    # be in output
    self.indices = []               # (node name, content)
    self.short_ids = {}             # anchors --> short ids
    self.node_names = {}            # node name --> node's name to display
    self.node_menus = {}            # node name --> node's menu entries
    self.rellinks = {}              # node name --> (next, previous, up)
    # pre-scan passes; order matters: names before menus before links
    self.collect_indices()
    self.collect_node_names()
    self.collect_node_menus()
    self.collect_rellinks()
    self.body = []                  # output fragments
    self.context = []
    self.previous_section = None
    self.section_level = 0
    self.seen_title = False
    self.next_section_ids = set()   # anchors to emit after next @node
    self.escape_newlines = 0        # >0 while inside constructs that
                                    # cannot contain raw newlines
    self.curfilestack = []
    self.footnotestack = []
    self.in_footnote = 0
    self.handled_abbrs = set()
def finish(self):
    """Finalize: emit index nodes, patch dangling xrefs with dummy
    anchors, and assemble the final Texinfo output string."""
    if self.previous_section is None:
        # no sections were visited; still emit the Top menu
        self.add_menu('Top')
    for index in self.indices:
        name, content = index
        pointers = tuple([name] + self.rellinks[name])
        self.body.append('\n@node %s,%s,%s,%s\n' % pointers)
        self.body.append('@unnumbered %s\n\n%s\n' % (name, content))
    while self.referenced_ids:
        # handle xrefs with missing anchors
        r = self.referenced_ids.pop()
        if r not in self.written_ids:
            self.body.append('@anchor{%s}@w{%s}\n' % (r, ' ' * 30))
    self.fragment = ''.join(self.body).strip() + '\n'
    self.elements['body'] = self.fragment
    self.output = TEMPLATE % self.elements
## Helper routines

def init_settings(self):
    """Populate ``self.elements`` (the TEMPLATE substitutions) from
    the docutils settings and the builder configuration."""
    settings = self.settings = self.document.settings
    elements = self.elements = self.default_elements.copy()
    elements.update({
        # if empty, the title is set to the first section title
        'title': settings.title,
        'author': settings.author,
        # if empty, use basename of input file
        'filename': settings.texinfo_filename,
        'release': self.escape(self.builder.config.release),
        'project': self.escape(self.builder.config.project),
        'copyright': self.escape(self.builder.config.copyright),
        'date': self.escape(self.builder.config.today or
                            ustrftime(self.builder.config.today_fmt
                                      or _('%B %d, %Y')))
    })
    # title
    title = elements['title']
    if not title:
        title = self.document.next_node(nodes.title)
        title = (title and title.astext()) or '<untitled>'
    elements['title'] = self.escape_id(title) or '<untitled>'
    # filename: derive from the source name, always ending in .info
    if not elements['filename']:
        elements['filename'] = self.document.get('source') or 'untitled'
        if elements['filename'][-4:] in ('.txt', '.rst'):
            elements['filename'] = elements['filename'][:-4]
        elements['filename'] += '.info'
    # direntry: optional entry for the top-level Info "dir" file
    if settings.texinfo_dir_entry:
        entry = self.format_menu_entry(
            self.escape_menu(settings.texinfo_dir_entry),
            '(%s)' % elements['filename'],
            self.escape_arg(settings.texinfo_dir_description))
        elements['direntry'] = ('@dircategory %s\n'
                                '@direntry\n'
                                '%s'
                                '@end direntry\n') % (
            self.escape_id(settings.texinfo_dir_category), entry)
    elements['copying'] = COPYING % elements
    # allow the user to override them all
    elements.update(settings.texinfo_elements)
def collect_node_names(self):
    """Generates a unique id for each section.

    Assigns the attribute ``node_name`` to each section."""
    # must have a "Top" node
    self.document['node_name'] = 'Top'
    self.node_names['Top'] = 'Top'
    self.written_ids.update(('Top', 'top'))
    # each index is a node
    for name, content in self.indices:
        self.node_names[name] = name
        self.written_ids.add(name)
    # each section is also a node
    for section in self.document.traverse(nodes.section):
        title = section.next_node(nodes.Titular)
        name = (title and title.astext()) or '<untitled>'
        node_id = self.escape_id(name) or '<untitled>'
        assert node_id and name
        # disambiguate duplicate titles with a '<n>' suffix
        nth, suffix = 1, ''
        while node_id + suffix in self.written_ids:
            nth += 1
            suffix = '<%s>' % nth
        node_id += suffix
        assert node_id not in self.node_names
        assert node_id not in self.written_ids
        section['node_name'] = node_id
        self.node_names[node_id] = name
        self.written_ids.add(node_id)
def collect_node_menus(self):
    """Collect the menu entries for each "node" section."""
    node_menus = self.node_menus
    for node in ([self.document] +
                 self.document.traverse(nodes.section)):
        assert 'node_name' in node and node['node_name']
        entries = [s['node_name'] for s in find_subsections(node)]
        node_menus[node['node_name']] = entries
    # try to find a suitable "Top" node: if the document has a single
    # top-level section, promote it to be the Top node
    title = self.document.next_node(nodes.title)
    top = (title and title.parent) or self.document
    if not isinstance(top, (nodes.document, nodes.section)):
        top = self.document
    if top is not self.document:
        entries = node_menus[top['node_name']]
        entries += node_menus['Top'][1:]
        node_menus['Top'] = entries
        del node_menus[top['node_name']]
        top['node_name'] = 'Top'
    # handle the indices: they are leaf nodes listed under Top
    for name, content in self.indices:
        node_menus[name] = ()
        node_menus['Top'].append(name)
def collect_rellinks(self):
    """Collect the relative links (next, previous, up) for each "node".

    ``self.rellinks`` maps node name -> [next, previous, up].
    """
    rellinks = self.rellinks
    node_menus = self.node_menus
    for id, entries in list(node_menus.items()):
        rellinks[id] = ['', '', '']
    # up's: each menu entry's parent is the menu's owner
    for id, entries in list(node_menus.items()):
        for e in entries:
            rellinks[e][2] = id
    # next's and prev's: siblings within one menu
    # (note: the inner loop deliberately rebinds ``id`` to the entry)
    for id, entries in list(node_menus.items()):
        for i, id in enumerate(entries):
            # First child's prev is empty
            if i != 0:
                rellinks[id][1] = entries[i-1]
            # Last child's next is empty
            if i != len(entries) - 1:
                rellinks[id][0] = entries[i+1]
    # top's next is its first child
    try:
        first = node_menus['Top'][0]
    except IndexError:
        pass
    else:
        rellinks['Top'][0] = first
        rellinks[first][1] = 'Top'
## Escaping
# Which characters to escape depends on the context. In some cases,
# namely menus and node names, it's not possible to escape certain
# characters.
def escape(self, s):
    """Return a string with Texinfo command characters escaped."""
    # order matters: '@' must be doubled first, since the later
    # rules insert new '@'-commands of their own
    replacements = (
        ('@', '@@'),
        ('{', '@{'),
        ('}', '@}'),
        # prevent `` and '' quote conversion
        ('``', "`@w{`}"),
        ("''", "'@w{'}"),
    )
    for old, new in replacements:
        s = s.replace(old, new)
    # prevent "--" from being converted to an "em dash"
    # s = s.replace('-', '@w{-}')
    return s
def escape_arg(self, s):
    """Return an escaped string suitable for use as an argument
    to a Texinfo command."""
    escaped = self.escape(s)
    # commas are the argument delimiters, so neutralise them
    escaped = escaped.replace(',', '@comma{}')
    # collapse all whitespace runs to single spaces
    return ' '.join(escaped.split()).strip()
def escape_id(self, s):
    """Return an escaped string suitable for node names and anchors."""
    # these characters are not representable in node names
    for bad in ',:.()':
        s = s.replace(bad, ' ')
    return self.escape(' '.join(s.split()).strip())
def escape_menu(self, s):
    """Return an escaped string suitable for menu entries."""
    # a colon would terminate the menu entry name prematurely
    cleaned = self.escape_arg(s).replace(':', ';')
    return ' '.join(cleaned.split()).strip()
def ensure_eol(self):
    """Ensure the last line in body is terminated by new line."""
    if not self.body:
        return
    if not self.body[-1].endswith('\n'):
        self.body.append('\n')
def format_menu_entry(self, name, node_name, desc):
    """Render one Texinfo menu line, wrapping *desc* to 78 columns."""
    if name == node_name:
        entry = '* %s:: ' % (name,)
    else:
        entry = '* %s: %s. ' % (name, node_name)
    # NOTE(review): '% 78' looks odd for an indent computation (an
    # overlong name wraps the offset around); kept as in the original.
    indent = max((24, (len(name) + 4) % 78))
    wrapped = textwrap.wrap(desc, width=78 - indent)
    body = '\n'.join(' ' * indent + line for line in wrapped)
    return entry + body.strip() + '\n'
def add_menu_entries(self, entries, reg=re.compile(r'\s+---?\s+')):
    """Append formatted menu lines for *entries* to the body.

    A node title containing " -- " or " --- " is split into a name
    and a trailing description for the menu line.
    """
    for entry in entries:
        name = self.node_names[entry]
        # special formatting for entries that are divided by an em-dash
        parts = reg.split(name, 1)
        if len(parts) == 2:
            name, desc = parts
        else:
            desc = ''
        name = self.escape_menu(name)
        desc = self.escape(desc)
        self.body.append(self.format_menu_entry(name, entry, desc))
def add_menu(self, node_name):
    """Emit the @menu block for *node_name*; for 'Top' additionally
    emit the recursive @detailmenu listing."""
    entries = self.node_menus[node_name]
    if not entries:
        return
    self.body.append('\n@menu\n')
    self.add_menu_entries(entries)
    # if the first child has no children of its own there is nothing
    # to put in a detailed listing; close the menu early
    if not self.node_menus[entries[0]]:
        self.body.append('\n@end menu\n')
        return

    def _add_detailed_menu(name):
        # recursively list each node followed by its own entries
        entries = self.node_menus[name]
        if not entries:
            return
        self.body.append('\n%s\n\n' % (self.escape(self.node_names[name],)))
        self.add_menu_entries(entries)
        for subentry in entries:
            _add_detailed_menu(subentry)

    if node_name == 'Top':
        self.body.append('\n@detailmenu\n'
                         ' --- The Detailed Node Listing ---\n')
    for entry in entries:
        _add_detailed_menu(entry)
    if node_name == 'Top':
        self.body.append('\n@end detailmenu')
    self.body.append('\n@end menu\n\n')
def tex_image_length(self, width_str):
    """Convert an rst image width into a TeX dimension string.

    Pixel (or unit-less) widths return '' so TeX keeps the natural
    size; percentages are scaled against a4paper's text width; any
    other unit string is passed through unchanged.
    """
    # raw string: '\d' and '\s' are invalid escape sequences in a
    # plain string literal (DeprecationWarning, a SyntaxError in
    # future Python versions)
    match = re.match(r'(\d*\.?\d*)\s*(\S*)', width_str)
    if not match:
        # fallback
        return width_str
    res = width_str
    amount, unit = match.groups()[:2]
    if not unit or unit == "px":
        # pixels: let TeX alone
        return ''
    elif unit == "%":
        # a4paper: textwidth=418.25368pt
        res = "%d.0pt" % (float(amount) * 4.1825368)
    return res
def collect_indices(self):
    """Build (node name, menu content) pairs for every configured
    domain index, plus the general @printindex node."""
    def generate(content, collapsed):
        # render the index entries as a Texinfo menu; ``collapsed``
        # is accepted for interface parity but unused here
        ret = ['\n@menu\n']
        for letter, entries in content:
            for entry in entries:
                if not entry[3]:
                    continue
                name = self.escape_menu(entry[0])
                sid = self.get_short_id('%s:%s' % (entry[2], entry[3]))
                desc = self.escape_arg(entry[6])
                me = self.format_menu_entry(name, sid, desc)
                ret.append(me)
        ret.append('@end menu\n')
        return ''.join(ret)

    indices_config = self.builder.config.texinfo_domain_indices
    if indices_config:
        for domain in self.builder.env.domains.values():
            for indexcls in domain.indices:
                indexname = '%s-%s' % (domain.name, indexcls.name)
                if isinstance(indices_config, list):
                    # a list config acts as a whitelist of index names
                    if indexname not in indices_config:
                        continue
                content, collapsed = indexcls(domain).generate(
                    self.builder.docnames)
                if not content:
                    continue
                node_name = self.escape_id(indexcls.localname)
                self.indices.append((node_name,
                                     generate(content, collapsed)))
    self.indices.append((_('Index'), '\n@printindex ge\n'))
# this is copied from the latex writer
# TODO: move this to sphinx.util

def collect_footnotes(self, node):
    """Map footnote label -> [collected_footnote node, used-flag] for
    all footnotes beneath *node* (not crossing file boundaries)."""
    fnotes = {}

    def footnotes_under(n):
        # depth-first generator over footnote nodes, stopping at
        # start_of_file boundaries (those belong to another document)
        if isinstance(n, nodes.footnote):
            yield n
        else:
            for c in n.children:
                if isinstance(c, addnodes.start_of_file):
                    continue
                for k in footnotes_under(c):
                    yield k

    for fn in footnotes_under(node):
        num = fn.children[0].astext().strip()
        fnotes[num] = [collected_footnote(*fn.children), False]
    return fnotes
## xref handling
def get_short_id(self, id):
    """Return a shorter 'id' associated with ``id``."""
    # Shorter ids improve paragraph filling in places that the id is
    # hidden by Emacs.  Ids are numbered in hex in insertion order.
    if id not in self.short_ids:
        self.short_ids[id] = hex(len(self.short_ids))[2:]
    return self.short_ids[id]
def add_anchor(self, id, node):
    """Emit @anchor commands (full and short form) for *id*,
    prefixed with the current source file name."""
    if id.startswith('index-'):
        return
    id = self.curfilestack[-1] + ':' + id
    eid = self.escape_id(id)
    sid = self.get_short_id(id)
    for id in (eid, sid):
        if id not in self.written_ids:
            self.body.append('@anchor{%s}' % id)
            self.written_ids.add(id)

def add_xref(self, id, name, node):
    """Emit a @pxref to the short id of *id*, recording both the
    short and the full id as referenced (see finish())."""
    name = self.escape_menu(name)
    sid = self.get_short_id(id)
    self.body.append('@pxref{%s,,%s}' % (sid, name))
    self.referenced_ids.add(sid)
    self.referenced_ids.add(self.escape_id(id))
## Visiting

def visit_document(self, node):
    # track per-file footnotes and the current docname (used to
    # prefix anchors)
    self.footnotestack.append(self.collect_footnotes(node))
    self.curfilestack.append(node.get('docname', ''))
    if 'docname' in node:
        self.add_anchor(':doc', node)

def depart_document(self, node):
    self.footnotestack.pop()
    self.curfilestack.pop()

def visit_Text(self, node):
    s = self.escape(node.astext())
    if self.escape_newlines:
        # raw newlines would break the enclosing construct
        s = s.replace('\n', ' ')
    self.body.append(s)

def depart_Text(self, node):
    pass
def visit_section(self, node):
    # anchors for this section are emitted after its @node command
    self.next_section_ids.update(node.get('ids', []))
    if not self.seen_title:
        return
    # close the previous section's menu before starting a new node
    if self.previous_section:
        self.add_menu(self.previous_section['node_name'])
    else:
        self.add_menu('Top')
    node_name = node['node_name']
    pointers = tuple([node_name] + self.rellinks[node_name])
    self.body.append('\n@node %s,%s,%s,%s\n' % pointers)
    for id in self.next_section_ids:
        self.add_anchor(id, node)
    self.next_section_ids.clear()
    self.previous_section = node
    self.section_level += 1

def depart_section(self, node):
    self.section_level -= 1

# Texinfo sectioning commands indexed by section nesting level
headings = (
    '@unnumbered',
    '@chapter',
    '@section',
    '@subsection',
    '@subsubsection',
)
# heading commands (no node structure) used for rubrics
rubrics = (
    '@heading',
    '@subheading',
    '@subsubheading',
)
def visit_title(self, node):
    # the document's first title becomes the @settitle, not a heading
    if not self.seen_title:
        self.seen_title = 1
        raise nodes.SkipNode
    parent = node.parent
    if isinstance(parent, nodes.table):
        return
    if isinstance(parent, (nodes.Admonition, nodes.sidebar, nodes.topic)):
        raise nodes.SkipNode
    elif not isinstance(parent, nodes.section):
        self.builder.warn(
            'encountered title node not in section, topic, table, '
            'admonition or sidebar', (self.curfilestack[-1], node.line))
        self.visit_rubric(node)
    else:
        # pick the sectioning command for the current nesting level,
        # clamping to the deepest available command
        try:
            heading = self.headings[self.section_level]
        except IndexError:
            heading = self.headings[-1]
        self.body.append('\n%s ' % heading)

def depart_title(self, node):
    self.body.append('\n\n')

def visit_rubric(self, node):
    # the auto-generated "Footnotes" rubric is dropped: footnotes are
    # rendered inline in Texinfo
    if len(node.children) == 1 and node.children[0].astext() in \
            ('Footnotes', _('Footnotes')):
        raise nodes.SkipNode
    try:
        rubric = self.rubrics[self.section_level]
    except IndexError:
        rubric = self.rubrics[-1]
    self.body.append('\n%s ' % rubric)

def depart_rubric(self, node):
    self.body.append('\n\n')

def visit_subtitle(self, node):
    self.body.append('\n\n@noindent\n')

def depart_subtitle(self, node):
    self.body.append('\n\n')
## References

def visit_target(self, node):
    # postpone the labels until after the sectioning command
    parindex = node.parent.index(node)
    try:
        try:
            next = node.parent[parindex+1]
        except IndexError:
            # last node in parent, look at next after parent
            # (for section of equal level)
            next = node.parent.parent[node.parent.parent.index(node.parent)]
        if isinstance(next, nodes.section):
            if node.get('refid'):
                self.next_section_ids.add(node['refid'])
            self.next_section_ids.update(node['ids'])
            return
    except IndexError:
        pass
    if 'refuri' in node:
        # external target: nothing to anchor locally
        return
    if node.get('refid'):
        self.add_anchor(node['refid'], node)
    for id in node['ids']:
        self.add_anchor(id, node)

def depart_target(self, node):
    pass
def visit_reference(self, node):
    """Emit the appropriate Texinfo cross-reference or URL command,
    dispatching on the reference URI's scheme/prefix."""
    # an xref's target is displayed in Info so we ignore a few
    # cases for the sake of appearance
    if isinstance(node.parent, (nodes.title, addnodes.desc_type)):
        return
    if isinstance(node[0], nodes.image):
        return
    name = node.get('name', node.astext()).strip()
    uri = node.get('refuri', '')
    if not uri and node.get('refid'):
        # internal reference: encode as '%<docname>#<refid>'
        uri = '%' + self.curfilestack[-1] + '#' + node['refid']
    if not uri:
        return
    if uri.startswith('mailto:'):
        uri = self.escape_arg(uri[7:])
        name = self.escape_arg(name)
        if not name or name == uri:
            self.body.append('@email{%s}' % uri)
        else:
            self.body.append('@email{%s,%s}' % (uri, name))
    elif uri.startswith('#'):
        # references to labels in the same document
        id = self.curfilestack[-1] + ':' + uri[1:]
        self.add_xref(id, name, node)
    elif uri.startswith('%'):
        # references to documents or labels inside documents
        hashindex = uri.find('#')
        if hashindex == -1:
            # reference to the document
            id = uri[1:] + '::doc'
        else:
            # reference to a label
            id = uri[1:].replace('#', ':')
        self.add_xref(id, name, node)
    elif uri.startswith('info:'):
        # references to an external Info file
        uri = uri[5:].replace('_', ' ')
        uri = self.escape_arg(uri)
        id = 'Top'
        if '#' in uri:
            uri, id = uri.split('#', 1)
        id = self.escape_id(id)
        name = self.escape_menu(name)
        if name == id:
            self.body.append('@pxref{%s,,,%s}' % (id, uri))
        else:
            self.body.append('@pxref{%s,,%s,%s}' % (id, name, uri))
    else:
        # plain external URL; rendering style is configurable
        uri = self.escape_arg(uri)
        name = self.escape_arg(name)
        show_urls = self.builder.config.texinfo_show_urls
        if self.in_footnote:
            # avoid nesting a footnote inside a footnote
            show_urls = 'inline'
        if not name or uri == name:
            self.body.append('@indicateurl{%s}' % uri)
        elif show_urls == 'inline':
            self.body.append('@uref{%s,%s}' % (uri, name))
        elif show_urls == 'no':
            self.body.append('@uref{%s,,%s}' % (uri, name))
        else:
            self.body.append('%s@footnote{%s}' % (name, uri))
    raise nodes.SkipNode

def depart_reference(self, node):
    pass

def visit_title_reference(self, node):
    text = node.astext()
    self.body.append('@cite{%s}' % self.escape_arg(text))
    raise nodes.SkipNode
## Blocks

def visit_paragraph(self, node):
    # suppress the paragraph indent when continuing previous content
    if 'continued' in node or isinstance(node.parent, nodes.compound):
        self.body.append('\n@noindent')
    self.body.append('\n')

def depart_paragraph(self, node):
    self.body.append('\n')

def visit_block_quote(self, node):
    self.body.append('\n@quotation\n')

def depart_block_quote(self, node):
    self.ensure_eol()
    self.body.append('@end quotation\n')

def visit_literal_block(self, node):
    self.body.append('\n@example\n')

def depart_literal_block(self, node):
    self.body.append('\n@end example\n\n'
                     '@noindent\n')

visit_doctest_block = visit_literal_block
depart_doctest_block = depart_literal_block

def visit_line_block(self, node):
    if not isinstance(node.parent, nodes.line_block):
        self.body.append('\n\n')
    self.body.append('@display\n')

def depart_line_block(self, node):
    self.body.append('@end display\n')
    if not isinstance(node.parent, nodes.line_block):
        self.body.append('\n\n')

def visit_line(self, node):
    # lines inside @display must not contain raw newlines
    self.escape_newlines += 1

def depart_line(self, node):
    self.body.append('@w{ }\n')
    self.escape_newlines -= 1
## Inline
def visit_strong(self, node):
self.body.append('@strong{')
def depart_strong(self, node):
self.body.append('}')
def visit_emphasis(self, node):
self.body.append('@emph{')
def depart_emphasis(self, node):
self.body.append('}')
def visit_literal(self, node):
self.body.append('@code{')
def depart_literal(self, node):
self.body.append('}')
def visit_superscript(self, node):
self.body.append('@w{^')
def depart_superscript(self, node):
self.body.append('}')
def visit_subscript(self, node):
self.body.append('@w{[')
def depart_subscript(self, node):
self.body.append(']}')
## Footnotes
def visit_footnote(self, node):
raise nodes.SkipNode
def visit_collected_footnote(self, node):
self.in_footnote += 1
self.body.append('@footnote{')
def depart_collected_footnote(self, node):
self.body.append('}')
self.in_footnote -= 1
    def visit_footnote_reference(self, node):
        # Texinfo footnotes are inline, so the referenced footnote's body is
        # emitted at every reference site instead of linking back to it.
        num = node.astext().strip()
        try:
            footnode, used = self.footnotestack[-1][num]
        except (KeyError, IndexError):
            # Unknown footnote number (or empty stack): drop the reference.
            raise nodes.SkipNode
        # footnotes are repeated for each reference
        footnode.walkabout(self)
        raise nodes.SkipChildren
def visit_citation(self, node):
for id in node.get('ids'):
self.add_anchor(id, node)
def depart_citation(self, node):
pass
def visit_citation_reference(self, node):
self.body.append('@w{[')
def depart_citation_reference(self, node):
self.body.append(']}')
## Lists
def visit_bullet_list(self, node):
bullet = node.get('bullet', '*')
self.body.append('\n\n@itemize %s\n' % bullet)
def depart_bullet_list(self, node):
self.ensure_eol()
self.body.append('@end itemize\n')
def visit_enumerated_list(self, node):
# doesn't support Roman numerals
enum = node.get('enumtype', 'arabic')
starters = {'arabic': '',
'loweralpha': 'a',
'upperalpha': 'A',}
start = node.get('start', starters.get(enum, ''))
self.body.append('\n\n@enumerate %s\n' % start)
def depart_enumerated_list(self, node):
self.ensure_eol()
self.body.append('@end enumerate\n')
def visit_list_item(self, node):
self.body.append('\n@item ')
def depart_list_item(self, node):
pass
## Option List
def visit_option_list(self, node):
self.body.append('\n\n@table @option\n')
def depart_option_list(self, node):
self.ensure_eol()
self.body.append('@end table\n')
def visit_option_list_item(self, node):
pass
def depart_option_list_item(self, node):
pass
def visit_option_group(self, node):
self.at_item_x = '@item'
def depart_option_group(self, node):
pass
def visit_option(self, node):
self.body.append('\n%s ' % self.at_item_x)
self.at_item_x = '@itemx'
def depart_option(self, node):
pass
def visit_option_string(self, node):
pass
def depart_option_string(self, node):
pass
def visit_option_argument(self, node):
self.body.append(node.get('delimiter', ' '))
def depart_option_argument(self, node):
pass
def visit_description(self, node):
self.body.append('\n')
def depart_description(self, node):
pass
## Definitions
def visit_definition_list(self, node):
self.body.append('\n\n@table @asis\n')
def depart_definition_list(self, node):
self.ensure_eol()
self.body.append('@end table\n')
def visit_definition_list_item(self, node):
self.at_item_x = '@item'
def depart_definition_list_item(self, node):
pass
    def visit_term(self, node):
        # Emit a definition-list term as an @item/@itemx entry.
        for id in node.get('ids'):
            self.add_anchor(id, node)
        # anchors and indexes need to go in front
        # Iterate over a shallow copy (node[::]) because matching children
        # are removed from the node while being walked.
        for n in node[::]:
            if isinstance(n, (addnodes.index, nodes.target)):
                n.walkabout(self)
                node.remove(n)
        self.body.append('\n%s ' % self.at_item_x)
        # Additional terms for the same definition continue with @itemx.
        self.at_item_x = '@itemx'
def depart_term(self, node):
pass
def visit_termsep(self, node):
self.body.append('\n%s ' % self.at_item_x)
def depart_termsep(self, node):
pass
def visit_classifier(self, node):
self.body.append(' : ')
def depart_classifier(self, node):
pass
def visit_definition(self, node):
self.body.append('\n')
def depart_definition(self, node):
pass
## Tables
def visit_table(self, node):
self.entry_sep = '@item'
def depart_table(self, node):
self.body.append('\n@end multitable\n\n')
def visit_tabular_col_spec(self, node):
pass
def depart_tabular_col_spec(self, node):
pass
def visit_colspec(self, node):
self.colwidths.append(node['colwidth'])
if len(self.colwidths) != self.n_cols:
return
self.body.append('\n\n@multitable ')
for i, n in enumerate(self.colwidths):
self.body.append('{%s} ' %('x' * (n+2)))
def depart_colspec(self, node):
pass
def visit_tgroup(self, node):
self.colwidths = []
self.n_cols = node['cols']
def depart_tgroup(self, node):
pass
def visit_thead(self, node):
self.entry_sep = '@headitem'
def depart_thead(self, node):
pass
def visit_tbody(self, node):
pass
def depart_tbody(self, node):
pass
def visit_row(self, node):
pass
def depart_row(self, node):
self.entry_sep = '@item'
def visit_entry(self, node):
self.body.append('\n%s\n' % self.entry_sep)
self.entry_sep = '@tab'
def depart_entry(self, node):
for i in range(node.get('morecols', 0)):
self.body.append('\n@tab\n')
## Field Lists
def visit_field_list(self, node):
self.body.append('\n\n@itemize @w\n')
def depart_field_list(self, node):
self.ensure_eol()
self.body.append('@end itemize\n')
def visit_field(self, node):
if not isinstance(node.parent, nodes.field_list):
self.visit_field_list(node)
def depart_field(self, node):
if not isinstance(node.parent, nodes.field_list):
self.depart_field_list(node)
def visit_field_name(self, node):
self.body.append('\n@item ')
def depart_field_name(self, node):
self.body.append(': ')
def visit_field_body(self, node):
pass
def depart_field_body(self, node):
pass
## Admonitions
def visit_admonition(self, node, name=''):
if not name:
name = self.escape(node[0].astext())
self.body.append('\n@cartouche\n'
'@quotation %s ' % name)
def depart_admonition(self, node):
self.ensure_eol()
self.body.append('@end quotation\n'
'@end cartouche\n')
def _make_visit_admonition(typ):
def visit(self, node):
self.visit_admonition(node, self.escape(_(typ)))
return visit
visit_attention = _make_visit_admonition('Attention')
depart_attention = depart_admonition
visit_caution = _make_visit_admonition('Caution')
depart_caution = depart_admonition
visit_danger = _make_visit_admonition('Danger')
depart_danger = depart_admonition
visit_error = _make_visit_admonition('Error')
depart_error = depart_admonition
visit_important = _make_visit_admonition('Important')
depart_important = depart_admonition
visit_note = _make_visit_admonition('Note')
depart_note = depart_admonition
visit_tip = _make_visit_admonition('Tip')
depart_tip = depart_admonition
visit_hint = _make_visit_admonition('Hint')
depart_hint = depart_admonition
visit_warning = _make_visit_admonition('Warning')
depart_warning = depart_admonition
## Misc
def visit_docinfo(self, node):
raise nodes.SkipNode
def visit_generated(self, node):
raise nodes.SkipNode
def visit_header(self, node):
raise nodes.SkipNode
def visit_footer(self, node):
raise nodes.SkipNode
def visit_container(self, node):
pass
def depart_container(self, node):
pass
def visit_decoration(self, node):
pass
def depart_decoration(self, node):
pass
    def visit_topic(self, node):
        # ignore TOC's since we have to have a "menu" anyway
        if 'contents' in node.get('classes', []):
            raise nodes.SkipNode
        # Render the topic title as a rubric followed by the escaped text.
        title = node[0]
        self.visit_rubric(title)
        self.body.append('%s\n' % self.escape(title.astext()))
def depart_topic(self, node):
pass
def visit_transition(self, node):
self.body.append('\n\n@exdent @w{ %s}\n\n' % ('* ' * 30))
def depart_transition(self, node):
pass
def visit_attribution(self, node):
self.body.append('\n\n@center --- ')
def depart_attribution(self, node):
self.body.append('\n\n')
def visit_raw(self, node):
format = node.get('format', '').split()
if 'texinfo' in format or 'texi' in format:
self.body.append(node.astext())
raise nodes.SkipNode
def visit_figure(self, node):
self.body.append('\n\n@float Figure\n')
def depart_figure(self, node):
self.body.append('\n@end float\n\n')
    def visit_caption(self, node):
        # Texinfo @caption is only valid inside a @float (figure); warn and
        # emit nothing when the caption appears elsewhere.
        if not isinstance(node.parent, nodes.figure):
            self.builder.warn('caption not inside a figure.',
                              (self.curfilestack[-1], node.line))
            return
        self.body.append('\n@caption{')
def depart_caption(self, node):
if isinstance(node.parent, nodes.figure):
self.body.append('}\n')
    def visit_image(self, node):
        # Resolve the image URI through the builder's image map when known.
        if node['uri'] in self.builder.images:
            uri = self.builder.images[node['uri']]
        else:
            # missing image!
            if self.ignore_missing_images:
                return
            uri = node['uri']
        if uri.find('://') != -1:
            # ignore remote images
            return
        name, ext = path.splitext(uri)
        attrs = node.attributes
        # width and height ignored in non-tex output
        width = self.tex_image_length(attrs.get('width', ''))
        height = self.tex_image_length(attrs.get('height', ''))
        alt = self.escape_arg(attrs.get('alt', ''))
        # @image takes the extension separately, without its leading dot.
        self.body.append('\n@image{%s,%s,%s,%s,%s}\n' %
                         (name, width, height, alt, ext[1:]))
def depart_image(self, node):
pass
def visit_compound(self, node):
pass
def depart_compound(self, node):
pass
def visit_sidebar(self, node):
self.visit_topic(node)
def depart_sidebar(self, node):
self.depart_topic(node)
def visit_label(self, node):
self.body.append('@w{(')
def depart_label(self, node):
self.body.append(')} ')
def visit_legend(self, node):
pass
def depart_legend(self, node):
pass
def visit_substitution_reference(self, node):
pass
def depart_substitution_reference(self, node):
pass
def visit_substitution_definition(self, node):
raise nodes.SkipNode
def visit_system_message(self, node):
self.body.append('\n@w{----------- System Message: %s/%s -----------} '
'(%s, line %s)\n' % (
node.get('type', '?'),
node.get('level', '?'),
self.escape(node.get('source', '?')),
node.get('line', '?')))
def depart_system_message(self, node):
pass
def visit_comment(self, node):
self.body.append('\n')
for line in node.astext().splitlines():
self.body.append('@c %s\n' % line)
raise nodes.SkipNode
def visit_problematic(self, node):
self.body.append('>')
def depart_problematic(self, node):
self.body.append('<')
def unimplemented_visit(self, node):
self.builder.warn("unimplemented node type: %r" % node,
(self.curfilestack[-1], node.line))
def unknown_visit(self, node):
self.builder.warn("unknown node type: %r" % node,
(self.curfilestack[-1], node.line))
def unknown_departure(self, node):
pass
### Sphinx specific
    def visit_productionlist(self, node):
        # Render a grammar production list as an @example block, with all
        # token names left-justified to the same width.
        self.visit_literal_block(None)
        names = []
        for production in node:
            names.append(production['tokenname'])
        maxlen = max(len(name) for name in names)
        for production in node:
            if production['tokenname']:
                for id in production.get('ids'):
                    self.add_anchor(id, production)
                s = production['tokenname'].ljust(maxlen) + ' ::='
                # NOTE(review): 'lastname' is assigned but never used.
                lastname = production['tokenname']
            else:
                # Continuation line of the previous production: indent only.
                s = '%s ' % (' '*maxlen)
            self.body.append(self.escape(s))
            self.body.append(self.escape(production.astext() + '\n'))
        self.depart_literal_block(None)
        raise nodes.SkipNode
def visit_production(self, node):
pass
def depart_production(self, node):
pass
def visit_literal_emphasis(self, node):
self.body.append('@code{')
def depart_literal_emphasis(self, node):
self.body.append('}')
def visit_index(self, node):
# terminate the line but don't prevent paragraph breaks
if isinstance(node.parent, nodes.paragraph):
self.ensure_eol()
else:
self.body.append('\n')
for entry in node['entries']:
typ, text, tid, text2 = entry
text = self.escape_menu(text)
self.body.append('@geindex %s\n' % text)
def visit_refcount(self, node):
self.body.append('\n')
def depart_refcount(self, node):
self.body.append('\n')
    def visit_versionmodified(self, node):
        # Emit the "New/Changed/Deprecated in version X" label; a trailing
        # colon is used when explanatory children follow, a period otherwise.
        intro = versionlabels[node['type']] % node['version']
        if node.children:
            intro += ': '
        else:
            intro += '.'
        self.body.append('\n%s' % self.escape(intro))
def depart_versionmodified(self, node):
self.body.append('\n')
    def visit_start_of_file(self, node):
        # add a document target
        self.next_section_ids.add(':doc')
        # Track the current document and its footnotes so nested included
        # files resolve references against the right context.
        self.curfilestack.append(node['docname'])
        self.footnotestack.append(self.collect_footnotes(node))
def depart_start_of_file(self, node):
self.curfilestack.pop()
self.footnotestack.pop()
def visit_centered(self, node):
txt = self.escape_arg(node.astext())
self.body.append('\n\n@center %s\n\n' % txt)
raise nodes.SkipNode
def visit_seealso(self, node):
self.visit_topic(node)
def depart_seealso(self, node):
self.depart_topic(node)
def visit_meta(self, node):
raise nodes.SkipNode
def visit_glossary(self, node):
pass
def depart_glossary(self, node):
pass
def visit_acks(self, node):
self.body.append('\n\n')
self.body.append(', '.join(n.astext()
for n in node.children[0].children) + '.')
self.body.append('\n\n')
raise nodes.SkipNode
def visit_highlightlang(self, node):
pass
def depart_highlightlang(self, node):
pass
## Desc
def visit_desc(self, node):
self.at_deffnx = '@deffn'
def depart_desc(self, node):
self.ensure_eol()
self.body.append('@end deffn\n')
    def visit_desc_signature(self, node):
        # Open a @deffn (or continue with @deffnx) for an object description
        # signature, using the domain's human-readable type name as category.
        objtype = node.parent['objtype']
        if objtype != 'describe':
            for id in node.get('ids'):
                self.add_anchor(id, node)
        # use the full name of the objtype for the category
        try:
            domain = self.builder.env.domains[node.parent['domain']]
            primary = self.builder.config.primary_domain
            name = domain.get_type_name(domain.object_types[objtype],
                                        primary == domain.name)
        except KeyError:
            # Unknown domain/objtype: fall back to the raw objtype string.
            name = objtype
        category = self.escape_arg(string.capwords(name))
        self.body.append('\n%s {%s} ' % (self.at_deffnx, category))
        # Subsequent signatures of the same description continue the entry.
        self.at_deffnx = '@deffnx'
def depart_desc_signature(self, node):
self.body.append("\n")
def visit_desc_name(self, node):
pass
def depart_desc_name(self, node):
pass
def visit_desc_addname(self, node):
pass
def depart_desc_addname(self, node):
pass
def visit_desc_type(self, node):
pass
def depart_desc_type(self, node):
pass
def visit_desc_returns(self, node):
self.body.append(' -> ')
def depart_desc_returns(self, node):
pass
def visit_desc_parameterlist(self, node):
self.body.append(' (')
self.first_param = 1
def depart_desc_parameterlist(self, node):
self.body.append(')')
    def visit_desc_parameter(self, node):
        # Emit one signature parameter, comma-separated after the first.
        if not self.first_param:
            self.body.append(', ')
        else:
            self.first_param = 0
        text = self.escape(node.astext())
        # replace no-break spaces with normal ones
        text = text.replace(' ', '@w{ }')
        self.body.append(text)
        raise nodes.SkipNode
def visit_desc_optional(self, node):
self.body.append('[')
def depart_desc_optional(self, node):
self.body.append(']')
def visit_desc_annotation(self, node):
raise nodes.SkipNode
def visit_desc_content(self, node):
pass
def depart_desc_content(self, node):
pass
def visit_inline(self, node):
pass
def depart_inline(self, node):
pass
def visit_abbreviation(self, node):
abbr = node.astext()
self.body.append('@abbr{')
if node.hasattr('explanation') and abbr not in self.handled_abbrs:
self.context.append(',%s}' % self.escape_arg(node['explanation']))
self.handled_abbrs.add(abbr)
else:
self.context.append('}')
def depart_abbreviation(self, node):
self.body.append(self.context.pop())
def visit_download_reference(self, node):
pass
def depart_download_reference(self, node):
pass
def visit_hlist(self, node):
self.visit_bullet_list(node)
def depart_hlist(self, node):
self.depart_bullet_list(node)
def visit_hlistcol(self, node):
pass
def depart_hlistcol(self, node):
pass
def visit_pending_xref(self, node):
pass
def depart_pending_xref(self, node):
pass
|
superdesk/Live-Blog
|
documentor/libraries/Sphinx-1.1.3-py3.2/sphinx/writers/texinfo.py
|
Python
|
agpl-3.0
| 44,657
|
[
"VisIt"
] |
efb71ed98281630fbd1f0cd2fd6b55d8b9ea0cfee2cea077944b491c20ada06d
|
#!/usr/bin/python
import pysam
import string
import argparse
# The MIT License (MIT)
# Copyright (c) [2014] [Peter Hickey (peter.hickey@gmail.com)]
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
### Program description ###
############################################################################################################################################################################################
# Convert the H9_Laurent MethylC-Seq mapped reads files from the Lister et al. 2011 (Nature) paper (downloaded from http://neomorph.salk.edu/ips_methylomes/data.html on 08/05/2012) to BAM format.
############################################################################################################################################################################################
### TODOs ###
############################################################################################################################################################################################
# TODO: Might add MD and NM tags with samtools calmd
############################################################################################################################################################################################
### INPUT FILE FORMAT ###
############################################################################################################################################################################################
## assembly = chromosome name (numeric, hg18)
## strand = strand for which the read is informative ("+" = OT, "-" = OB)
## start = start of read1 (0-based position)
## end = end of read2 (1-based), i.e. intervals are of the form (start, stop] = {start + 1, start + 2, ..., stop}
## sequenceA = sequence of read1 in left-to-right-Watson-strand orientation. Sequence complemented if strand = "-"
## sequenceB = sequence of read2 in left-to-right-Watson-strand orientation. Sequence complemented if strand = "-"
## id = read-ID
# NB: start < end by definition
############################################################################################################################################################################################
### Command line passer ###
############################################################################################################################################################################################
# Build the command-line interface: three required positional arguments
# (input Lister-style file, output BAM path, and the .fai index used to
# construct the BAM header).  Parsing happens at import time.
parser = argparse.ArgumentParser(description='Convert Lister-style alignment files of MethylC-Seq data to BAM format.')
parser.add_argument('infile', metavar = 'infile',
                    help='The filename of the Lister-style file that is to be converted to BAM format')
parser.add_argument('outfile', metavar = 'out.bam',
                    help='The path to the new SAM/BAM file.')
parser.add_argument('ref_index', metavar = 'reference.fa.fai',
                    help='The path to the index (.fai file) of reference genome FASTA file.')
args = parser.parse_args()
############################################################################################################################################################################################
### Function definitions ###
# All functions return a 2-tuple - the first element for readL (the leftmost read, regardless of strand) and the second element for readR (the rightmost read, regardless of strand)
# If single-end data then only the first element should be used and the second element is set to None
#############################################################################################################################################################################################
def splitSequence(sequence, strand):
        """Split a Lister-style paired-read *sequence* into its two mates.

        The two mates are stored in one string, either separated by a run of
        '.' fill characters or directly abutted.  For OB-strand ('-') records
        the stored string is reversed first, so mate A is always leftmost.

        Returns (sequenceA, sequenceB, n_blanks), where n_blanks is the
        number of '.' fill characters between the mates.
        """
        if strand == '-':
                sequence = sequence[::-1]
        n_blanks = sequence.count('.')
        if n_blanks > 0:
                sequenceA, sequenceB = filter(None, sequence.rsplit('.'))
        else:
                # BUGFIX: the original used 'sequence[half+1:]', silently
                # dropping the first base of the second mate.  Also use '//'
                # so the index stays an int under Python 3.
                half = len(sequence) // 2
                sequenceA, sequenceB = sequence[:half], sequence[half:]
        return sequenceA, sequenceB, n_blanks
def makeRNAME(assembly):
        """Return (rnameL, rnameR): the 'chr'-prefixed reference name.

        Both mates map to the same chromosome, so the two values are equal.
        """
        rname = 'chr' + assembly
        return rname, rname
def makeQNAME(name):
        """Both mates share the record's read-ID as their QNAME."""
        return name, name
def makePOS(position, sequenceA, sequenceB, n_blanks):
        """Return the 0-based leftmost mapping positions of both mates.

        The right mate starts where the left mate plus the '.' fill ends.
        """
        left = int(position)
        right = left + len(sequenceA) + n_blanks
        return left, right
def makeFLAG(strand):
        """Compute SAM FLAG values for the left and right mates.

        Both mates are marked paired (0x1) and properly paired (0x2, forced).
        The rightmost read is on the reverse strand (0x10 on the right mate,
        mate-reverse 0x20 on the left).  For OT-strand ('+') records the left
        mate is read1; for OB-strand ('-') the right mate is read1.
        """
        PAIRED, PROPER = 0x01, 0x02
        MATE_REVERSE, REVERSE = 0x20, 0x10
        READ1, READ2 = 0x40, 0x80
        flagL = PAIRED | PROPER
        flagR = PAIRED | PROPER
        if strand == '+':
                flagL |= MATE_REVERSE | READ1
                flagR |= REVERSE | READ2
        elif strand == '-':
                flagL |= MATE_REVERSE | READ2
                flagR |= REVERSE | READ1
        return flagL, flagR
def makeMAPQ():
        """MAPQ 255 means 'mapping quality unavailable' for both mates."""
        return 255, 255
def makeCIGAR(sequenceA, sequenceB):
        """Full-length match CIGARs (pysam op code 0 == 'M') for both mates."""
        return [(0, len(sequenceA))], [(0, len(sequenceB))]
def makeRNEXT(RNAMEL, RNAMER):
        """Each mate's RNEXT is its partner's reference name (swapped)."""
        return RNAMER, RNAMEL
def makePNEXT(startL, startR):
        """Each mate's PNEXT is its partner's start position (swapped)."""
        return startR, startL
def makeTLEN(startL, startR, lenR):
        """Observed template length: positive for the left mate, negative for
        the right mate, per SAM convention."""
        span = startR + lenR - startL
        return span, -span
def makeSEQ(sequenceA, sequenceB, strand):
        """Return the SEQ fields for both mates.

        OB-strand ('-') reads are stored complemented in the Lister file and
        must be complemented back for BAM output.
        """
        if strand == '+':
                seqL, seqR = sequenceA, sequenceB
        elif strand == '-':
                seqL, seqR = DNAComplement(sequenceA), DNAComplement(sequenceB)
        return seqL, seqR
def DNAComplement(strand):
        # Complement (not reverse-complement) a DNA string; lowercase bases
        # map to their uppercase complements, other characters pass through.
        # NOTE(review): string.maketrans is a Python 2-only API, consistent
        # with the rest of this script (which also uses 'print' statements).
        return strand.translate(string.maketrans('TAGCNtagcn', 'ATCGNATCGN'))
def makeQUAL(sequenceA, sequenceB):
        """Fabricate placeholder base-quality strings of 'E' characters,
        one per base, since the input carries no per-base qualities."""
        return 'E' * len(sequenceA), 'E' * len(sequenceB)
def makeXG(strand):
        """Build the XG (genome conversion) tag for both mates: 'CT' for
        OT-strand ('+') records, 'GA' for OB-strand ('-').  Both mates carry
        the same value."""
        if strand == '+':
                XG = 'CT'
        elif strand == '-':
                XG = 'GA'
        tag = ('XG', XG)
        return tag, tag
def createHeader():
        # Build the BAM header dict from the reference FASTA index (.fai):
        # one SQ entry per reference sequence (name = column 1, length =
        # column 2 of each .fai line), plus HD and PG metadata recording how
        # this file was produced.  Reads the module-level 'args'.
        FAIDX = open(args.ref_index, 'r')
        faidx = FAIDX.read().rstrip().rsplit('\n')
        hd = {'VN': '1.0', 'SO': 'unsorted'}
        sq = []
        for i in range(0, len(faidx)):
                line = faidx[i].rsplit('\t')
                sq.append({'LN': int(line[1]), 'SN': line[0], 'AS': 'hg18+lambda_phage'})
        pgid = 'Lister_style_6_to_bam.py'
        vn = '1.0'
        cl = ' '.join([pgid, args.infile, args.outfile, args.ref_index])
        pg = [{'ID': pgid, 'VN': vn, 'CL': cl}]
        header = {'HD': hd, 'SQ': sq, 'PG': pg}
        FAIDX.close()
        return header
#############################################################################################################################################################################################
### Open files ###
############################################################################################################################################################################################
# Open the input stream, build the BAM header from the .fai index, and open
# the output BAM for writing.
INFILE = open(args.infile, 'r')
header = createHeader()
BAM = pysam.Samfile(args.outfile, 'wb', header = header)
############################################################################################################################################################################################
### The main loop ###
############################################################################################################################################################################################
# Loop over methylC_seq_reads files file-by-file (i.e. chromosome-by-chromosome)
print 'Input file is', args.infile
linecounter = 1
for line in INFILE: # Loop over the file line-by-line and convert to an AlignedRead instance
line = line.rstrip('\n').rsplit('\t')
# Fields of the Lister-style file
locations = line[0]
readlength = line[1]
score = line[2]
assembly = line[3]
strand = line[4]
position = line[5]
name = line[6]
copies = line[7]
sequence = line[8]
quality = line[9]
sequenceA, sequenceB, n_blanks = splitSequence(sequence, strand)
# Make the SAM/BAM fields
RNAMEL, RNAMER = makeRNAME(assembly)
QNAMEL, QNAMER = makeQNAME(name)
FLAGL, FLAGR = makeFLAG(strand)
POSL, POSR = makePOS(position, sequenceA, sequenceB, n_blanks)
MAPQL, MAPQR = makeMAPQ()
CIGARL, CIGARR = makeCIGAR(sequenceA, sequenceB)
RNEXTL, RNEXTR = makeRNEXT(RNAMEL, RNAMER)
PNEXTL, PNEXTR = makePNEXT(POSL, POSR)
TLENL, TLENR = makeTLEN(POSL, POSR, len(sequenceB))
SEQL, SEQR = makeSEQ(sequenceA, sequenceB, strand)
QUALL, QUALR = makeQUAL(sequenceA, sequenceB)
XGL, XGR = makeXG(strand)
if sequenceA == '':
print 'WARNING: Empty sequenceA at line', linecounter, 'in file', args.infile
print line
if sequenceB == '':
print 'WARNING: Empty sequenceB at line', linecounter, 'in file', args.infile
print line
# Paired-end: using readL/readR notation, thus for the Lister protocol a OT-strand readL=read1 and readR=read2 whereas for OB-strand readL=read2 and readR=read1
readL = pysam.AlignedRead()
readR = pysam.AlignedRead()
readL.rname = BAM.gettid(RNAMEL)
readR.rname = BAM.gettid(RNAMER)
readL.qname = QNAMEL
readR.qname = QNAMER
readL.flag = FLAGL
readR.flag = FLAGR
readL.pos = POSL
readR.pos = POSR
readL.mapq = MAPQL
readR.mapq = MAPQR
readL.cigar = CIGARL
readR.cigar = CIGARR
readL.rnext = BAM.gettid(RNEXTL)
readR.rnext = BAM.gettid(RNEXTR)
readL.pnext = PNEXTL
readR.pnext = PNEXTR
readL.tlen = TLENL
readR.tlen = TLENR
readL.seq = SEQL
readR.seq = SEQR
readL.qual = QUALL
readR.qual = QUALR
readL.tags = readL.tags + [XGL]
readR.tags = readR.tags + [XGL]
if not readL.is_paired:
if readL.opt('XG') == 'CT':
readL.tags = readL.tags + [('XR', 'CT')]
elif readL.opt('XG') == 'GA':
readL.tags = readL.tags + [('XR', 'CT')]
elif readL.is_paired:
if readL.opt('XG') == 'CT' and readL.is_readL1:
readL.tags = readL.tags + [('XR', 'CT')]
elif readL.opt('XG') == 'CT' and readL.is_readL2:
readL.tags = readL.tags + [('XR', 'GA')]
elif readL.opt('XG') == 'GA' and readL.is_readL1:
readL.tags = readL.tags + [('XR', 'CT')]
elif readL.opt('XG') == 'GA' and readL.is_readL2:
readL.tags = readL.tags + [('XR', 'GA')]
if not readR.is_paired:
if readR.opt('XG') == 'CT':
readR.tags = readR.tags + [('XR', 'CT')]
elif readR.opt('XG') == 'GA':
readR.tags = readR.tags + [('XR', 'CT')]
elif readR.is_paired:
if readR.opt('XG') == 'CT' and readR.is_readR1:
readR.tags = readR.tags + [('XR', 'CT')]
elif readR.opt('XG') == 'CT' and readR.is_readR2:
readR.tags = readR.tags + [('XR', 'GA')]
elif readR.opt('XG') == 'GA' and readR.is_readR1:
readR.tags = readR.tags + [('XR', 'CT')]
elif readR.opt('XG') == 'GA' and readR.is_readR2:
readR.tags = readR.tags + [('XR', 'GA')]
BAM.write(readL)
BAM.write(readR)
linecounter += 1
############################################################################################################################################################################################
### Close files
############################################################################################################################################################################################
# Release file handles; closing the BAM flushes any buffered records.
INFILE.close()
BAM.close()
############################################################################################################################################################################################
|
PeteHaitch/Lister2BAM
|
Python/Lister_style_3_to_bam.py
|
Python
|
mit
| 13,328
|
[
"pysam"
] |
1a45ab66aa8bbb94a7c8380a5a1bb8f52ad33fbcbd37abaaf112089b3d206f84
|
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import new
import traceback
import gc
from multiprocessing import Pool, Queue
import tinctest
from tinctest import TINCTestCase
import unittest2 as unittest
from unittest import TestResult
def remote_test_invoker(module_name, class_name, method_name, proc_name,
dargs=[], dxargs={}, setups = [], cleanups = []):
"""
A wrapper function that will execute a given test method in an external process.
@type module_name: string
@param module_name: Name of the module
@type class_name: string
@param class_name: Name of the class
@type method_name: string
@param method_name: Name of the test method
@type proc_name: string
@type proc_name: Name of the process that will be used in the logs
@type setups: list
@type setups: A list of (function, args, kwargs) tuple that will be executed as setups
@type cleanups: list
@type cleanups: A list of (function, args, kwargs) tuple that will be executed as cleanups
after the test execution
"""
tinctest.logger.info("Started remote test : %s - %s.%s.%s" % (proc_name, module_name, class_name, method_name) )
try:
full_class_path = module_name + '.' + class_name
# Import the class
parts = full_class_path.split('.')
module = ".".join(parts[:-1])
klass = __import__( module )
for comp in parts[1:]:
klass = getattr(klass, comp)
test_klass_instance = klass(method_name)
# Execute all setups
while setups:
function, args, kwargs = setups.pop(0)
try:
setup_method = getattr(test_klass_instance, function)
tinctest.logger.debug("Calling setup_method %s" % setup_method)
setup_method(*args, **kwargs)
except unittest.case.SkipTest, st:
return [proc_name, tinctest._SKIP_TEST_MSG_PREFIX + str(st)]
except Exception, e:
tinctest.logger.exception("Setup failed: %s - %s.%s.%s" % (proc_name, module_name, class_name, method_name))
return [proc_name, "Setup failed: %s" %traceback.format_exc()]
# Execute the test method
try:
testMethod = getattr(test_klass_instance, method_name)
tinctest.logger.debug("Calling testMethod %s" % testMethod)
testMethod(*dargs, **dxargs)
except unittest.case.SkipTest, st:
return [proc_name, tinctest._SKIP_TEST_MSG_PREFIX + str(st)]
except Exception, e:
tinctest.logger.exception("Test failed: %s - %s.%s.%s" % (proc_name, module_name, class_name, method_name))
return [proc_name, "Test failed: %s" %traceback.format_exc()]
# Execute all cleanups in LIFO
while cleanups:
function, args, kwargs = cleanups.pop(-1)
try:
cleanup_method = getattr(test_klass_instance, function)
tinctest.logger.debug("Calling cleanup_method %s" % cleanup_method)
cleanup_method(*args, **kwargs)
except Exception, e:
tinctest.logger.exception("Cleanup failed: %s - %s.%s.%s" % (proc_name, module_name, class_name, method_name))
return [proc_name, "Cleanup failed: %s" %traceback.formact_exc()]
except Exception, e:
tinctest.logger.exception("Error during invocation: %s" %traceback.format_exc())
return [proc_name, "Error during invocation: %s" %traceback.format_exc()]
tinctest.logger.info("Finished remote test : %s - %s.%s.%s" % (proc_name, module_name, class_name, method_name))
return None
@tinctest.skipLoading("Test model. No tests loaded.")
class ConcurrencyTestCase(TINCTestCase):
    """
    This model class should not be instantiated directly and should
    be extended for adding test methods.
    ConcurrencyTestCase provides an implementation where the test method
    will be run concurrently based on the metadata 'concurrency'
    @metadata: concurrency: number of concurrent executions of the test case (default: 1)
    @metadata: iterations: number of times the concurrent executions are run (default: 1)
    """
    def __init__(self, methodName="runTest", baseline_result = None):
        # Populated from test metadata by _infer_metadata(); None until then.
        self.iterations = None
        self.concurrency = None
        super(ConcurrencyTestCase, self).__init__(methodName)
    def _infer_metadata(self):
        # Both keys default to '1', i.e. a single serial execution.
        super(ConcurrencyTestCase, self)._infer_metadata()
        self.iterations = int(self._metadata.get('iterations', '1'))
        self.concurrency = int(self._metadata.get('concurrency', '1'))
    def run(self, result=None, pool = None):
        """
        This method is overriden to implement concurrency for a test method. The default
        implementation of unittest's run method will just run the test method directly.
        In the implementation, we construct a supplementary test method that will run the
        actual test method concurrently based on self.concurrency.
        In addition, this accepts an optional 'pool' argument which is passed when a ConcurrencyTestCAse
        is used within a ScenarioTestCase.
        @type result: TINCTextTestResult
        @param result: The result object to be used for this test
        @type pool: TINCWorkerPool
        @param pool: The worker pool to be used to submit external tests. If not provided, a new worker pool will be created.
        This is to enable ScenarioTestCase and ConcurrencyTestCase share the same worker pool.
        """
        # RB: For ConcurrencyTestCase, we should run the test method for
        # 'self.iterations' number of times. So, we create a new instance
        # method that runs self._testMethodName the desired number of times
        # concurrently using a worker pool of size self.concurrency
        # and set self._testMethodName to the new method before calling super.run().
        # Note - The test will be reported using the new method instead of the original
        # test method. We will re-visit this later.
        self._orig_testMethodName = self._testMethodName
        worker_pool = pool
        def test_function(my_self):
            # Wrapper that replaces the real test method: submits
            # my_self.concurrency copies of the original test to a worker
            # pool, repeated my_self.iterations times.
            my_class = my_self.__class__.__name__
            my_module = my_self.__module__
            my_method_name = my_self._orig_testMethodName
            for iter in xrange(my_self.iterations):
                tinctest.logger.info("Starting iteration# %s of total %s..." % (str(iter + 1), str(my_self.iterations)))
                should_wait = True
                # TODO - Parameterize maximum pool size
                if worker_pool is None:
                    pool = TINCTestWorkerPool(100)
                else:
                    # This means that this test is being executed through a ScenarioTestCase
                    # and we should defer inspecting the results to the scenario test case.
                    pool = worker_pool
                    should_wait = False
                for i in xrange(my_self.concurrency):
                    proc_prefix = "%s_proc_" %my_self._testMethodName
                    proc_postfix = "_iter_%s_proc_%s" %(str(iter + 1), str(i + 1))
                    proc_name = proc_prefix + proc_postfix
                    # We use 'run_worker' here that will simply call out to the
                    # super class' run method. ConcurrencyTestCase.run method has
                    # the logic to create a new test method and we would not want this to be done twice.
                    pool.submit_test(my_module, my_class, my_method_name, proc_name)
                # Wait and inspect only when the concurrency test case is executed by itself.
                # Defer result inspection when concurrency test case is executed through
                # a scenario test case.
                if should_wait:
                    pool.join()
                    # Inspect the result_queue for failures or errors
                    try:
                        if pool.has_failures():
                            failure_string = pool.get_failure_string()
                            # Trim everything from " failed execution" onwards so the
                            # reported message stays short.
                            failure_index = failure_string.find(" failed execution")
                            if failure_index != -1:
                                failure_string = failure_string[:failure_index]
                            # NOTE(review): 'self' here is the enclosing run()'s instance
                            # captured by closure, not my_self -- confirm intended.
                            self.fail("Workers encountered errors or failures: %s" % failure_string)
                    finally:
                        pool.terminate()
        # Bind the wrapper as a bound method (Python 2 'new' module) under a
        # starred name and make unittest run it instead of the original method.
        test_method = new.instancemethod(test_function,
                                         self, self.__class__)
        self.__dict__[ self._testMethodName + "*"] = test_method
        self._testMethodName = self._testMethodName + "*"
        super(ConcurrencyTestCase, self).run(result)
class TINCTestWorkerPool(object):
    """
    A wrapper around multiprocessing.Pool for handling concurrency in TINC. Used from within
    ConcurrencyTestCase and ScenarioTestCase.
    """
    def __init__(self, worker_pool_size = 100):
        """
        Initialize a multiprocessing pool of test workers.
        @param worker_pool_size: Size of the worker pool
        @type worker_pool_size: integer
        """
        tinctest.logger.info("Initializing worker pool with %d workers" % worker_pool_size)
        # Queues shared between this process and the worker processes. Workers
        # push failure / skip information that the runner later inspects.
        self.result_queue = Queue()
        self.skipped_queue = Queue()
        self.total_tests = 0
        # A list of two-tuples containing the name of the worker that failed and the
        # traceback as a string object from the remote process.
        self.failed_workers = []
        # Cached aggregate failure strings; rebuilt from self.failed_workers on
        # each getter call (see get_failure_string / get_brief_failure_string).
        self._failure_info = ''
        self._brief_failure_info = ''
        # gc is toggled off around Pool creation -- presumably to avoid
        # fork-time issues; TODO confirm the original motivation.
        gc.disable()
        self.pool = Pool(worker_pool_size)
        gc.enable()

    def remote_test_cb(self, ret):
        """
        Callback invoked for each completed worker process in the pool.
        @param ret: return value of the worker's executor function; None on
                    success, or a [proc_name, message] pair on skip/failure.
        """
        # Keep track of the total number of tests; in the future we may need
        # to find out if all tests in the concurrency/scenario suite were skipped.
        self.total_tests += 1
        if ret:
            if ret[1].find(tinctest._SKIP_TEST_MSG_PREFIX) != -1:
                self.skipped_queue.put(ret)
            else:
                self.result_queue.put(ret)

    def submit_test(self, module_name, class_name, method_name, proc_name = 'remote_test_process', dargs=None, dxargs=None, setups=None, cleanups=None):
        """
        Submit a test case asynchronously for remote execution.
        @param module_name: Name of the module where the test resides
        @type module_name: string
        @param class_name: Name of the class where the test resides
        @type class_name: string
        @param method_name: Name of the test method to be executed remotely through this worker pool
        @type method_name: string
        @param proc_name: Name to be used for the process that is started for this test submitted
        @type proc_name: string
        @param dargs: A list of non-keyword arguments to be passed to the submitted test
        @type dargs: list
        @param dxargs: A dict of keyword arguments to be passed to the test while invoking
        @type dxargs: dict
        @param setups: A list of method names that should be run before the actual test is executed
        @type setups: list
        @param cleanups: A list of method names that should be run after the actual test is executed
        @type cleanups: list
        """
        # None defaults avoid the shared-mutable-default pitfall; the remote
        # invoker pops from setups/cleanups, so each call gets fresh lists.
        self.pool.apply_async(remote_test_invoker,
                              [module_name, class_name, method_name, proc_name,
                               dargs if dargs is not None else [],
                               dxargs if dxargs is not None else {},
                               setups if setups is not None else [],
                               cleanups if cleanups is not None else []],
                              callback=self.remote_test_cb)

    def join(self):
        """
        Join the worker pool. Will wait till all the tasks in the pool finish execution.
        """
        self.pool.close()
        self.pool.join()
        # Find failed workers
        self._find_failed_workers()

    def _find_failed_workers(self):
        """
        Inspect the result queue that will contain the failed workers and populate self.failed_workers.
        """
        while not self.result_queue.empty():
            tinctest.logger.error("Failures encountered in at least one of the test workers.")
            worker_info = self.result_queue.get()
            self.failed_workers.append((worker_info[0], worker_info[1]))

    def has_failures(self):
        """
        Returns True / False depending on whether there are failures in the tasks submitted through this instance
        of the pool.
        @rtype: boolean
        @return: True if there are failures in the submitted tasks, False otherwise
        """
        return len(self.failed_workers) > 0

    def inspect(self):
        """
        Inspect the failed workers and return them as a list of two-tuples
        containing the worker name and the traceback string.
        @rtype: list of two-tuples
        @return: (worker name, traceback string) for each failed worker
        """
        if self.has_failures():
            tinctest.logger.error("Failures encountered in at least one of the test workers.")
        # Previously this returned None despite the documented contract.
        return self.failed_workers

    def get_failure_string(self):
        """
        Return an aggregated failure string for all the tasks submitted through this instance of the worker pool.
        """
        # Rebuild from scratch so that repeated calls do not duplicate entries
        # (the previous implementation appended on every call).
        self._failure_info = ''.join(
            "Worker %s failed execution : \n %s\n" % (worker_name, traceback_str)
            for worker_name, traceback_str in self.failed_workers)
        return self._failure_info

    def get_brief_failure_string(self):
        """
        Similar to get_failure_string(), however, returns worker names and just the error message from the exception
        instead of the whole stack trace.
        """
        lines = []
        for worker_name, traceback_str in self.failed_workers:
            error_msg = ''
            # The last meaningful line of a traceback string holds the
            # exception message.
            if traceback_str and len(traceback_str.split('\n')) >= 2:
                error_msg = traceback_str.split('\n')[-2]
            lines.append("Worker %s failed execution: %s\n" % (worker_name, error_msg))
        # Rebuilt fresh each call to keep the method idempotent.
        self._brief_failure_info = ''.join(lines)
        return self._brief_failure_info

    def terminate(self):
        """
        Terminates the worker pool. Disable gc to avoid hangs.
        """
        gc.disable()
        self.pool.terminate()
        gc.enable()
|
rvs/gpdb
|
src/test/tinc/tinctest/models/concurrency/__init__.py
|
Python
|
apache-2.0
| 15,427
|
[
"VisIt"
] |
411578372c88625bb14318e6c7ec65dbfee9d9b90b2667b19139390f077d050c
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""Tools/Database Processing/Rebuild Secondary Indexes"""
#-------------------------------------------------------------------------
#
# python modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
# Translation shortcut used for all user-visible strings in this module.
_ = glocale.translation.gettext
#------------------------------------------------------------------------
#
# Set up logging
#
#------------------------------------------------------------------------
import logging
# Module-level logger for this tool.
log = logging.getLogger(".Rebuild")
#-------------------------------------------------------------------------
#
# gtk modules
#
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gui.plug import tool
from gramps.gui.dialog import OkDialog
from gramps.gen.updatecallback import UpdateCallback
#-------------------------------------------------------------------------
#
# runTool
#
#-------------------------------------------------------------------------
class Rebuild(tool.Tool, UpdateCallback):
    """Rebuild the database's secondary indexes: with progress-bar feedback
    when a GUI uistate is available, or plain console output otherwise.
    """
    def __init__(self, dbstate, user, options_class, name, callback=None):
        uistate = user.uistate
        tool.Tool.__init__(self, dbstate, options_class, name)
        # Nothing can be rebuilt on a read-only database; bail out early
        # (signals were never disabled, so nothing needs re-enabling).
        if self.db.readonly:
            return
        # Suppress change notifications while the indexes are rebuilt.
        self.db.disable_signals()
        if uistate:
            # GUI mode: drive the progress bar while rebuilding.
            self.callback = uistate.pulse_progressbar
            uistate.set_busy_cursor(True)
            uistate.progress.show()
            uistate.push_message(dbstate, _("Rebuilding secondary indexes..."))
            UpdateCallback.__init__(self, self.callback)
            # 12 progress steps -- presumably one per secondary index;
            # TODO confirm against db.rebuild_secondary.
            self.set_total(12)
            self.db.rebuild_secondary(self.update)
            self.reset()
            uistate.set_busy_cursor(False)
            uistate.progress.hide()
            OkDialog(_("Secondary indexes rebuilt"),
                     _('All secondary indexes have been rebuilt.'),
                     parent=uistate.window)
        else:
            # Command-line mode. NOTE(review): update_empty is inherited from
            # UpdateCallback, whose __init__ is not called on this path --
            # confirm that is intentional.
            print("Rebuilding Secondary Indexes...")
            self.db.rebuild_secondary(self.update_empty)
            print("All secondary indexes have been rebuilt.")
        self.db.enable_signals()
#------------------------------------------------------------------------
#
#
#
#------------------------------------------------------------------------
class RebuildOptions(tool.ToolOptions):
    """
    Defines options and provides handling interface.
    """
    def __init__(self, name,person_id=None):
        # This tool has no options of its own; defer entirely to ToolOptions.
        tool.ToolOptions.__init__(self, name,person_id)
|
beernarrd/gramps
|
gramps/plugins/tool/rebuild.py
|
Python
|
gpl-2.0
| 3,587
|
[
"Brian"
] |
37c8089e82af6548b524d52ec106293d9fa45bb69d98f6d30eea7b81bbd82297
|
'''
Created on Mar 17, 2012
@author: hampt
'''
import os
import commands
from Logger import Logger
from BlastWrapper import blastallParameters, createBlastPGPCommand
from metastudentPkg.commons import p, splitBigFastaFile
import sys
import shlex
import subprocess
import time
def runBlast(inputFilePath, blastDatabasePath, outputFilePath, tmpDir, eValue, iters, configMap):
    """Run blastpgp over the input FASTA file in splits and merge the results.

    The input is split into chunks of configMap["FASTA_SPLIT_SIZE"], blastpgp
    is run serially on each chunk, and the per-chunk ".blast" outputs are
    concatenated into outputFilePath.

    @param inputFilePath: FASTA file with the query sequences
    @param blastDatabasePath: path to the BLAST database
    @param outputFilePath: merged BLAST output written here
    @param tmpDir: working directory for the FASTA splits
    @param eValue: e-value cutoff passed to blastpgp
    @param iters: number of PSI-BLAST iterations (-j)
    @param configMap: configuration dict (split size, blastpgp path)
    @raise Exception: if any blastpgp invocation exits with non-zero status
    """
    fastaSplits = splitBigFastaFile(inputFilePath, tmpDir, configMap["FASTA_SPLIT_SIZE"])
    for i, fastaSplit in enumerate(fastaSplits):
        blastParas = blastallParameters()
        blastParas.setBlastExePath(configMap["BLASTPGP_EXE_PATH"])
        blastParas.setBlastDatabasePath(blastDatabasePath)
        blastParas.setEValue(eValue)
        blastParas.setJ(iters)
        # Report up to 1000 alignments (-b) and descriptions (-v) per query.
        blastParas.setB(1000)
        blastParas.setV(1000)
        blastParas.setInputFilePath(fastaSplit)
        blastParas.setOutputFilePath(fastaSplit+".blast")
        blastCommand = createBlastPGPCommand(blastParas)
        s, o = commands.getstatusoutput(blastCommand)
        if s != 0:
            Logger.log("!!!Error!!! " + blastCommand)
            Logger.log(s)
            Logger.log(o)
            # Carry the failing command and status in the exception instead of
            # raising a bare, message-less Exception.
            raise Exception("blastpgp failed (status %s): %s" % (s, blastCommand))
    p("Merging Blast Output")
    # Context managers ensure the handles are closed even if a read fails.
    with open(outputFilePath, 'w') as bigOutputFile:
        for fastaSplit in fastaSplits:
            with open(fastaSplit+".blast") as blastFile:
                # Leading newline keeps consecutive chunk outputs separated,
                # matching the original merge format.
                bigOutputFile.write("\n" + blastFile.read())
def runMethodA(blastOutputFilePath, fastaFilePath, tmpDir, GROUP_A_THRESHOLD, GROUP_A_K, iters, onto, configMap):#
    """Run prediction method A (GOSSIP jar) and parse its CAFA output.

    Builds a shell pipeline (mkdir; cd; java -cp gossip.jar ...), runs it via
    commands.getstatusoutput, logs command/status/output to tmpDir/logA.txt,
    then parses the generated ".cafa" file into deduplicated
    "targetId<TAB>goTerm<TAB>reliability" lines.
    Returns the list of prediction lines.
    """
    p("Running Method A")
    currCwd = os.getcwd()
    tmpDirPath = os.path.join(tmpDir, "methodA")
    jarPath = os.path.join(configMap["JAR_INSTALL_FOLDER_PATH"], "gossip.jar")
    outputFilePath = os.path.join(tmpDirPath, "output.%s.cafa.txt" % (onto))
    commandsi = ["mkdir -p " + tmpDirPath,
                 "cd " + configMap["GROUP_A_PATH"],
                 "java -cp %s GOSSIPSTarter %s %s %s %s %s %s" % (jarPath, fastaFilePath, blastOutputFilePath, outputFilePath, GROUP_A_THRESHOLD, GROUP_A_K, iters)]
    commandString = ";".join(commandsi)
    # NOTE(review): cwd was never changed above, so this chdir looks like a
    # no-op -- confirm it can be removed.
    os.chdir(currCwd)
    logFile=None
    logPath=os.path.join(tmpDir, "logA.txt")
    if not os.path.exists(logPath):
        logFile = open(logPath, 'w')
    else:
        logFile = open(logPath, 'a')
    s, o = commands.getstatusoutput(commandString)
    # Deliberately always log (condition disabled with "if True").
    if True:#s != 0:
        logFile.write("Command: " + commandString + "\n")
        logFile.write(str(s)+"\n")
        logFile.write(o+"\n")
    if s != 0:
        print >> sys.stderr,"!!!Error!!! " + commandString
        print >> sys.stderr,str(s)
        print >> sys.stderr,o
        #raise
    logFile.close()
    outputFilePath = outputFilePath+".cafa"
    # NOTE(review): outputFile is never closed; consider a with-statement.
    outputFile = open(outputFilePath)
    preds = set([])
    predFilesContent=[]
    for line in outputFile:
        # Skip CAFA header/footer lines.
        if line.startswith("AUTHOR") or line.startswith("MODEL") or line.startswith("ACCURACY") or line.startswith("KEYWORDS") or line.startswith("END"):
            None
        else:
            targetId, goTerm, rel = line.rstrip().split("\t")
            # Drop the leading character, anything from "(" on, and cap the id
            # at 63 characters.
            targetId = targetId[1:].split("(")[0][:63]
            # Clamp reliability to [0, 1] and format with two decimals.
            relFloat = max(min(float(rel), 1.00), 0.00)
            rel = "%.2f" % (relFloat)
            line = targetId + "\t" + goTerm + "\t" + rel
            # Deduplicate (target, GO term) pairs and drop empty GO terms.
            if targetId + "\t" + goTerm not in preds and goTerm.strip() != "":
                predFilesContent.append(line)
                preds.add(targetId + "\t" + goTerm)
    return predFilesContent
def runMethodB(blastOutputFilePath, fastaFilePath, tmpDir, GROUP_B_K, onto, configMap):#
    """Run prediction method B (weighted k-NN binary) and parse its output.

    Builds a shell pipeline (mkdir; cd; ./knn_weighted ...), runs it via
    commands.getstatusoutput, logs to tmpDir/logB.txt, then parses the
    ".weighted_knn.predicted_leaves" file, keeping at most 1000 GO terms per
    target id. Returns the list of prediction lines.
    """
    p("Running Method B")
    currCwd = os.getcwd()
    tmpDirPath = os.path.join(tmpDir, "methodB")
    if not os.path.exists(tmpDirPath):
        os.mkdir(tmpDirPath)
    jarPath = configMap["JAR_INSTALL_FOLDER_PATH"].rstrip("/")
    outputFilePath = os.path.join(tmpDirPath, onto)
    os.mkdir(outputFilePath)
    commandsi = ["mkdir -p " + tmpDirPath,
                 "cd " + configMap["GROUP_B_PATH"],
                 "./knn_weighted -m weighted_knn -j %s -d %s -i %s -o %s -k %s -l %s" % (blastOutputFilePath, blastOutputFilePath, fastaFilePath, outputFilePath, GROUP_B_K, jarPath)]
    commandString = ";".join(commandsi)
    #print commandString
    # NOTE(review): cwd was never changed above; this chdir looks like a no-op.
    os.chdir(currCwd)
    logFile=None
    logPath=os.path.join(tmpDir, "logB.txt")
    if not os.path.exists(logPath):
        logFile = open(logPath, 'w')
    else:
        logFile = open(logPath, 'a')
    s, o = commands.getstatusoutput(commandString)
    # Deliberately always log (condition disabled with "if True").
    if True:#s != 0:
        logFile.write("Command: " + commandString + "\n")
        logFile.write(str(s) + "\n")
        logFile.write(o+ "\n")
    if s != 0:
        print >> sys.stderr,"!!!Error!!! " + commandString
        print >> sys.stderr,str(s)
        print >> sys.stderr,o
        #raise
    logFile.close()
    outputFilePath = os.path.join(outputFilePath, os.path.basename(fastaFilePath)+".weighted_knn.predicted_leaves")
    # Per-target count of accepted GO terms, capped at 1000 below.
    idToGoTermCount = {}
    predFilesContent=[]
    with open(outputFilePath) as f:
        for line in f.readlines():
            line = line.rstrip()
            # Skip CAFA header/footer lines.
            if line.startswith("AUTHOR") or line.startswith("MODEL") or line.startswith("ACCURACY") or line.startswith("KEYWORDS") or line.startswith("END"):
                None
            else:
                # Cap target ids at 63 characters.
                currId = line.split("\t")[0][:63]
                restOfLine = "\t".join(line.split("\t")[1:])
                if idToGoTermCount.get(currId, 0) < 1000:
                    # Clamp the occasional out-of-range score of 1.01 to 1.00.
                    if float(restOfLine.split("\t")[1]) > 1.0:
                        restOfLine = restOfLine.rstrip("\n").replace("1.01", "1.00")
                    idToGoTermCount[currId] = idToGoTermCount.get(currId, 0) + 1
                    line = currId + "\t" + restOfLine
                    predFilesContent.append(line)
    return predFilesContent
def runMethodC(blastOutputFilePath, fastaFilePath, tmpDir, scoring, onto, configMap, debug):#
    """Run prediction method C (CafaWrapper3.pl) and parse its output.

    Builds a shell pipeline (mkdir; cd; ./CafaWrapper3.pl ...), runs it via
    commands.getstatusoutput, logs to tmpDir/logC.txt, then parses the output
    file into deduplicated "targetId<TAB>goTerm<TAB>reliability" lines with a
    strictly positive reliability. Returns the list of prediction lines.
    """
    p("Running Method C")
    currCwd = os.getcwd()
    tmpDirPath = os.path.join(tmpDir, "methodC")
    outputFilePath = os.path.join(tmpDirPath, "output.%s.txt" % (onto))
    commandsi = ["mkdir -p " + tmpDirPath,
                 "cd " + configMap["GROUP_C_PATH"],
                 "./CafaWrapper3.pl %s %s %s %s" % (blastOutputFilePath, outputFilePath, scoring, tmpDirPath)]
    commandString = ";".join(commandsi)
    if debug:
        print >> sys.stderr, commandString
    # NOTE(review): cwd was never changed above; this chdir looks like a no-op.
    os.chdir(currCwd)
    logFile=None
    logPath=os.path.join(tmpDir, "logC.txt")
    if not os.path.exists(logPath):
        logFile = open(logPath, 'w')
    else:
        logFile = open(logPath, 'a')
    s, o = commands.getstatusoutput(commandString)
    # Deliberately always log (condition disabled with "if True").
    if True:#s != 0:
        logFile.write("Command: " + commandString)
        logFile.write(str(s))
        logFile.write(o)
    if s != 0:
        print >> sys.stderr, "!!!Error!!! " + commandString
        print >> sys.stderr,str(s)
        print >> sys.stderr,o
        #raise
    logFile.close()
    predFilesContent = []
    with open(outputFilePath) as f:
        preds = set([])
        for line in f.readlines():
            line=line.rstrip()
            # Skip blank lines and CAFA header/footer lines.
            if line.rstrip() == "" or line.startswith("AUTHOR") or line.startswith("MODEL") or line.startswith("ACCURACY") or line.startswith("KEYWORDS") or line.startswith("END"):
                None
            else:
                # Method C output is space-separated (unlike methods A/B).
                targetId, goTerm, rel = line.rstrip().split(" ")
                # Clamp reliability to [0, 1] and format with two decimals.
                relFloat = max(min(float(rel), 1.00), 0.00)
                rel = "%.2f" % (relFloat)
                line = targetId[:63] + "\t" + goTerm + "\t" + rel
                # Deduplicate, drop empty GO terms and zero-reliability hits.
                if targetId[:63] + "\t" + goTerm not in preds and goTerm.strip() != "" and float(rel) > 0.0:
                    predFilesContent.append(line)
                    preds.add(targetId[:63] + "\t" + goTerm)
    return predFilesContent
|
Rostlab/MetaStudent
|
metastudentPkg/runMethods.py
|
Python
|
gpl-2.0
| 7,135
|
[
"BLAST"
] |
0e3c2dce5e1bc11bb2e43082cbcfbc2f62377e8a67f4d04c819cbbcc5be34e31
|
"""Joint variant calling with multiple samples: aka squaring off, or backfilling.
Handles the N+1 problem of variant calling by combining and recalling samples
previously calling individually (or in smaller batches). Recalls at all positions found
variable in any of the input samples within each batch. Takes a general approach supporting
GATK's incremental joint discovery (http://www.broadinstitute.org/gatk/guide/article?id=3893)
and FreeBayes's N+1 approach (https://groups.google.com/d/msg/freebayes/-GK4zI6NsYY/Wpcp8nt_PVMJ)
as implemented in bcbio.variation.recall (https://github.com/chapmanb/bcbio.variation.recall).
"""
import collections
import math
import os
import pysam
import toolz as tz
from bcbio import broad, utils
from bcbio.bam import ref
from bcbio.distributed.split import grouped_parallel_split_combine
from bcbio.pipeline import config_utils, region
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.variation import bamprep, gatkjoint, genotype, multi
# Variant callers that can participate in joint recalling, grouped by the
# recall mechanism used for them (see square_batch_region below).
SUPPORTED = {"general": ["freebayes", "platypus", "samtools"],
             "gatk": ["gatk-haplotype"]}
def _get_callable_regions(data):
    """Retrieve regions to parallelize by from callable regions, variant regions or chromosomes.

    Returns a list of (contig, start, end) tuples.
    """
    import pybedtools
    bed_files = data.get("callable_regions") or data.get("variant_regions")
    if bed_files:
        # Preferred source: a single BED file of callable/variant regions.
        assert len(bed_files) == 1
        return [(interval.chrom, int(interval.start), int(interval.stop))
                for interval in pybedtools.BedTool(bed_files[0])]
    bam_inputs = [fname for fname in data["work_bams"] if fname.endswith(".bam")]
    if bam_inputs:
        # Fall back to whole chromosomes taken from the first BAM header.
        with pysam.Samfile(bam_inputs[0], "rb") as bam_handle:
            return [(contig, 0, size)
                    for contig, size in zip(bam_handle.references, bam_handle.lengths)]
    # Last resort: contigs from the reference genome.
    return [(contig.name, 0, contig.size)
            for contig in ref.file_contigs(dd.get_ref_file(data), data["config"])]
def _split_by_callable_region(data):
    """Split by callable or variant regions.
    We expect joint calling to be deep in numbers of samples per region, so prefer
    splitting aggressively by regions.

    Returns (final merged output file, list of per-region work items), where each
    work item is (region tuple, work BAMs, input VCFs, per-region output file).
    """
    batch = tz.get_in(("metadata", "batch"), data)
    jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), data)
    # Batched samples are named by batch; unbatched ones by sample name.
    name = batch if batch else tz.get_in(("rgnames", "sample"), data)
    out_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "joint", jointcaller, name))
    utils.safe_makedir(os.path.join(out_dir, "inprep"))
    parts = []
    for feat in _get_callable_regions(data):
        region_dir = utils.safe_makedir(os.path.join(out_dir, feat[0]))
        # Each per-chromosome directory shares the common "inprep" directory
        # via a relative symlink.
        region_prep_dir = os.path.join(region_dir, "inprep")
        if not os.path.exists(region_prep_dir):
            os.symlink(os.path.join(os.pardir, "inprep"), region_prep_dir)
        region_outfile = os.path.join(region_dir, "%s-%s.vcf.gz" % (batch, region.to_safestr(feat)))
        parts.append((feat, data["work_bams"], data["vrn_files"], region_outfile))
    out_file = os.path.join(out_dir, "%s-joint.vcf.gz" % name)
    return out_file, parts
def _is_jointcaller_compatible(data):
    """Check whether the configured joint caller matches the variant caller.

    Compatible when the joint caller is the "<variantcaller>-joint" variant of
    the configured caller, or when no variant caller is configured at all.
    """
    algorithm = ("config", "algorithm")
    jointcaller = tz.get_in(algorithm + ("jointcaller",), data)
    variantcaller = tz.get_in(algorithm + ("variantcaller",), data)
    if not variantcaller:
        return True
    return jointcaller == "%s-joint" % variantcaller
def square_off(samples, run_parallel):
    """Perform joint calling at all variants within a batch.

    Splits batched, jointcaller-compatible samples by region, runs
    square_batch_region in parallel, then recombines per-caller. Samples that
    are unbatched or incompatible pass through unchanged.
    """
    to_process = []
    extras = []
    for data in [x[0] for x in samples]:
        added = False
        # Only batched samples take part in joint calling; one entry is added
        # per configured joint caller.
        if tz.get_in(("metadata", "batch"), data):
            for add in genotype.handle_multiple_callers(data, "jointcaller"):
                if _is_jointcaller_compatible(add):
                    added = True
                    to_process.append([add])
        if not added:
            extras.append([data])
    processed = grouped_parallel_split_combine(to_process, _split_by_callable_region,
                                               multi.group_batches_joint, run_parallel,
                                               "square_batch_region", "concat_variant_files",
                                               "vrn_file", ["region", "sam_ref", "config"])
    return _combine_to_jointcaller(processed) + extras
def _combine_to_jointcaller(processed):
    """Collapse independent per-region outputs to one sample per joint caller + VCF.

    Keeps the first sample seen for each (jointcaller, vrn_file) pair,
    preserving input order.
    """
    grouped = collections.OrderedDict()
    for data in (xs[0] for xs in processed):
        caller = tz.get_in(("config", "algorithm", "jointcaller"), data)
        grouped.setdefault((caller, data["vrn_file"]), []).append(data)
    return [[members[0]] for members in grouped.values()]
def get_callers():
    """Return the joint caller names exposed to configuration.

    General callers support both "-joint" and "-merge" modes; GATK callers
    only "-joint".
    """
    callers = ["%s-joint" % name for name in SUPPORTED["general"]]
    callers.extend("%s-merge" % name for name in SUPPORTED["general"])
    callers.extend("%s-joint" % name for name in SUPPORTED["gatk"])
    return callers
def square_batch_region(data, region, bam_files, vrn_files, out_file):
    """Perform squaring of a batch in a supplied region, with input BAMs.

    Dispatches to bcbio.variation.recall for general callers (square or merge
    mode) or to GATK joint genotyping, then annotates the sample with the
    region and resulting VCF. Returns the sample wrapped in a list.
    """
    if not utils.file_exists(out_file):
        jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), data)
        if jointcaller in ["%s-joint" % x for x in SUPPORTED["general"]]:
            _square_batch_bcbio_variation(data, region, bam_files, vrn_files, out_file, "square")
        elif jointcaller in ["%s-merge" % x for x in SUPPORTED["general"]]:
            _square_batch_bcbio_variation(data, region, bam_files, vrn_files, out_file, "merge")
        elif jointcaller in ["%s-joint" % x for x in SUPPORTED["gatk"]]:
            gatkjoint.run_region(data, region, vrn_files, out_file)
        else:
            raise ValueError("Unexpected joint calling approach: %s" % jointcaller)
    if region:
        data["region"] = region
    data = _fix_orig_vcf_refs(data)
    data["vrn_file"] = out_file
    return [data]
def _fix_orig_vcf_refs(data):
    """Preserve references to the initial per-sample variant calls.

    When a variant caller ran before joint calling, move each "vrn_file"
    reference to "vrn_file_orig" (on the sample and on every member of
    data["group_orig"]) so the joint result can take its place.
    """
    variantcaller = tz.get_in(("config", "algorithm", "variantcaller"), data)
    if variantcaller:
        data["vrn_file_orig"] = data["vrn_file"]
        for idx, sample in enumerate(data["group_orig"]):
            sample_vrn = sample.pop("vrn_file", None)
            if sample_vrn:
                sample["vrn_file_orig"] = sample_vrn
            # Write back in place so the original list object stays current.
            data["group_orig"][idx] = sample
    return data
def _square_batch_bcbio_variation(data, region, bam_files, vrn_files, out_file,
                                  todo="square"):
    """Run squaring or merging analysis using bcbio.variation.recall.

    Writes the unique input VCFs (and, for squaring, BAMs) to an inputs file,
    then shells out to the bcbio-variation-recall tool for the given region.
    todo is "square" (re-call at union of positions) or "merge".
    Returns out_file.
    """
    ref_file = tz.get_in(("reference", "fasta", "base"), data)
    cores = tz.get_in(("config", "algorithm", "num_cores"), data, 1)
    resources = config_utils.get_resources("bcbio-variation-recall", data["config"])
    # adjust memory by cores but leave room for run program memory
    memcores = int(math.ceil(float(cores) / 5.0))
    jvm_opts = config_utils.adjust_opts(resources.get("jvm_opts", ["-Xms250m", "-Xmx2g"]),
                                        {"algorithm": {"memory_adjust": {"direction": "increase",
                                                                         "magnitude": memcores}}})
    # Write unique VCFs and BAMs to input file
    input_file = "%s-inputs.txt" % os.path.splitext(out_file)[0]
    with open(input_file, "w") as out_handle:
        out_handle.write("\n".join(sorted(list(set(vrn_files)))) + "\n")
        if todo == "square":
            out_handle.write("\n".join(sorted(list(set(bam_files)))) + "\n")
    # Joint caller name minus the "-joint" suffix gives the underlying caller.
    variantcaller = tz.get_in(("config", "algorithm", "jointcaller"), data).replace("-joint", "")
    cmd = ["bcbio-variation-recall", todo] + jvm_opts + broad.get_default_jvm_opts() + \
          ["-c", cores, "-r", bamprep.region_to_gatk(region)]
    if todo == "square":
        cmd += ["--caller", variantcaller]
    cmd += [out_file, ref_file, input_file]
    bcbio_env = utils.get_bcbio_env()
    cmd = " ".join(str(x) for x in cmd)
    do.run(cmd, "%s in region: %s" % (cmd, bamprep.region_to_gatk(region)), env=bcbio_env)
    return out_file
|
brainstorm/bcbio-nextgen
|
bcbio/variation/joint.py
|
Python
|
mit
| 8,435
|
[
"pysam"
] |
3d0894d4666a096308bdfbf858b42a348d3d53faf55cafa5a1491b0759fbfccf
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import numpy as np
import copy
import tests_common
import espressomd
import espressomd.lb
import espressomd.observables
"""
Tests for the LB fluid profile observables.
"""
# Lattice-Boltzmann setup: time step, grid spacing, and a cubic box of
# 17 x 17 x 17 LB cells.
TIME_STEP = 0.1
AGRID = 0.7
BOX_L_X = 17.0 * AGRID
BOX_L_Y = 17.0 * AGRID
BOX_L_Z = 17.0 * AGRID
VISC = .7
DENS = 1.7
# Constructor arguments shared by the CPU and GPU LB fluid actors.
LB_PARAMS = {'agrid': AGRID,
             'dens': DENS,
             'visc': VISC,
             'tau': TIME_STEP
             }
# Observable parameters: one bin per LB cell over the whole box, sampled at
# the cell centers (offset of half a grid spacing in each direction).
LB_VELOCITY_PROFILE_PARAMS = {
    'n_x_bins': int(BOX_L_X / AGRID),
    'n_y_bins': int(BOX_L_Y / AGRID),
    'n_z_bins': int(BOX_L_Z / AGRID),
    'min_x': 0.0,
    'min_y': 0.0,
    'min_z': 0.0,
    'max_x': BOX_L_X,
    'max_y': BOX_L_Y,
    'max_z': BOX_L_Z,
    'sampling_delta_x': AGRID,
    'sampling_delta_y': AGRID,
    'sampling_delta_z': AGRID,
    'sampling_offset_x': 0.5 * AGRID,
    'sampling_offset_y': 0.5 * AGRID,
    'sampling_offset_z': 0.5 * AGRID,
    'allow_empty_bins': False}
class ObservableProfileLBCommon:
    """Shared checks for the LBVelocityProfile observable; the concrete CPU
    and GPU subclasses provide self.lbf in their setUp().
    """
    # Set by the CPU/GPU subclass in setUp().
    lbf = None
    system = espressomd.System(box_l=[BOX_L_X, BOX_L_Y, BOX_L_Z])
    system.time_step = TIME_STEP
    system.cell_system.skin = 0.4 * AGRID
    def set_fluid_velocities(self):
        """Set an x dependent fluid velocity."""
        for x in range(int(self.system.box_l[0] / AGRID)):
            for y in range(int(self.system.box_l[1] / AGRID)):
                for z in range(int(self.system.box_l[2] / AGRID)):
                    # Velocity equals the cell's x index; checked bin-by-bin below.
                    self.lbf[x, y, z].velocity = [float(x), 0.0, 0.0]
    def test_velocity_profile(self):
        """Profile data and bin edges must match the imposed velocity field."""
        self.set_fluid_velocities()
        obs = espressomd.observables.LBVelocityProfile(
            **LB_VELOCITY_PROFILE_PARAMS)
        obs_data = obs.calculate()
        obs_edges = obs.call_method("edges")
        # Compare observable edges against numpy's histogram edges.
        _, np_edges = tests_common.get_histogram(
            np.zeros([1, 3]), LB_VELOCITY_PROFILE_PARAMS, 'cartesian',
            normed=True)
        for i in range(3):
            np.testing.assert_array_almost_equal(obs_edges[i], np_edges[i])
        for x in range(obs_data.shape[0]):
            for y in range(obs_data.shape[1]):
                for z in range(obs_data.shape[2]):
                    # x-component of each bin equals the x index set above.
                    self.assertAlmostEqual(
                        obs_data[x, y, z, 0], float(x), places=5)
        self.assertEqual(np.prod(obs_data.shape),
                         LB_VELOCITY_PROFILE_PARAMS['n_x_bins'] *
                         LB_VELOCITY_PROFILE_PARAMS['n_y_bins'] *
                         LB_VELOCITY_PROFILE_PARAMS['n_z_bins'] * 3)
    def test_error_sampling_delta_of_0(self):
        """Zero sampling deltas must be rejected at construction time."""
        lb_velocity_params_local = copy.copy(LB_VELOCITY_PROFILE_PARAMS)
        lb_velocity_params_local['sampling_delta_x'] = 0.0
        lb_velocity_params_local['sampling_delta_y'] = 0.0
        lb_velocity_params_local['sampling_delta_z'] = 0.0
        with self.assertRaises(RuntimeError):
            _ = espressomd.observables.LBVelocityProfile(
                **lb_velocity_params_local)
    def test_error_if_no_LB(self):
        """Calculating without an active LB actor must raise."""
        self.system.actors.clear()
        obs = espressomd.observables.LBVelocityProfile(
            **LB_VELOCITY_PROFILE_PARAMS)
        with self.assertRaises(RuntimeError):
            obs.calculate()
    def test_error_if_empty_bin(self):
        """A sampling delta too coarse to hit every bin must raise
        (allow_empty_bins is False)."""
        lb_velocity_params_local = copy.copy(LB_VELOCITY_PROFILE_PARAMS)
        lb_velocity_params_local['sampling_delta_x'] = 3.0
        obs = espressomd.observables.LBVelocityProfile(
            **lb_velocity_params_local)
        with self.assertRaises(RuntimeError):
            obs.calculate()
    def test_lb_profile_interface(self):
        # test setters and getters
        params = LB_VELOCITY_PROFILE_PARAMS.copy()
        params['n_x_bins'] = 4
        params['n_y_bins'] = 6
        params['n_z_bins'] = 8
        obs = espressomd.observables.LBVelocityProfile(**params)
        # check flag
        self.assertFalse(obs.allow_empty_bins)
        obs.allow_empty_bins = True
        self.assertTrue(obs.allow_empty_bins)
        # check bins
        self.assertEqual(obs.n_x_bins, 4)
        self.assertEqual(obs.n_y_bins, 6)
        self.assertEqual(obs.n_z_bins, 8)
        obs_data = obs.calculate()
        np.testing.assert_array_equal(obs_data.shape, [4, 6, 8, 3])
        obs.n_x_bins = 1
        obs.n_y_bins = 2
        obs.n_z_bins = 3
        obs_data = obs.calculate()
        np.testing.assert_array_equal(obs_data.shape, [1, 2, 3, 3])
        # check edges lower corner
        self.assertEqual(obs.min_x, params['min_x'])
        self.assertEqual(obs.min_y, params['min_y'])
        self.assertEqual(obs.min_z, params['min_z'])
        obs.min_x = 4
        obs.min_y = 5
        obs.min_z = 6
        self.assertEqual(obs.min_x, 4)
        self.assertEqual(obs.min_y, 5)
        self.assertEqual(obs.min_z, 6)
        obs_bin_edges = obs.bin_edges()
        np.testing.assert_array_equal(obs_bin_edges[0, 0, 0], [4, 5, 6])
        # check edges upper corner
        self.assertEqual(obs.max_x, params['max_x'])
        self.assertEqual(obs.max_y, params['max_y'])
        self.assertEqual(obs.max_z, params['max_z'])
        obs.max_x = 7
        obs.max_y = 8
        obs.max_z = 9
        self.assertEqual(obs.max_x, 7)
        self.assertEqual(obs.max_y, 8)
        self.assertEqual(obs.max_z, 9)
        obs_bin_edges = obs.bin_edges()
        np.testing.assert_array_equal(obs_bin_edges[-1, -1, -1], [7, 8, 9])
        # check delta
        self.assertEqual(obs.sampling_delta_x, params['sampling_delta_x'])
        self.assertEqual(obs.sampling_delta_y, params['sampling_delta_y'])
        self.assertEqual(obs.sampling_delta_z, params['sampling_delta_z'])
        obs.sampling_delta_x = 10
        obs.sampling_delta_y = 11
        obs.sampling_delta_z = 12
        self.assertEqual(obs.sampling_delta_x, 10)
        self.assertEqual(obs.sampling_delta_y, 11)
        self.assertEqual(obs.sampling_delta_z, 12)
        # check offsets
        self.assertEqual(obs.sampling_offset_x, params['sampling_offset_x'])
        self.assertEqual(obs.sampling_offset_y, params['sampling_offset_y'])
        self.assertEqual(obs.sampling_offset_z, params['sampling_offset_z'])
        obs.sampling_offset_x = 13
        obs.sampling_offset_y = 14
        obs.sampling_offset_z = 15
        self.assertEqual(obs.sampling_offset_x, 13)
        self.assertEqual(obs.sampling_offset_y, 14)
        self.assertEqual(obs.sampling_offset_z, 15)
class LBCPU(ut.TestCase, ObservableProfileLBCommon):

    """Run the shared LB profile-observable checks against the CPU fluid."""

    def setUp(self):
        # Start from an empty actor list, then register a fresh CPU LB fluid.
        self.system.actors.clear()
        self.lbf = espressomd.lb.LBFluid(**LB_PARAMS)
        self.system.actors.add(self.lbf)
@utx.skipIfMissingGPU()
class LBGPU(ut.TestCase, ObservableProfileLBCommon):

    """Run the shared LB profile-observable checks against the GPU fluid."""

    def setUp(self):
        # Start from an empty actor list, then register a fresh GPU LB fluid.
        self.system.actors.clear()
        self.lbf = espressomd.lb.LBFluidGPU(**LB_PARAMS)
        self.system.actors.add(self.lbf)
if __name__ == "__main__":
    # Discover and run the LBCPU/LBGPU test cases when invoked as a script.
    ut.main()
|
KaiSzuttor/espresso
|
testsuite/python/observable_profileLB.py
|
Python
|
gpl-3.0
| 7,716
|
[
"ESPResSo"
] |
8f17a625e50d54dbd5decda93fe5063c86fd6eb2d386dfacc2883cbc862c5d41
|
r"""OS routines for NT or Posix depending on what system we're on.
This exports:
- all functions from posix, nt or ce, e.g. unlink, stat, etc.
- os.path is either posixpath or ntpath
- os.name is either 'posix', 'nt' or 'ce'.
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.extsep is the extension separator (always '.')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
#'
import sys, errno
import stat as st
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep",
"defpath", "name", "path", "devnull", "SEEK_SET", "SEEK_CUR",
"SEEK_END", "fsencode", "fsdecode", "get_exec_path", "fdopen",
"popen", "extsep"]
def _exists(name):
return name in globals()
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
# Any new dependencies of the os module and/or changes in path separator
# requires updating importlib as well.
if 'posix' in _names:
name = 'posix'
linesep = '\n'
from posix import *
try:
from posix import _exit
__all__.append('_exit')
except ImportError:
pass
import posixpath as path
try:
from posix import _have_functions
except ImportError:
pass
import posix
__all__.extend(_get_exports_list(posix))
del posix
elif 'nt' in _names:
name = 'nt'
linesep = '\r\n'
from nt import *
try:
from nt import _exit
__all__.append('_exit')
except ImportError:
pass
import ntpath as path
import nt
__all__.extend(_get_exports_list(nt))
del nt
try:
from nt import _have_functions
except ImportError:
pass
elif 'ce' in _names:
name = 'ce'
linesep = '\r\n'
from ce import *
try:
from ce import _exit
__all__.append('_exit')
except ImportError:
pass
# We can use the standard Windows path.
import ntpath as path
import ce
__all__.extend(_get_exports_list(ce))
del ce
try:
from ce import _have_functions
except ImportError:
pass
else:
raise ImportError('no os specific module found')
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
del _names
# Build the capability sets documented as os.supports_dir_fd,
# os.supports_effective_ids, os.supports_fd and os.supports_follow_symlinks,
# based on the HAVE_* probes the platform module reported at build time.
if _exists("_have_functions"):
    _globals = globals()
    def _add(str, fn):
        # Add the function object named *fn* to the current _set when the
        # build-time capability *str* is present in _have_functions.
        if (fn in _globals) and (str in _have_functions):
            _set.add(_globals[fn])

    # Functions accepting a dir_fd argument.
    _set = set()
    _add("HAVE_FACCESSAT", "access")
    _add("HAVE_FCHMODAT", "chmod")
    _add("HAVE_FCHOWNAT", "chown")
    _add("HAVE_FSTATAT", "stat")
    _add("HAVE_FUTIMESAT", "utime")
    _add("HAVE_LINKAT", "link")
    _add("HAVE_MKDIRAT", "mkdir")
    _add("HAVE_MKFIFOAT", "mkfifo")
    _add("HAVE_MKNODAT", "mknod")
    _add("HAVE_OPENAT", "open")
    _add("HAVE_READLINKAT", "readlink")
    _add("HAVE_RENAMEAT", "rename")
    _add("HAVE_SYMLINKAT", "symlink")
    _add("HAVE_UNLINKAT", "unlink")
    _add("HAVE_UNLINKAT", "rmdir")
    _add("HAVE_UTIMENSAT", "utime")
    supports_dir_fd = _set

    # Functions accepting effective_ids=True.
    _set = set()
    _add("HAVE_FACCESSAT", "access")
    supports_effective_ids = _set

    # Functions accepting an open file descriptor instead of a path.
    _set = set()
    _add("HAVE_FCHDIR", "chdir")
    _add("HAVE_FCHMOD", "chmod")
    _add("HAVE_FCHOWN", "chown")
    _add("HAVE_FDOPENDIR", "listdir")
    _add("HAVE_FEXECVE", "execve")
    _set.add(stat) # fstat always works
    _add("HAVE_FTRUNCATE", "truncate")
    _add("HAVE_FUTIMENS", "utime")
    _add("HAVE_FUTIMES", "utime")
    _add("HAVE_FPATHCONF", "pathconf")
    if _exists("statvfs") and _exists("fstatvfs"): # mac os x10.3
        _add("HAVE_FSTATVFS", "statvfs")
    supports_fd = _set

    # Functions accepting follow_symlinks=False.
    _set = set()
    _add("HAVE_FACCESSAT", "access")
    # Some platforms don't support lchmod().  Often the function exists
    # anyway, as a stub that always returns ENOSUP or perhaps EOPNOTSUPP.
    # (No, I don't know why that's a good design.)  ./configure will detect
    # this and reject it--so HAVE_LCHMOD still won't be defined on such
    # platforms.  This is Very Helpful.
    #
    # However, sometimes platforms without a working lchmod() *do* have
    # fchmodat().  (Examples: Linux kernel 3.2 with glibc 2.15,
    # OpenIndiana 3.x.)  And fchmodat() has a flag that theoretically makes
    # it behave like lchmod().  So in theory it would be a suitable
    # replacement for lchmod().  But when lchmod() doesn't work, fchmodat()'s
    # flag doesn't work *either*.  Sadly ./configure isn't sophisticated
    # enough to detect this condition--it only determines whether or not
    # fchmodat() minimally works.
    #
    # Therefore we simply ignore fchmodat() when deciding whether or not
    # os.chmod supports follow_symlinks.  Just checking lchmod() is
    # sufficient.  After all--if you have a working fchmodat(), your
    # lchmod() almost certainly works too.
    #
    # _add("HAVE_FCHMODAT", "chmod")
    _add("HAVE_FCHOWNAT", "chown")
    _add("HAVE_FSTATAT", "stat")
    _add("HAVE_LCHFLAGS", "chflags")
    _add("HAVE_LCHMOD", "chmod")
    if _exists("lchown"): # mac os x10.3
        _add("HAVE_LCHOWN", "chown")
    _add("HAVE_LINKAT", "link")
    _add("HAVE_LUTIMES", "utime")
    _add("HAVE_LSTAT", "stat")
    _add("HAVE_FSTATAT", "stat")
    _add("HAVE_UTIMENSAT", "utime")
    _add("MS_WINDOWS", "stat")
    supports_follow_symlinks = _set

    # Drop the scaffolding; only the supports_* sets remain public.
    del _set
    del _have_functions
    del _globals
    del _add
# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
# Other possible SEEK values are directly imported from posixmodule.c
SEEK_SET = 0   # seek relative to the beginning of the file
SEEK_CUR = 1   # seek relative to the current file position
SEEK_END = 2   # seek relative to the end of the file
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0o777, exist_ok=False):
    """makedirs(name [, mode=0o777][, exist_ok=False])

    Recursive directory creation.  Like mkdir(), except that any missing
    intermediate path segment (not just the rightmost) is created as
    well.  If the target directory already exists, an OSError is raised
    unless *exist_ok* is true.
    """
    parent, leaf = path.split(name)
    if not leaf:
        # Path ended in a separator; split once more to get the real leaf.
        parent, leaf = path.split(parent)
    if parent and leaf and not path.exists(parent):
        try:
            makedirs(parent, mode, exist_ok)
        except FileExistsError:
            # Another process may have created the parent meanwhile.
            pass
        this_dir = bytes(curdir, 'ASCII') if isinstance(leaf, bytes) else curdir
        if leaf == this_dir:    # xxx/newdir/. exists once xxx/newdir does
            return
    try:
        mkdir(name, mode)
    except OSError as e:
        if not exist_ok or e.errno != errno.EEXIST or not path.isdir(name):
            raise
def removedirs(name):
    """removedirs(name)

    Recursive directory removal.  Remove the leaf directory *name*, then
    prune every now-empty ancestor directory, stopping at the first one
    that cannot be removed (typically because it is not empty).  Errors
    during the pruning phase are ignored.
    """
    rmdir(name)
    parent, leaf = path.split(name)
    if not leaf:
        # Trailing separator: split again to reach the actual leaf name.
        parent, leaf = path.split(parent)
    while parent and leaf:
        try:
            rmdir(parent)
        except OSError:
            # Ancestor not empty (or not removable) -- stop pruning.
            break
        parent, leaf = path.split(parent)
def renames(old, new):
    """renames(old, new)

    Super-rename: create any directories needed to make *new* valid,
    rename *old* to *new*, then prune now-empty directories left behind
    along the rightmost path segments of *old*.

    Note: this can fail, leaving the new directory structure in place,
    if you lack permission to unlink the leaf directory or file.
    """
    new_head, new_tail = path.split(new)
    if new_head and new_tail and not path.exists(new_head):
        makedirs(new_head)
    rename(old, new)
    old_head, old_tail = path.split(old)
    if old_head and old_tail:
        try:
            removedirs(old_head)
        except OSError:
            # Best effort: a non-empty ancestor simply stays in place.
            pass
__all__.extend(["makedirs", "removedirs", "renames"])
def walk(top, topdown=True, onerror=None, followlinks=False):
    """Directory tree generator.

    For each directory in the directory tree rooted at top (including top
    itself, but excluding '.' and '..'), yields a 3-tuple

        dirpath, dirnames, filenames

    dirpath is a string, the path to the directory.  dirnames is a list of
    the names of the subdirectories in dirpath (excluding '.' and '..').
    filenames is a list of the names of the non-directory files in dirpath.
    Note that the names in the lists are just names, with no path components.
    To get a full path (which begins with top) to a file or directory in
    dirpath, do os.path.join(dirpath, name).

    If optional arg 'topdown' is true or not specified, the triple for a
    directory is generated before the triples for any of its subdirectories
    (directories are generated top down).  If topdown is false, the triple
    for a directory is generated after the triples for all of its
    subdirectories (directories are generated bottom up).

    When topdown is true, the caller can modify the dirnames list in-place
    (e.g., via del or slice assignment), and walk will only recurse into the
    subdirectories whose names remain in dirnames; this can be used to prune
    the search, or to impose a specific order of visiting.  Modifying
    dirnames when topdown is false is ineffective, since the directories in
    dirnames have already been generated by the time dirnames itself is
    generated.  No matter the value of topdown, the list of subdirectories
    is retrieved before the tuples for the directory and its subdirectories
    are generated.

    By default errors from the os.scandir() call are ignored.  If
    optional arg 'onerror' is specified, it should be a function; it
    will be called with one argument, an OSError instance.  It can
    report the error to continue with the walk, or raise the exception
    to abort the walk.  Note that the filename is available as the
    filename attribute of the exception object.

    By default, os.walk does not follow symbolic links to subdirectories on
    systems that support them.  In order to get this functionality, set the
    optional argument 'followlinks' to true.

    Caution:  if you pass a relative pathname for top, don't change the
    current working directory between resumptions of walk.  walk never
    changes the current directory, and assumes that the client doesn't
    either.

    Example:

        import os
        from os.path import join, getsize
        for root, dirs, files in os.walk('python/Lib/email'):
            print(root, "consumes", end="")
            print(sum([getsize(join(root, name)) for name in files]), end="")
            print("bytes in", len(files), "non-directory files")
            if 'CVS' in dirs:
                dirs.remove('CVS')  # don't visit CVS directories
    """
    dirs = []
    nondirs = []

    # We may not have read permission for top, in which case we can't
    # get a list of the files the directory contains.  os.walk
    # always suppressed the exception then, rather than blow up for a
    # minor reason when (say) a thousand readable directories are still
    # left to visit.  That logic is copied here.
    try:
        # Note that scandir is global in this module due
        # to earlier import-*.
        scandir_it = scandir(top)
    except OSError as error:
        if onerror is not None:
            onerror(error)
        return

    # Classify each entry as directory or non-directory; for bottom-up
    # traversal, recurse immediately while the entry is still at hand.
    while True:
        try:
            try:
                entry = next(scandir_it)
            except StopIteration:
                break
        except OSError as error:
            if onerror is not None:
                onerror(error)
            return

        try:
            is_dir = entry.is_dir()
        except OSError:
            # If is_dir() raises an OSError, consider that the entry is not
            # a directory, same behaviour than os.path.isdir().
            is_dir = False

        if is_dir:
            dirs.append(entry.name)
        else:
            nondirs.append(entry.name)

        if not topdown and is_dir:
            # Bottom-up: recurse into sub-directory, but exclude symlinks to
            # directories if followlinks is False
            if followlinks:
                walk_into = True
            else:
                try:
                    is_symlink = entry.is_symlink()
                except OSError:
                    # If is_symlink() raises an OSError, consider that the
                    # entry is not a symbolic link, same behaviour than
                    # os.path.islink().
                    is_symlink = False
                walk_into = not is_symlink

            if walk_into:
                yield from walk(entry.path, topdown, onerror, followlinks)

    # Yield before recursion if going top down
    if topdown:
        yield top, dirs, nondirs

        # Recurse into sub-directories
        islink, join = path.islink, path.join
        for name in dirs:
            new_path = join(top, name)
            # Issue #23605: os.path.islink() is used instead of caching
            # entry.is_symlink() result during the loop on os.scandir() because
            # the caller can replace the directory entry during the "yield"
            # above.
            if followlinks or not islink(new_path):
                yield from walk(new_path, topdown, onerror, followlinks)
    else:
        # Yield after recursion if going bottom up
        yield top, dirs, nondirs
__all__.append("walk")
# fwalk() requires dir_fd support in open()/stat() and fd support in
# listdir()/stat(); only define it when the platform provides both.
if {open, stat} <= supports_dir_fd and {listdir, stat} <= supports_fd:

    def fwalk(top=".", topdown=True, onerror=None, *, follow_symlinks=False, dir_fd=None):
        """Directory tree generator.

        This behaves exactly like walk(), except that it yields a 4-tuple

            dirpath, dirnames, filenames, dirfd

        `dirpath`, `dirnames` and `filenames` are identical to walk() output,
        and `dirfd` is a file descriptor referring to the directory `dirpath`.

        The advantage of fwalk() over walk() is that it's safe against symlink
        races (when follow_symlinks is False).

        If dir_fd is not None, it should be a file descriptor open to a
        directory, and top should be relative; top will then be relative to
        that directory.  (dir_fd is always supported for fwalk.)

        Caution:
        Since fwalk() yields file descriptors, those are only valid until the
        next iteration step, so you should dup() them if you want to keep them
        for a longer period.

        Example:

            import os
            for root, dirs, files, rootfd in os.fwalk('python/Lib/email'):
                print(root, "consumes", end="")
                print(sum([os.stat(name, dir_fd=rootfd).st_size for name in files]),
                      end="")
                print("bytes in", len(files), "non-directory files")
                if 'CVS' in dirs:
                    dirs.remove('CVS')  # don't visit CVS directories
        """
        # Note: To guard against symlink races, we use the standard
        # lstat()/open()/fstat() trick.
        orig_st = stat(top, follow_symlinks=False, dir_fd=dir_fd)
        topfd = open(top, O_RDONLY, dir_fd=dir_fd)
        try:
            # Only walk when top really is (still) the directory we lstat'ed,
            # unless the caller explicitly asked to follow symlinks.
            if (follow_symlinks or (st.S_ISDIR(orig_st.st_mode) and
                path.samestat(orig_st, stat(topfd)))):
                yield from _fwalk(topfd, top, topdown, onerror, follow_symlinks)
        finally:
            close(topfd)

    def _fwalk(topfd, toppath, topdown, onerror, follow_symlinks):
        # Note: This uses O(depth of the directory tree) file descriptors: if
        # necessary, it can be adapted to only require O(1) FDs, see issue
        # #13734.

        names = listdir(topfd)
        dirs, nondirs = [], []
        for name in names:
            try:
                # Here, we don't use AT_SYMLINK_NOFOLLOW to be consistent with
                # walk() which reports symlinks to directories as directories.
                # We do however check for symlinks before recursing into
                # a subdirectory.
                if st.S_ISDIR(stat(name, dir_fd=topfd).st_mode):
                    dirs.append(name)
                else:
                    nondirs.append(name)
            except FileNotFoundError:
                try:
                    # Add dangling symlinks, ignore disappeared files
                    if st.S_ISLNK(stat(name, dir_fd=topfd, follow_symlinks=False)
                                .st_mode):
                        nondirs.append(name)
                except FileNotFoundError:
                    continue

        if topdown:
            yield toppath, dirs, nondirs, topfd

        for name in dirs:
            try:
                # Re-check the entry with lstat()/open()/fstat() so a symlink
                # swapped in after classification cannot redirect the walk.
                orig_st = stat(name, dir_fd=topfd, follow_symlinks=follow_symlinks)
                dirfd = open(name, O_RDONLY, dir_fd=topfd)
            except OSError as err:
                if onerror is not None:
                    onerror(err)
                return
            try:
                if follow_symlinks or path.samestat(orig_st, stat(dirfd)):
                    dirpath = path.join(toppath, name)
                    yield from _fwalk(dirfd, dirpath, topdown, onerror, follow_symlinks)
            finally:
                close(dirfd)

        if not topdown:
            yield toppath, dirs, nondirs, topfd

    __all__.append("fwalk")
# Make sure os.environ exists, at least
try:
    environ
except NameError:
    # The platform module did not supply one; fall back to an empty mapping.
    environ = {}
def execl(file, *args):
    """execl(file, *args)

    Execute the executable file with argument list args, replacing the
    current process.  Does not return on success."""
    execv(file, args)
def execle(file, *args):
    """execle(file, *args, env)

    Execute the executable *file*, replacing the current process, with
    argument list ``args[:-1]`` and the environment mapping passed as
    the final positional argument."""
    env = args[-1]
    argv = args[:-1]
    execve(file, argv, env)
def execlp(file, *args):
    """execlp(file, *args)

    Execute the executable file (which is searched for along $PATH)
    with argument list args, replacing the current process."""
    execvp(file, args)
def execlpe(file, *args):
    """execlpe(file, *args, env)

    Execute the executable *file* (searched for along $PATH), replacing
    the current process, with argument list ``args[:-1]`` and the
    environment mapping passed as the final positional argument."""
    env = args[-1]
    argv = args[:-1]
    execvpe(file, argv, env)
def execvp(file, args):
    """execvp(file, args)

    Execute the executable file (which is searched for along $PATH)
    with argument list args, replacing the current process.
    args may be a list or tuple of strings."""
    _execvpe(file, args)
def execvpe(file, args, env):
    """execvpe(file, args, env)

    Execute the executable file (which is searched for along $PATH)
    with argument list args and environment env, replacing the
    current process.
    args may be a list or tuple of strings."""
    _execvpe(file, args, env)
__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
def _execvpe(file, args, env=None):
    # Shared implementation of execvp()/execvpe(): search each PATH
    # directory for *file* and exec the first candidate that succeeds.
    if env is not None:
        exec_func = execve
        argrest = (args, env)
    else:
        exec_func = execv
        argrest = (args,)
        env = environ

    head, tail = path.split(file)
    if head:
        # An explicit directory component disables the PATH search.
        exec_func(file, *argrest)
        return
    last_exc = saved_exc = None
    saved_tb = None
    path_list = get_exec_path(env)
    if name != 'nt':
        file = fsencode(file)
        path_list = map(fsencode, path_list)
    for dir in path_list:
        fullname = path.join(dir, file)
        try:
            exec_func(fullname, *argrest)
        except OSError as e:
            last_exc = e
            tb = sys.exc_info()[2]
            # Remember the first "interesting" failure (e.g. EACCES);
            # plain not-found errors are only re-raised as a last resort.
            if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR
                and saved_exc is None):
                saved_exc = e
                saved_tb = tb
    if saved_exc:
        raise saved_exc.with_traceback(saved_tb)
    raise last_exc.with_traceback(tb)
def get_exec_path(env=None):
    """Return the sequence of directories that will be searched for the
    named executable (similar to a shell) when launching a process.

    *env* must be an environment variable dict or None.  If *env* is
    None, os.environ will be used.
    """
    # Local import keeps module start-up lean and avoids a bootstrap issue.
    import warnings

    if env is None:
        env = environ

    # Probing both 'PATH' and b'PATH' in a mixed-type mapping emits a
    # BytesWarning under python -b/-bb; silence it for these lookups.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", BytesWarning)

        try:
            path_list = env.get('PATH')
        except TypeError:
            path_list = None

        if supports_bytes_environ:
            try:
                bytes_path = env[b'PATH']
            except (KeyError, TypeError):
                pass
            else:
                if path_list is not None:
                    raise ValueError(
                        "env cannot contain 'PATH' and b'PATH' keys")
                path_list = bytes_path

            if path_list is not None and isinstance(path_list, bytes):
                path_list = fsdecode(path_list)

    if path_list is None:
        path_list = defpath
    return path_list.split(pathsep)
# Change environ to automatically call putenv(), unsetenv if they exist.
from _collections_abc import MutableMapping
class _Environ(MutableMapping):
def __init__(self, data, encodekey, decodekey, encodevalue, decodevalue, putenv, unsetenv):
self.encodekey = encodekey
self.decodekey = decodekey
self.encodevalue = encodevalue
self.decodevalue = decodevalue
self.putenv = putenv
self.unsetenv = unsetenv
self._data = data
def __getitem__(self, key):
try:
value = self._data[self.encodekey(key)]
except KeyError:
# raise KeyError with the original key value
raise KeyError(key) from None
return self.decodevalue(value)
def __setitem__(self, key, value):
key = self.encodekey(key)
value = self.encodevalue(value)
self.putenv(key, value)
self._data[key] = value
def __delitem__(self, key):
encodedkey = self.encodekey(key)
self.unsetenv(encodedkey)
try:
del self._data[encodedkey]
except KeyError:
# raise KeyError with the original key value
raise KeyError(key) from None
def __iter__(self):
for key in self._data:
yield self.decodekey(key)
def __len__(self):
return len(self._data)
def __repr__(self):
return 'environ({{{}}})'.format(', '.join(
('{!r}: {!r}'.format(self.decodekey(key), self.decodevalue(value))
for key, value in self._data.items())))
def copy(self):
return dict(self)
def setdefault(self, key, value):
if key not in self:
self[key] = value
return self[key]
# Bind putenv()/unsetenv() if the platform module provides them, with no-op
# fallbacks so _Environ can call them unconditionally.
try:
    _putenv = putenv
except NameError:
    _putenv = lambda key, value: None
else:
    if "putenv" not in __all__:
        __all__.append("putenv")

try:
    _unsetenv = unsetenv
except NameError:
    # Emulate removal by setting the variable to an empty string.
    _unsetenv = lambda key: _putenv(key, "")
else:
    if "unsetenv" not in __all__:
        __all__.append("unsetenv")
def _createenviron():
    # Build the os.environ mapping with platform-appropriate key/value
    # codecs, wrapping the raw environment supplied by the platform module.
    if name == 'nt':
        # Where Env Var Names Must Be UPPERCASE
        def check_str(value):
            if not isinstance(value, str):
                raise TypeError("str expected, not %s" % type(value).__name__)
            return value
        encode = check_str
        decode = str
        def encodekey(key):
            # Variable names are case-insensitive on Windows; normalize to
            # upper case so lookups are consistent.
            return encode(key).upper()
        data = {}
        for key, value in environ.items():
            data[encodekey(key)] = value
    else:
        # Where Env Var Names Can Be Mixed Case
        encoding = sys.getfilesystemencoding()
        def encode(value):
            if not isinstance(value, str):
                raise TypeError("str expected, not %s" % type(value).__name__)
            # surrogateescape round-trips arbitrary bytes through str.
            return value.encode(encoding, 'surrogateescape')
        def decode(value):
            return value.decode(encoding, 'surrogateescape')
        encodekey = encode
        data = environ
    return _Environ(data,
        encodekey, decode,
        encode, decode,
        _putenv, _unsetenv)
# unicode environ
environ = _createenviron()
del _createenviron
def getenv(key, default=None):
    """Get an environment variable, return None if it doesn't exist.

    The optional second argument can specify an alternate default.
    key, default and the result are str."""
    # Thin convenience wrapper over the os.environ mapping.
    return environ.get(key, default)
supports_bytes_environ = (name != 'nt')
__all__.extend(("getenv", "supports_bytes_environ"))
# On platforms with a bytes-native environment, expose a parallel bytes
# view (os.environb) sharing the same underlying data as os.environ.
if supports_bytes_environ:
    def _check_bytes(value):
        # environb stores raw bytes; reject anything else up front.
        if not isinstance(value, bytes):
            raise TypeError("bytes expected, not %s" % type(value).__name__)
        return value

    # bytes environ
    environb = _Environ(environ._data,
        _check_bytes, bytes,
        _check_bytes, bytes,
        _putenv, _unsetenv)
    del _check_bytes

    def getenvb(key, default=None):
        """Get an environment variable, return None if it doesn't exist.

        The optional second argument can specify an alternate default.
        key, default and the result are bytes."""
        return environb.get(key, default)

    __all__.extend(("environb", "getenvb"))
def _fscodec():
encoding = sys.getfilesystemencoding()
if encoding == 'mbcs':
errors = 'strict'
else:
errors = 'surrogateescape'
def fsencode(filename):
"""
Encode filename to the filesystem encoding with 'surrogateescape' error
handler, return bytes unchanged. On Windows, use 'strict' error handler if
the file system encoding is 'mbcs' (which is the default encoding).
"""
if isinstance(filename, bytes):
return filename
elif isinstance(filename, str):
return filename.encode(encoding, errors)
else:
raise TypeError("expect bytes or str, not %s" % type(filename).__name__)
def fsdecode(filename):
"""
Decode filename from the filesystem encoding with 'surrogateescape' error
handler, return str unchanged. On Windows, use 'strict' error handler if
the file system encoding is 'mbcs' (which is the default encoding).
"""
if isinstance(filename, str):
return filename
elif isinstance(filename, bytes):
return filename.decode(encoding, errors)
else:
raise TypeError("expect bytes or str, not %s" % type(filename).__name__)
return fsencode, fsdecode
fsencode, fsdecode = _fscodec()
del _fscodec
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):

    P_WAIT = 0
    P_NOWAIT = P_NOWAITO = 1

    __all__.extend(["P_WAIT", "P_NOWAIT", "P_NOWAITO"])

    # XXX Should we support P_DETACH? I suppose it could fork()**2
    # and close the std I/O streams. Also, P_OVERLAY is the same
    # as execv*()?

    def _spawnvef(mode, file, args, env, func):
        # Internal helper; func is the exec*() function to use
        pid = fork()
        if not pid:
            # Child
            try:
                if env is None:
                    func(file, args)
                else:
                    func(file, args, env)
            except:
                # Nothing may propagate out of the forked child; report
                # failure via the conventional exit status 127.
                _exit(127)
        else:
            # Parent
            if mode == P_NOWAIT:
                return pid # Caller is responsible for waiting!
            while 1:
                wpid, sts = waitpid(pid, 0)
                if WIFSTOPPED(sts):
                    # Child merely stopped (e.g. SIGSTOP); keep waiting.
                    continue
                elif WIFSIGNALED(sts):
                    return -WTERMSIG(sts)
                elif WIFEXITED(sts):
                    return WEXITSTATUS(sts)
                else:
                    raise OSError("Not stopped, signaled or exited???")

    def spawnv(mode, file, args):
        """spawnv(mode, file, args) -> integer

        Execute file with arguments from args in a subprocess.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, None, execv)

    def spawnve(mode, file, args, env):
        """spawnve(mode, file, args, env) -> integer

        Execute file with arguments from args in a subprocess with the
        specified environment.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, env, execve)

    # Note: spawnvp[e] isn't currently supported on Windows

    def spawnvp(mode, file, args):
        """spawnvp(mode, file, args) -> integer

        Execute file (which is looked for along $PATH) with arguments from
        args in a subprocess.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, None, execvp)

    def spawnvpe(mode, file, args, env):
        """spawnvpe(mode, file, args, env) -> integer

        Execute file (which is looked for along $PATH) with arguments from
        args in a subprocess with the supplied environment.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return _spawnvef(mode, file, args, env, execvpe)

    __all__.extend(["spawnv", "spawnve", "spawnvp", "spawnvpe"])
if _exists("spawnv"):
    # These aren't supplied by the basic Windows code
    # but can be easily implemented in Python

    def spawnl(mode, file, *args):
        """spawnl(mode, file, *args) -> integer

        Execute file with arguments from args in a subprocess.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return spawnv(mode, file, args)

    def spawnle(mode, file, *args):
        """spawnle(mode, file, *args, env) -> integer

        Execute file with arguments from args in a subprocess with the
        supplied environment.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        # The environment mapping is passed as the final positional argument.
        env = args[-1]
        return spawnve(mode, file, args[:-1], env)

    __all__.extend(["spawnl", "spawnle"])

if _exists("spawnvp"):
    # At the moment, Windows doesn't implement spawnvp[e],
    # so it won't have spawnlp[e] either.

    def spawnlp(mode, file, *args):
        """spawnlp(mode, file, *args) -> integer

        Execute file (which is looked for along $PATH) with arguments from
        args in a subprocess.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        return spawnvp(mode, file, args)

    def spawnlpe(mode, file, *args):
        """spawnlpe(mode, file, *args, env) -> integer

        Execute file (which is looked for along $PATH) with arguments from
        args in a subprocess with the supplied environment.
        If mode == P_NOWAIT return the pid of the process.
        If mode == P_WAIT return the process's exit code if it exits normally;
        otherwise return -SIG, where SIG is the signal that killed it. """
        # The environment mapping is passed as the final positional argument.
        env = args[-1]
        return spawnvpe(mode, file, args[:-1], env)

    __all__.extend(["spawnlp", "spawnlpe"])
# Supply os.popen()
def popen(cmd, mode="r", buffering=-1):
    """Open a pipe to or from the shell command *cmd*.

    Returns a file-like object wrapping the child's stdout (mode 'r')
    or stdin (mode 'w'); calling close() on it waits for the child and
    returns its encoded exit status (None on success)."""
    if not isinstance(cmd, str):
        raise TypeError("invalid cmd type (%s, expected string)" % type(cmd))
    if mode not in ("r", "w"):
        raise ValueError("invalid mode %r" % mode)
    if buffering == 0 or buffering is None:
        # TextIOWrapper requires a buffered binary stream underneath.
        raise ValueError("popen() does not support unbuffered streams")
    import subprocess, io
    if mode == "r":
        proc = subprocess.Popen(cmd, shell=True, bufsize=buffering,
                                stdout=subprocess.PIPE)
        return _wrap_close(io.TextIOWrapper(proc.stdout), proc)
    proc = subprocess.Popen(cmd, shell=True, bufsize=buffering,
                            stdin=subprocess.PIPE)
    return _wrap_close(io.TextIOWrapper(proc.stdin), proc)
# Helper for popen() -- a proxy for a file whose close waits for the process
class _wrap_close:
def __init__(self, stream, proc):
self._stream = stream
self._proc = proc
def close(self):
self._stream.close()
returncode = self._proc.wait()
if returncode == 0:
return None
if name == 'nt':
return returncode
else:
return returncode << 8 # Shift left to match old behavior
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def __getattr__(self, name):
return getattr(self._stream, name)
def __iter__(self):
return iter(self._stream)
# Supply os.fdopen()
def fdopen(fd, *args, **kwargs):
    """Return an open file object connected to the file descriptor *fd*
    (remaining arguments are the same as for io.open())."""
    if isinstance(fd, int):
        import io
        return io.open(fd, *args, **kwargs)
    raise TypeError("invalid fd type (%s, expected integer)" % type(fd))
|
dyyi/moneybook
|
venv/Lib/os.py
|
Python
|
apache-2.0
| 35,422
|
[
"VisIt"
] |
5df03288f86a413e8c9c7290bffdc944b3b9a521473c564f95fe0749965a7723
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import numpy as np
from fractions import Fraction
try:
from math import gcd
except ImportError:
from fractions import gcd
from itertools import groupby, product
from string import ascii_lowercase
from warnings import warn
import logging
import math
import warnings
from monty.fractions import lcm
from monty.json import MSONable
from pymatgen.core.periodic_table import Element, Specie, get_el_sp, DummySpecie
from pymatgen.transformations.transformation_abc import AbstractTransformation
from pymatgen.transformations.standard_transformations import \
SubstitutionTransformation, OrderDisorderedStructureTransformation
from pymatgen.command_line.enumlib_caller import EnumlibAdaptor, EnumError
from pymatgen.analysis.ewald import EwaldSummation
from pymatgen.core.structure import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.analysis.structure_prediction.substitution_probability import \
SubstitutionPredictor
from pymatgen.analysis.structure_matcher import StructureMatcher, \
SpinComparator
from pymatgen.analysis.energy_models import SymmetryModel
from pymatgen.analysis.bond_valence import BVAnalyzer
from pymatgen.core.surface import SlabGenerator
from pymatgen.electronic_structure.core import Spin
"""
This module implements more advanced transformations.
"""
__author__ = "Shyue Ping Ong, Stephen Dacek, Anubhav Jain, Matthew Horton"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Jul 24, 2012"
logger = logging.getLogger(__name__)
class ChargeBalanceTransformation(AbstractTransformation):
    """
    Disorder a structure so that its net charge becomes zero, given an
    oxidation-state-decorated structure.
    Args:
        charge_balance_sp: specie to add or remove. Currently only removal
            is supported
    """
    def __init__(self, charge_balance_sp):
        self.charge_balance_sp = str(charge_balance_sp)

    def apply_transformation(self, structure):
        balance_sp = get_el_sp(self.charge_balance_sp)
        # How many balancing ions must disappear to cancel the net charge,
        # expressed as a fraction of how many are present.
        n_to_remove = structure.charge / balance_sp.oxi_state
        n_present = structure.composition[balance_sp]
        removal_fraction = n_to_remove / n_present
        if removal_fraction < 0:
            raise ValueError("addition of specie not yet supported by "
                             "ChargeBalanceTransformation")
        partial_sub = SubstitutionTransformation(
            {self.charge_balance_sp:
                 {self.charge_balance_sp: 1 - removal_fraction}})
        return partial_sub.apply_transformation(structure)

    def __str__(self):
        return ("Charge Balance Transformation : "
                "Species to remove = {}".format(str(self.charge_balance_sp)))

    def __repr__(self):
        return self.__str__()

    @property
    def inverse(self):
        # Removal of species cannot be uniquely undone.
        return None

    @property
    def is_one_to_many(self):
        return False
class SuperTransformation(AbstractTransformation):
    """
    An inherently one-to-many transformation built from a list of
    transformations: one output structure is produced per transformation.
    The primary use for this class is extending a transmuter object.
    Args:
        transformations ([transformations]): List of transformations to apply
            to a structure. One transformation is applied to each output
            structure.
        nstructures_per_trans (int): If the transformations are one-to-many and,
            nstructures_per_trans structures from each transformation are
            added to the full list. Defaults to 1, i.e., only best structure.
    """
    def __init__(self, transformations, nstructures_per_trans=1):
        self._transformations = transformations
        self.nstructures_per_trans = nstructures_per_trans

    def apply_transformation(self, structure, return_ranked_list=False):
        if not return_ranked_list:
            raise ValueError("SuperTransformation has no single best structure"
                             " output. Must use return_ranked_list")
        ranked = []
        for trans in self._transformations:
            if trans.is_one_to_many:
                # Tag each candidate with the transformation that produced it.
                for entry in trans.apply_transformation(
                        structure,
                        return_ranked_list=self.nstructures_per_trans):
                    entry["transformation"] = trans
                    ranked.append(entry)
            else:
                ranked.append(
                    {"transformation": trans,
                     "structure": trans.apply_transformation(structure)})
        return ranked

    def __str__(self):
        names = " ".join([str(t) for t in self._transformations])
        return "Super Transformation : Transformations = {}".format(names)

    def __repr__(self):
        return self.__str__()

    @property
    def inverse(self):
        return None

    @property
    def is_one_to_many(self):
        return True
class MultipleSubstitutionTransformation(object):
    """
    Performs multiple substitutions on a structure. For example, can do a
    fractional replacement of Ge in LiGePS with a list of species, creating one
    structure for each substitution. Ordering is done using a dummy element so
    only one ordering must be done per substitution oxidation state. Charge
    balancing of the structure is optionally performed.
    .. note::
        There are no checks to make sure that removal fractions are possible
        and rounding may occur. Currently charge balancing only works for
        removal of species.
    """
    def __init__(self, sp_to_replace, r_fraction, substitution_dict,
                 charge_balance_species=None, order=True):
        """
        Performs multiple fractional substitutions on a transmuter.
        Args:
            sp_to_replace: species to be replaced
            r_fraction: fraction of that specie to replace
            substitution_dict: dictionary of the format
                {2: ["Mg", "Ti", "V", "As", "Cr", "Ta", "N", "Nb"],
                3: ["Ru", "Fe", "Co", "Ce", "As", "Cr", "Ta", "N", "Nb"],
                4: ["Ru", "V", "Cr", "Ta", "N", "Nb"],
                5: ["Ru", "W", "Mn"]
                }
                The number is the charge used for each of the list of elements
                (an element can be present in multiple lists)
            charge_balance_species: If specified, will balance the charge on
                the structure using that specie.
            order (bool): Whether to order the substituted (disordered)
                structure with OrderDisorderedStructureTransformation.
                Defaults to True.
        """
        self.sp_to_replace = sp_to_replace
        self.r_fraction = r_fraction
        self.substitution_dict = substitution_dict
        self.charge_balance_species = charge_balance_species
        self.order = order

    def apply_transformation(self, structure, return_ranked_list=False):
        """
        Returns a list of {"structure": ...} dicts, one per substituting
        element per charge in substitution_dict.
        """
        if not return_ranked_list:
            raise ValueError("MultipleSubstitutionTransformation has no single"
                             " best structure output. Must use"
                             " return_ranked_list.")
        outputs = []
        for charge, el_list in self.substitution_dict.items():
            sign = "+" if charge > 0 else "-"
            # Placeholder dummy specie, e.g. "X2+"; it is substituted back
            # with each real element after ordering.
            dummy_sp = "X{}{}".format(str(charge), sign)
            mapping = {self.sp_to_replace: {
                self.sp_to_replace: 1 - self.r_fraction,
                dummy_sp: self.r_fraction}}
            trans = SubstitutionTransformation(mapping)
            dummy_structure = trans.apply_transformation(structure)
            if self.charge_balance_species is not None:
                cbt = ChargeBalanceTransformation(self.charge_balance_species)
                dummy_structure = cbt.apply_transformation(dummy_structure)
            if self.order:
                trans = OrderDisorderedStructureTransformation()
                dummy_structure = trans.apply_transformation(dummy_structure)
            for el in el_list:
                # BUG FIX: the dummy specie was previously looked up as
                # "X<charge>+" regardless of sign, so for negative charges
                # the dummy ("X<charge>-") was never substituted back.
                # Reuse the exact dummy_sp string created above.
                st = SubstitutionTransformation(
                    {dummy_sp: "{}{}{}".format(el, charge, sign)})
                new_structure = st.apply_transformation(dummy_structure)
                outputs.append({"structure": new_structure})
        return outputs

    def __str__(self):
        return "Multiple Substitution Transformation : Substitution on " + \
               "{}".format(self.sp_to_replace)

    def __repr__(self):
        return self.__str__()

    @property
    def inverse(self):
        return None

    @property
    def is_one_to_many(self):
        return True
class EnumerateStructureTransformation(AbstractTransformation):
    """
    Order a disordered structure using enumlib. For complete orderings, this
    generally produces fewer structures than the OrderDisorderedStructure
    transformation, and at a much faster speed.
    Args:
        min_cell_size:
            The minimum cell size wanted. Must be an int. Defaults to 1.
        max_cell_size:
            The maximum cell size wanted. Must be an int. Defaults to 1.
        symm_prec:
            Tolerance to use for symmetry.
        refine_structure:
            This parameter has the same meaning as in enumlib_caller.
            If you are starting from a structure that has been relaxed via
            some electronic structure code, it is usually much better to
            start with symmetry determination and then obtain a refined
            structure. The refined structure have cell parameters and
            atomic positions shifted to the expected symmetry positions,
            which makes it much less sensitive precision issues in enumlib.
            If you are already starting from an experimental cif, refinement
            should have already been done and it is not necessary. Defaults
            to False.
        enum_precision_parameter (float): Finite precision parameter for
            enumlib. Default of 0.001 is usually ok, but you might need to
            tweak it for certain cells.
        check_ordered_symmetry (bool): Whether to check the symmetry of
            the ordered sites. If the symmetry of the ordered sites is
            lower, the lowest symmetry ordered sites is included in the
            enumeration. This is important if the ordered sites break
            symmetry in a way that is important getting possible
            structures. But sometimes including ordered sites
            slows down enumeration to the point that it cannot be
            completed. Switch to False in those cases. Defaults to True.
        max_disordered_sites (int):
            An alternate parameter to max_cell size. Will sequentially try
            larger and larger cell sizes until (i) getting a result or (ii)
            the number of disordered sites in the cell exceeds
            max_disordered_sites. Must set max_cell_size to None when using
            this parameter.
        sort_criteria (str): Sort by Ewald energy ("ewald", must have oxidation
            states and slow) or by number of sites ("nsites", much faster).
        timeout (float): timeout in minutes to pass to EnumlibAdaptor
    """

    def __init__(self, min_cell_size=1, max_cell_size=1, symm_prec=0.1,
                 refine_structure=False, enum_precision_parameter=0.001,
                 check_ordered_symmetry=True, max_disordered_sites=None,
                 sort_criteria="ewald", timeout=None):
        self.symm_prec = symm_prec
        self.min_cell_size = min_cell_size
        self.max_cell_size = max_cell_size
        self.refine_structure = refine_structure
        self.enum_precision_parameter = enum_precision_parameter
        self.check_ordered_symmetry = check_ordered_symmetry
        self.max_disordered_sites = max_disordered_sites
        self.sort_criteria = sort_criteria
        self.timeout = timeout
        # The two cell-size controls are mutually exclusive.
        if max_cell_size and max_disordered_sites:
            raise ValueError("Cannot set both max_cell_size and "
                             "max_disordered_sites!")

    def apply_transformation(self, structure, return_ranked_list=False):
        """
        Return either a single ordered structure or a sequence of all ordered
        structures.
        Args:
            structure: Structure to order.
            return_ranked_list (bool): Whether or not multiple structures are
                returned. If return_ranked_list is a number, that number of
                structures is returned.
        Returns:
            Depending on returned_ranked list, either a transformed structure
            or a list of dictionaries, where each dictionary is of the form
            {"structure" = .... , "other_arguments"}
            The list of ordered structures is ranked by ewald energy / atom, if
            the input structure is an oxidation state decorated structure.
            Otherwise, it is ranked by number of sites, with smallest number of
            sites first.
        """
        try:
            num_to_return = int(return_ranked_list)
        except ValueError:
            num_to_return = 1
        if self.refine_structure:
            finder = SpacegroupAnalyzer(structure, self.symm_prec)
            structure = finder.get_refined_structure()
        # Ewald ranking is only meaningful when every element carries a
        # non-zero oxidation state.
        contains_oxidation_state = all(
            [hasattr(sp, "oxi_state") and sp.oxi_state != 0 for sp in
             structure.composition.elements]
        )
        structures = None
        if structure.is_ordered:
            warn("Enumeration skipped for structure with composition {} "
                 "because it is ordered".format(structure.composition))
            structures = [structure.copy()]
        else:
            # BUG FIX: enumeration previously ran even for ordered structures
            # (despite the "skipped" warning above), overwriting the copied
            # structure and raising ZeroDivisionError when
            # max_disordered_sites was set (ndisordered == 0). Enumerate only
            # when the structure is actually disordered.
            if self.max_disordered_sites:
                ndisordered = sum([1 for site in structure
                                   if not site.is_ordered])
                if ndisordered > self.max_disordered_sites:
                    raise ValueError(
                        "Too many disordered sites! ({} > {})".format(
                            ndisordered, self.max_disordered_sites))
                max_cell_sizes = range(self.min_cell_size, int(
                    math.floor(self.max_disordered_sites / ndisordered)) + 1)
            else:
                max_cell_sizes = [self.max_cell_size]
            for max_cell_size in max_cell_sizes:
                adaptor = EnumlibAdaptor(
                    structure, min_cell_size=self.min_cell_size,
                    max_cell_size=max_cell_size,
                    symm_prec=self.symm_prec, refine_structure=False,
                    enum_precision_parameter=self.enum_precision_parameter,
                    check_ordered_symmetry=self.check_ordered_symmetry,
                    timeout=self.timeout)
                try:
                    adaptor.run()
                except EnumError:
                    # BUG FIX: this message used str.format on a %-style
                    # template ("%d".format(...)), so the cell size was never
                    # interpolated into the warning.
                    warn("Unable to enumerate for max_cell_size = {}".format(
                        max_cell_size))
                # NOTE(review): adaptor.structures is read even when run()
                # raised EnumError — confirm EnumlibAdaptor defines the
                # attribute in that case.
                structures = adaptor.structures
                if structures:
                    break
        if structures is None:
            raise ValueError("Unable to enumerate")
        original_latt = structure.lattice
        inv_latt = np.linalg.inv(original_latt.matrix)
        ewald_matrices = {}
        all_structures = []
        for s in structures:
            new_latt = s.lattice
            # Integer supercell matrix relating each enumerated cell to the
            # input lattice; reused as a cache key for Ewald summations.
            transformation = np.dot(new_latt.matrix, inv_latt)
            transformation = tuple([tuple([int(round(cell)) for cell in row])
                                    for row in transformation])
            if contains_oxidation_state and self.sort_criteria == "ewald":
                if transformation not in ewald_matrices:
                    s_supercell = structure * transformation
                    ewald = EwaldSummation(s_supercell)
                    ewald_matrices[transformation] = ewald
                else:
                    ewald = ewald_matrices[transformation]
                energy = ewald.compute_sub_structure(s)
                all_structures.append({"num_sites": len(s), "energy": energy,
                                       "structure": s})
            else:
                all_structures.append({"num_sites": len(s), "structure": s})

        def sort_func(s):
            # Rank by Ewald energy per site when available, else by size.
            return s["energy"] / s["num_sites"] \
                if contains_oxidation_state and self.sort_criteria == "ewald" \
                else s["num_sites"]

        self._all_structures = sorted(all_structures, key=sort_func)
        if return_ranked_list:
            return self._all_structures[0:num_to_return]
        else:
            return self._all_structures[0]["structure"]

    def __str__(self):
        return "EnumerateStructureTransformation"

    def __repr__(self):
        return self.__str__()

    @property
    def inverse(self):
        return None

    @property
    def is_one_to_many(self):
        return True
class SubstitutionPredictorTransformation(AbstractTransformation):
    """
    Uses the structure-prediction module to suggest likely site
    substitutions for a given structure.
    Args:
        threshold: Threshold for substitution.
        **kwargs: Args for SubstitutionProbability class lambda_table, alpha
    """
    def __init__(self, threshold=1e-2, **kwargs):
        self.kwargs = kwargs
        self.threshold = threshold
        self._substitutor = SubstitutionPredictor(threshold=threshold,
                                                  **kwargs)

    def apply_transformation(self, structure, return_ranked_list=False):
        if not return_ranked_list:
            raise ValueError("SubstitutionPredictorTransformation doesn't"
                             " support returning 1 structure")
        predictions = self._substitutor.composition_prediction(
            structure.composition, to_this_composition=False)
        # Most probable substitutions first.
        predictions.sort(key=lambda p: p['probability'], reverse=True)
        results = []
        for prediction in predictions:
            subs = prediction['substitutions']
            transformed = SubstitutionTransformation(
                subs).apply_transformation(structure)
            # dictionary keys have to be converted to strings for JSON
            str_subs = {str(k): str(v) for k, v in subs.items()}
            results.append({'structure': transformed,
                            'probability': prediction['probability'],
                            'threshold': self.threshold,
                            'substitutions': str_subs})
        return results

    def __str__(self):
        return "SubstitutionPredictorTransformation"

    def __repr__(self):
        return self.__str__()

    @property
    def inverse(self):
        return None

    @property
    def is_one_to_many(self):
        return True
class MagOrderParameterConstraint(MSONable):
    """
    Restricts a magnetic order parameter to a subset of species and/or
    sites; see __init__ for details.
    """
    def __init__(self, order_parameter,
                 species_constraints=None,
                 site_constraint_name=None,
                 site_constraints=None):
        """
        This class can be used to supply MagOrderingTransformation
        to just a specific subset of species or sites that satisfy the
        provided constraints. This can be useful for setting an order
        parameters for, for example, ferrimagnetic structures which
        might order on certain motifs, with the global order parameter
        dependent on how many sites satisfy that motif.
        :param order_parameter (float): any number from 0.0 to 1.0,
        typically 0.5 (antiferromagnetic) or 1.0 (ferromagnetic)
        :param species_constraint (list): str or list of strings
        of Specie symbols that the constraint should apply to
        :param site_constraint_name (str): name of the site property
        that the constraint should apply to, e.g. "coordination_no"
        :param site_constraints (list): list of values of the site
        property that the constraints should apply to
        """
        # validation: site values require a property name, and vice versa
        has_site_values = bool(site_constraints) \
            and site_constraints != [None]
        if has_site_values and not site_constraint_name:
            raise ValueError("Specify the name of the site constraint.")
        if not site_constraints and site_constraint_name:
            raise ValueError("Please specify some site constraints.")
        # normalize scalar inputs to single-element lists
        if not isinstance(species_constraints, list):
            species_constraints = [species_constraints]
        if not isinstance(site_constraints, list):
            site_constraints = [site_constraints]
        if order_parameter > 1 or order_parameter < 0:
            raise ValueError('Order parameter must lie between 0 and 1')
        if order_parameter != 0.5:
            warnings.warn("Use care when using a non-standard order parameter, "
                          "though it can be useful in some cases it can also "
                          "lead to unintended behavior. Consult documentation.")
        self.order_parameter = order_parameter
        self.species_constraints = species_constraints
        self.site_constraint_name = site_constraint_name
        self.site_constraints = site_constraints

    def satisfies_constraint(self, site):
        """
        Checks if a periodic site satisfies the constraint.
        """
        if not site.is_ordered:
            return False
        # species check first; a present site property then overrides it
        matches = bool(self.species_constraints
                       and str(site.specie) in self.species_constraints)
        if self.site_constraint_name \
                and self.site_constraint_name in site.properties:
            matches = site.properties[self.site_constraint_name] \
                in self.site_constraints
        return matches
class MagOrderingTransformation(AbstractTransformation):
    """
    Enumerates collinear magnetic orderings of an ordered structure by
    superimposing disordered dummy species, enumerating with enumlib, and
    mapping the resulting Spin.up/Spin.down decorations back onto the
    original sites. See __init__ for parameters.
    """
    # NOTE(review): energy_model=SymmetryModel() is a mutable default
    # evaluated once at def time and shared across instances — confirm
    # SymmetryModel is stateless before relying on that.
    def __init__(self, mag_species_spin, order_parameter=0.5,
                 energy_model=SymmetryModel(), **kwargs):
        """
        This transformation takes a structure and returns a list of collinear
        magnetic orderings. For disordered structures, make an ordered
        approximation first.
        :param mag_species_spin: A mapping of elements/species to their
        spin magnitudes, e.g. {"Fe3+": 5, "Mn3+": 4}
        :param order_parameter (float or list): if float, a specifies a
        global order parameter and can take values from 0.0 to 1.0
        (e.g. 0.5 for antiferromagnetic or 1.0 for ferromagnetic), if
        list has to be a list of
        :class: `pymatgen.transformations.advanced_transformations.MagOrderParameterConstraint`
        to specify more complicated orderings, see documentation for
        MagOrderParameterConstraint more details on usage
        :param energy_model: Energy model to rank the returned structures,
        see :mod: `pymatgen.analysis.energy_models` for more information (note
        that this is not necessarily a physical energy). By default, returned
        structures use SymmetryModel() which ranks structures from most
        symmetric to least.
        :param kwargs: Additional kwargs that are passed to
        :class:`EnumerateStructureTransformation` such as min_cell_size etc.
        """
        # checking for sensible order_parameter values
        if isinstance(order_parameter, float):
            # convert to constraint format
            order_parameter = [MagOrderParameterConstraint(order_parameter=order_parameter,
                                                           species_constraints=
                                                           list(mag_species_spin.keys()))]
        elif isinstance(order_parameter, list):
            ops = [isinstance(item, MagOrderParameterConstraint) for item in order_parameter]
            if not any(ops):
                raise ValueError("Order parameter not correctly defined.")
        else:
            raise ValueError("Order parameter not correctly defined.")
        self.mag_species_spin = mag_species_spin
        # store order parameter constraints as dicts to save implementing
        # to/from dict methods for MSONable compatibility
        self.order_parameter = [op.as_dict() for op in order_parameter]
        self.energy_model = energy_model
        self.enum_kwargs = kwargs

    @staticmethod
    def determine_min_cell(disordered_structure):
        """
        Determine the smallest supercell that is able to enumerate
        the provided structure with the given order parameter
        """
        # NOTE(review): this local lcm shadows the module-level
        # monty.fractions.lcm import, and under the __future__ true
        # division in this file it returns a float.
        def lcm(n1, n2):
            """
            Find least common multiple of two numbers
            """
            return n1 * n2 / gcd(n1, n2)
        # assumes all order parameters for a given species are the same
        mag_species_order_parameter = {}
        mag_species_occurrences = {}
        for idx, site in enumerate(disordered_structure):
            if not site.is_ordered:
                op = max(site.species_and_occu.values())
                # this very hacky bit of code only works because we know
                # that on disordered sites in this class, all species are the same
                # but have different spins, and this is comma-delimited
                sp = str(list(site.species_and_occu.keys())[0]).split(",")[0]
                if sp in mag_species_order_parameter:
                    mag_species_occurrences[sp] += 1
                else:
                    mag_species_order_parameter[sp] = op
                    mag_species_occurrences[sp] = 1
        smallest_n = []
        # For each magnetic species, the cell must be a multiple that makes
        # the order-parameter fraction realizable with whole atoms.
        for sp, order_parameter in mag_species_order_parameter.items():
            denom = Fraction(order_parameter).limit_denominator(100).denominator
            num_atom_per_specie = mag_species_occurrences[sp]
            n_gcd = gcd(denom, num_atom_per_specie)
            smallest_n.append(lcm(int(n_gcd), denom) / n_gcd)
        return max(smallest_n)

    @staticmethod
    def _add_dummy_species(structure, order_parameters):
        """
        :param structure: ordered Structure
        :param order_parameters: list of MagOrderParameterConstraints
        :return: A structure decorated with disordered
        DummySpecies on which to perform the enumeration.
        Note that the DummySpecies are super-imposed on
        to the original sites, to make it easier to
        retrieve the original site after enumeration is
        performed (this approach is preferred over a simple
        mapping since multiple species may have the same
        DummySpecie, depending on the constraints specified).
        This approach can also preserve site properties even after
        enumeration.
        """
        dummy_struct = structure.copy()

        def generate_dummy_specie():
            """
            Generator which returns DummySpecie symbols Mma, Mmb, etc.
            """
            subscript_length = 1
            while True:
                for subscript in product(ascii_lowercase, repeat=subscript_length):
                    yield "Mm"+"".join(subscript)
                subscript_length += 1
        dummy_species_gen = generate_dummy_specie()
        # one dummy species for each order parameter constraint
        dummy_species_symbols = [next(dummy_species_gen) for i in range(len(order_parameters))]
        # Each dummy is a disordered up/down pair weighted by the constraint's
        # order parameter.
        dummy_species = [{
            DummySpecie(symbol, properties={'spin': Spin.up}): constraint.order_parameter,
            DummySpecie(symbol, properties={'spin': Spin.down}): 1-constraint.order_parameter
        } for symbol, constraint in zip(dummy_species_symbols, order_parameters)]
        # NOTE(review): sites_to_add is never used below.
        sites_to_add = []
        for idx, site in enumerate(dummy_struct):
            satisfies_constraints = [c.satisfies_constraint(site) for c in order_parameters]
            if satisfies_constraints.count(True) > 1:
                # site should either not satisfy any constraints, or satisfy
                # one constraint
                raise ValueError("Order parameter constraints conflict for site: {}, {}"
                                 .format(str(site.specie), site.properties))
            elif any(satisfies_constraints):
                dummy_specie_idx = satisfies_constraints.index(True)
                dummy_struct.append(
                    dummy_species[dummy_specie_idx],
                    site.coords,
                    site.lattice
                )
        return dummy_struct

    @staticmethod
    def _remove_dummy_species(structure):
        """
        :return: Structure with dummy species removed, but
        their corresponding spin properties merged with the
        original sites. Used after performing enumeration.
        """
        if not structure.is_ordered:
            raise Exception("Something went wrong with enumeration.")
        sites_to_remove = []
        logger.debug('Dummy species structure:\n{}'.format(str(structure)))
        for idx, site in enumerate(structure):
            if isinstance(site.specie, DummySpecie):
                sites_to_remove.append(idx)
                spin = site.specie._properties.get('spin', None)
                # The dummy sits directly on top of its original site, so the
                # single near-zero-distance neighbor is that original site.
                neighbors = structure.get_neighbors(
                    site,
                    0.05, # arbitrary threshold, needs to be << any bond length
                    # but >> floating point precision issues
                    include_index=True
                )
                if len(neighbors) != 1:
                    raise Exception("This shouldn't happen, found neighbors: {}"
                                    .format(neighbors))
                orig_site_idx = neighbors[0][2]
                orig_specie = structure[orig_site_idx].specie
                new_specie = Specie(orig_specie.symbol,
                                    getattr(orig_specie, 'oxi_state', None),
                                    properties={'spin': spin})
                structure.replace(orig_site_idx,
                                  new_specie,
                                  properties=structure[orig_site_idx].properties)
        structure.remove_sites(sites_to_remove)
        logger.debug('Structure with dummy species removed:\n{}'.format(str(structure)))
        return structure

    def _add_spin_magnitudes(self, structure):
        """
        Replaces Spin.up/Spin.down with spin magnitudes specified
        by mag_species_spin.
        :param structure:
        :return:
        """
        for idx, site in enumerate(structure):
            if getattr(site.specie, '_properties', None):
                spin = site.specie._properties.get('spin', None)
                sign = int(spin) if spin else 0
                if spin:
                    new_properties = site.specie._properties.copy()
                    # this very hacky bit of code only works because we know
                    # that on disordered sites in this class, all species are the same
                    # but have different spins, and this is comma-delimited
                    sp = str(site.specie).split(",")[0]
                    new_properties.update({
                        'spin': sign*self.mag_species_spin.get(sp, 0)
                    })
                    new_specie = Specie(site.specie.symbol,
                                        getattr(site.specie, 'oxi_state', None),
                                        new_properties)
                    structure.replace(idx, new_specie,
                                      properties=site.properties)
        logger.debug('Structure with spin magnitudes:\n{}'.format(str(structure)))
        return structure

    def apply_transformation(self, structure, return_ranked_list=False):
        """
        Apply MagOrderTransformation to an input structure.
        :param structure: Any ordered structure.
        :param return_ranked_list: As in other Transformations.
        :return:
        """
        if not structure.is_ordered:
            raise ValueError("Create an ordered approximation of "
                             "your input structure first.")
        # retrieve order parameters
        order_parameters = [MagOrderParameterConstraint.from_dict(op_dict)
                            for op_dict in self.order_parameter]
        # add dummy species on which to perform enumeration
        structure = self._add_dummy_species(structure, order_parameters)
        # trivial case
        if structure.is_ordered:
            structure = self._remove_dummy_species(structure)
            # NOTE(review): for return_ranked_list=True (bool), True > 1 is
            # False so a bare structure is returned; for an int > 1 a list of
            # bare structures (not dicts) is returned — confirm callers
            # handle this shape.
            return [structure] if return_ranked_list > 1 else structure
        enum_kwargs = self.enum_kwargs.copy()
        # the enumeration cell must be at least big enough to realize the
        # order-parameter fractions with whole atoms
        enum_kwargs["min_cell_size"] = max(
            int(self.determine_min_cell(structure)),
            enum_kwargs.get("min_cell_size", 1)
        )
        if enum_kwargs.get("max_cell_size", None):
            if enum_kwargs["min_cell_size"] > enum_kwargs["max_cell_size"]:
                warnings.warn("Specified max cell size ({}) is smaller "
                              "than the minimum enumerable cell size ({}), "
                              "changing max cell size to {}".format(enum_kwargs["max_cell_size"],
                                                                    enum_kwargs["min_cell_size"],
                                                                    enum_kwargs["min_cell_size"]))
                enum_kwargs["max_cell_size"] = enum_kwargs["min_cell_size"]
        else:
            enum_kwargs["max_cell_size"] = enum_kwargs["min_cell_size"]
        t = EnumerateStructureTransformation(**enum_kwargs)
        alls = t.apply_transformation(structure,
                                      return_ranked_list=return_ranked_list)
        # handle the fact that EnumerateStructureTransformation can either
        # return a single Structure or a list
        if isinstance(alls, Structure):
            # remove dummy species and replace Spin.up or Spin.down
            # with spin magnitudes given in mag_species_spin arg
            alls = self._remove_dummy_species(alls)
            alls = self._add_spin_magnitudes(alls)
        else:
            for idx, _ in enumerate(alls):
                alls[idx]["structure"] = self._remove_dummy_species(alls[idx]["structure"])
                alls[idx]["structure"] = self._add_spin_magnitudes(alls[idx]["structure"])
        try:
            num_to_return = int(return_ranked_list)
        except ValueError:
            num_to_return = 1
        # Single-result shortcut: num_to_return is 0 only when
        # return_ranked_list was falsy, in which case alls is already a bare
        # Structure and is returned as-is.
        if num_to_return == 1 or not return_ranked_list:
            return alls[0]["structure"] if num_to_return else alls
        # remove duplicate structures and group according to energy model
        m = StructureMatcher(comparator=SpinComparator())
        # group candidates by space group number before the (more expensive)
        # structure matching; groupby requires the pre-sort by the same key
        key = lambda x: SpacegroupAnalyzer(x, 0.1).get_space_group_number()
        out = []
        for _, g in groupby(sorted([d["structure"] for d in alls],
                                   key=key), key):
            g = list(g)
            grouped = m.group_structures(g)
            out.extend([{"structure": g[0],
                         "energy": self.energy_model.get_energy(g[0])}
                        for g in grouped])
        self._all_structures = sorted(out, key=lambda d: d["energy"])
        return self._all_structures[0:num_to_return]

    def __str__(self):
        return "MagOrderingTransformation"

    def __repr__(self):
        return self.__str__()

    @property
    def inverse(self):
        return None

    @property
    def is_one_to_many(self):
        return True
def _find_codopant(target, oxidation_state, allowed_elements=None):
    """
    Finds the element from "allowed elements" that (i) possesses the desired
    "oxidation state" and (ii) is closest in ionic radius to the target specie
    Args:
        target: (Specie) provides target ionic radius.
        oxidation_state: (float) codopant oxidation state.
        allowed_elements: ([str]) List of allowed elements. If None,
            all elements are tried.
    Returns:
        (Specie) with oxidation_state that has ionic radius closest to
        target.
    """
    ref_radius = target.ionic_radius
    candidates = []
    symbols = allowed_elements or [el.symbol for el in Element]
    for sym in symbols:
        try:
            with warnings.catch_warnings():
                # Specie construction / radius lookup can emit warnings for
                # exotic symbol-charge combinations; suppress them here.
                warnings.simplefilter("ignore")
                sp = Specie(sym, oxidation_state)
                r = sp.ionic_radius
                if r is not None:
                    candidates.append((r, sp))
        # BUG FIX: was a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit; only skip genuine errors from
        # invalid symbol/oxidation-state combinations.
        except Exception:
            pass
    # Best candidate minimizes the fractional radius mismatch.
    return min(candidates, key=lambda l: abs(l[0]/ref_radius - 1))[1]
class DopingTransformation(AbstractTransformation):
"""
A transformation that performs doping of a structure.
"""
def __init__(self, dopant, ionic_radius_tol=float("inf"), min_length=10,
alio_tol=0, codopant=False, max_structures_per_enum=100,
allowed_doping_species=None, **kwargs):
"""
Args:
dopant (Specie-like): E.g., Al3+. Must have oxidation state.
ionic_radius_tol (float): E.g., Fractional allowable ionic radii
mismatch for dopant to fit into a site. Default of inf means
that any dopant with the right oxidation state is allowed.
min_Length (float): Min. lattice parameter between periodic
images of dopant. Defaults to 10A for now.
alio_tol (int): If this is not 0, attempt will be made to dope
sites with oxidation_states +- alio_tol of the dopant. E.g.,
1 means that the ions like Ca2+ and Ti4+ are considered as
potential doping sites for Al3+.
codopant (bool): If True, doping will be carried out with a
codopant to maintain charge neutrality. Otherwise, vacancies
will be used.
max_structures_per_enum (float): Maximum number of structures to
return per enumeration. Note that there can be more than one
candidate doping site, and each site enumeration will return at
max max_structures_per_enum structures. Defaults to 100.
allowed_doping_species (list): Species that are allowed to be
doping sites. This is an inclusionary list. If specified,
any sites which are not
\\*\\*kwargs:
Same keyword args as :class:`EnumerateStructureTransformation`,
i.e., min_cell_size, etc.
"""
self.dopant = get_el_sp(dopant)
self.ionic_radius_tol = ionic_radius_tol
self.min_length = min_length
self.alio_tol = alio_tol
self.codopant = codopant
self.max_structures_per_enum = max_structures_per_enum
self.allowed_doping_species = allowed_doping_species
self.kwargs = kwargs
def apply_transformation(self, structure, return_ranked_list=False):
"""
Args:
structure (Structure): Input structure to dope
Returns:
[{"structure": Structure, "energy": float}]
"""
comp = structure.composition
logger.info("Composition: %s" % comp)
for sp in comp:
try:
sp.oxi_state
except AttributeError:
analyzer = BVAnalyzer()
structure = analyzer.get_oxi_state_decorated_structure(
structure)
comp = structure.composition
break
ox = self.dopant.oxi_state
radius = self.dopant.ionic_radius
compatible_species = [
sp for sp in comp if sp.oxi_state == ox and
abs(sp.ionic_radius / radius - 1) < self.ionic_radius_tol]
if (not compatible_species) and self.alio_tol:
# We only consider aliovalent doping if there are no compatible
# isovalent species.
compatible_species = [
sp for sp in comp
if abs(sp.oxi_state - ox) <= self.alio_tol and
abs(sp.ionic_radius / radius - 1) < self.ionic_radius_tol and
sp.oxi_state * ox >= 0]
if self.allowed_doping_species is not None:
# Only keep allowed doping species.
compatible_species = [
sp for sp in compatible_species
if sp in [get_el_sp(s) for s in self.allowed_doping_species]]
logger.info("Compatible species: %s" % compatible_species)
lengths = structure.lattice.abc
scaling = [max(1, int(round(math.ceil(self.min_length/x))))
for x in lengths]
logger.info("Lengths are %s" % str(lengths))
logger.info("Scaling = %s" % str(scaling))
all_structures = []
t = EnumerateStructureTransformation(**self.kwargs)
for sp in compatible_species:
supercell = structure * scaling
nsp = supercell.composition[sp]
if sp.oxi_state == ox:
supercell.replace_species({sp: {sp: (nsp - 1)/nsp,
self.dopant: 1/nsp}})
logger.info("Doping %s for %s at level %.3f" % (
sp, self.dopant, 1 / nsp))
elif self.codopant:
codopant = _find_codopant(sp, 2 * sp.oxi_state - ox)
supercell.replace_species({sp: {sp: (nsp - 2) / nsp,
self.dopant: 1 / nsp,
codopant: 1 / nsp}})
logger.info("Doping %s for %s + %s at level %.3f" % (
sp, self.dopant, codopant, 1 / nsp))
elif abs(sp.oxi_state) < abs(ox):
# Strategy: replace the target species with a
# combination of dopant and vacancy.
# We will choose the lowest oxidation state species as a
# vacancy compensation species as it is likely to be lower in
# energy
sp_to_remove = min([s for s in comp if s.oxi_state * ox > 0],
key=lambda ss: abs(ss.oxi_state))
if sp_to_remove == sp:
common_charge = lcm(int(abs(sp.oxi_state)), int(abs(ox)))
ndopant = common_charge / abs(ox)
nsp_to_remove = common_charge / abs(sp.oxi_state)
logger.info("Doping %d %s with %d %s." %
(nsp_to_remove, sp, ndopant, self.dopant))
supercell.replace_species(
{sp: {sp: (nsp - nsp_to_remove) / nsp,
self.dopant: ndopant / nsp}})
else:
ox_diff = int(abs(round(sp.oxi_state - ox)))
vac_ox = int(abs(sp_to_remove.oxi_state))
common_charge = lcm(vac_ox, ox_diff)
ndopant = common_charge / ox_diff
nx_to_remove = common_charge / vac_ox
nx = supercell.composition[sp_to_remove]
logger.info("Doping %d %s with %s and removing %d %s." %
(ndopant, sp, self.dopant,
nx_to_remove, sp_to_remove))
supercell.replace_species(
{sp: {sp: (nsp - ndopant) / nsp,
self.dopant: ndopant / nsp},
sp_to_remove: {
sp_to_remove: (nx - nx_to_remove) / nx}})
elif abs(sp.oxi_state) > abs(ox):
# Strategy: replace the target species with dopant and also
# remove some opposite charged species for charge neutrality
if ox > 0:
sp_to_remove = max(supercell.composition.keys(),
key=lambda el: el.X)
else:
sp_to_remove = min(supercell.composition.keys(),
key=lambda el: el.X)
# Confirm species are of opposite oxidation states.
assert sp_to_remove.oxi_state * sp.oxi_state < 0
ox_diff = int(abs(round(sp.oxi_state - ox)))
anion_ox = int(abs(sp_to_remove.oxi_state))
nx = supercell.composition[sp_to_remove]
common_charge = lcm(anion_ox, ox_diff)
ndopant = common_charge / ox_diff
nx_to_remove = common_charge / anion_ox
logger.info("Doping %d %s with %s and removing %d %s." %
(ndopant, sp, self.dopant,
nx_to_remove, sp_to_remove))
supercell.replace_species(
{sp: {sp: (nsp - ndopant) / nsp,
self.dopant: ndopant / nsp},
sp_to_remove: {sp_to_remove: (nx - nx_to_remove)/nx}})
ss = t.apply_transformation(
supercell, return_ranked_list=self.max_structures_per_enum)
logger.info("%s distinct structures" % len(ss))
all_structures.extend(ss)
logger.info("Total %s doped structures" % len(all_structures))
if return_ranked_list:
return all_structures[:return_ranked_list]
return all_structures[0]["structure"]
@property
def inverse(self):
    """Returns None: no inverse transformation is defined for doping."""
    return None
@property
def is_one_to_many(self):
    """Returns True: applying this transformation yields a ranked list of
    distinct doped structures, not a single structure."""
    return True
class SlabTransformation(AbstractTransformation):
    """
    A transformation that creates a slab from a structure.
    """

    def __init__(self, miller_index, min_slab_size, min_vacuum_size,
                 lll_reduce=False, center_slab=False,
                 in_unit_planes=False, primitive=True,
                 max_normal_search=None, shift=0, tol=0.1):
        """
        Args:
            miller_index (3-tuple or list): miller index of slab
            min_slab_size (float): minimum slab size in angstroms
            min_vacuum_size (float): minimum size of vacuum
            lll_reduce (bool): whether to apply LLL reduction
            center_slab (bool): whether to center the slab
            in_unit_planes (bool): passed through to SlabGenerator; whether
                sizes are expressed in numbers of hkl planes rather than
                angstroms
            primitive (bool): whether to reduce slabs to most primitive cell
            max_normal_search (int): maximum index to include in linear
                combinations of indices to find c lattice vector orthogonal
                to slab surface
            shift (float): shift to get termination
            tol (float): tolerance for primitive cell finding
        """
        self.miller_index = miller_index
        self.min_slab_size = min_slab_size
        self.min_vacuum_size = min_vacuum_size
        self.lll_reduce = lll_reduce
        self.center_slab = center_slab
        self.in_unit_planes = in_unit_planes
        self.primitive = primitive
        self.max_normal_search = max_normal_search
        self.shift = shift
        # Bug fix: this was hard-coded to 0.1, silently discarding the
        # user-supplied tolerance.
        self.tol = tol

    def apply_transformation(self, structure):
        """Generate and return the slab of *structure* for the stored
        Miller index, sizes, shift and tolerance."""
        sg = SlabGenerator(structure, self.miller_index, self.min_slab_size,
                           self.min_vacuum_size, self.lll_reduce,
                           self.center_slab, self.in_unit_planes,
                           self.primitive, self.max_normal_search)
        slab = sg.get_slab(self.shift, self.tol)
        return slab

    @property
    def inverse(self):
        """Returns None: no inverse is defined for slab creation."""
        return None

    @property
    def is_one_to_many(self):
        """Returns False: a single slab is produced per input structure.
        (Previously returned None; False keeps the same truthiness with an
        explicit boolean.)"""
        return False
|
czhengsci/pymatgen
|
pymatgen/transformations/advanced_transformations.py
|
Python
|
mit
| 47,794
|
[
"pymatgen"
] |
76718b99b5e7ab9d595bf5ccf356a0d37007e67cec1607d0f855f69823492809
|
# ============================================================================
#
# Copyright (C) 2007-2010 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
import logging
LOGGER = logging.getLogger( 'camelot.view.controls.editors.onetomanyeditor' )
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import Qt
from wideeditor import WideEditor
from customeditor import CustomEditor
from camelot.view.art import Icon
from camelot.view.model_thread import gui_function, model_function, post
from camelot.core.utils import ugettext as _
from camelot.view import register
class One2ManyEditor(CustomEditor, WideEditor):
    """Editor widget for the "many" side of a one-to-many relation.

    Shows the related objects in a table with a vertical button bar to add,
    copy, delete and export rows.  After construction, ``set_value`` must be
    called to hand the editor its collection model.
    """

    # Icon shown on the "New" toolbar button.
    new_icon = Icon('tango/16x16/actions/document-new.png')

    def __init__( self,
                  admin = None,
                  parent = None,
                  create_inline = False,
                  vertical_header_clickable = True,
                  **kw ):
        """
        :param admin: the Admin interface for the objects on the one side of the
        relation
        :param create_inline: if False, then a new entity will be created within a
        new window, if True, it will be created inline
        :param vertical_header_clickable: True if the vertical header is clickable by the user, False if not.
        after creating the editor, set_value needs to be called to set the
        actual data to the editor
        """
        CustomEditor.__init__( self, parent )
        layout = QtGui.QHBoxLayout()
        layout.setContentsMargins( 0, 0, 0, 0 )
        #
        # Setup table
        #
        from camelot.view.controls.tableview import TableWidget
        # parent set by layout manager
        self.table = TableWidget(lines_per_row=admin.lines_per_row,
                                 columns_frozen=admin.list_columns_frozen)
        rowHeight = QtGui.QFontMetrics( self.font() ).height() + 5
        layout.setSizeConstraint( QtGui.QLayout.SetNoConstraint )
        self.setSizePolicy( QtGui.QSizePolicy.Expanding,
                            QtGui.QSizePolicy.Expanding )
        # Reserve room for roughly five rows by default.
        self.setMinimumHeight( rowHeight*5 )
        if vertical_header_clickable:
            self.table.verticalHeader().sectionClicked.connect(
                self.createFormForIndex
            )
        self.admin = admin
        self.create_inline = create_inline
        # The buttons are created in setupButtons below.
        self.add_button = None
        self.copy_button = None
        self.delete_button = None
        layout.addWidget( self.table )
        self.setupButtons( layout )
        self.setLayout( layout )
        self.model = None
        # Optional message shown instead of creating a row (see newRow).
        self._new_message = None

    def set_field_attributes(self, editable=True, new_message=None, **kwargs):
        """Enable or disable the mutating buttons according to *editable*,
        and store the optional *new_message* shown when adding is blocked."""
        self.add_button.setEnabled(editable)
        self.copy_button.setEnabled(editable)
        self.delete_button.setEnabled(editable)
        self._new_message = new_message

    def setupButtons( self, layout ):
        """Create the delete/new/copy/export tool buttons and add them, as a
        vertical button bar, to *layout*."""
        button_layout = QtGui.QVBoxLayout()
        button_layout.setSpacing( 0 )
        self.delete_button = QtGui.QToolButton()
        icon = Icon( 'tango/16x16/places/user-trash.png' ).getQIcon()
        self.delete_button.setIcon( icon )
        self.delete_button.setAutoRaise( True )
        self.delete_button.setToolTip(_('Delete'))
        self.delete_button.clicked.connect(self.deleteSelectedRows)
        self.add_button = QtGui.QToolButton()
        icon = self.new_icon.getQIcon()
        self.add_button.setIcon( icon )
        self.add_button.setAutoRaise( True )
        self.add_button.setToolTip(_('New'))
        self.add_button.clicked.connect(self.newRow)
        self.copy_button = QtGui.QToolButton()
        icon = Icon( 'tango/16x16/actions/edit-copy.png' ).getQIcon()
        self.copy_button.setIcon( icon )
        self.copy_button.setAutoRaise( True )
        self.copy_button.setToolTip(_('Copy'))
        self.copy_button.clicked.connect(self.copy_selected_rows)
        export_button = QtGui.QToolButton()
        export_button.setIcon( Icon( 'tango/16x16/mimetypes/x-office-spreadsheet.png' ).getQIcon() )
        export_button.setAutoRaise( True )
        export_button.setToolTip(_('Export as spreadsheet'))
        export_button.clicked.connect(self.exportToExcel)
        button_layout.addStretch()
        button_layout.addWidget( self.add_button )
        button_layout.addWidget( self.copy_button )
        button_layout.addWidget( self.delete_button )
        button_layout.addWidget( export_button )
        layout.addLayout( button_layout )

    def exportToExcel( self ):
        """Export the model's data to a spreadsheet; the data gathering is
        posted to the model thread."""
        from camelot.view.export.excel import open_data_with_excel
        def export():
            title = self.admin.get_verbose_name_plural()
            columns = self.admin.get_columns()
            if self.model:
                data = list( self.model.getData() )
                open_data_with_excel( title, columns, data )
        post( export )

    def getModel( self ):
        """Return the collection model currently attached, or None."""
        return self.model

    @QtCore.pyqtSlot(object)
    def update_delegates( self, *args ):
        """Install the model's item delegate on the table and widen each
        column to at least the width of its header's size hint."""
        if self.model:
            delegate = self.model.getItemDelegate()
            if delegate:
                self.table.setItemDelegate( delegate )
                for i in range( self.model.columnCount() ):
                    txtwidth = self.model.headerData( i, Qt.Horizontal, Qt.SizeHintRole ).toSize().width()
                    colwidth = self.table.columnWidth( i )
                    self.table.setColumnWidth( i, max( txtwidth, colwidth ) )

    def set_value( self, model ):
        """Attach a collection *model* to the table, register it, and post a
        cache fill whose completion triggers update_delegates."""
        model = CustomEditor.set_value( self, model )
        if model and model != self.model:
            self.model = model
            self.table.setModel( model )
            register.register( self.model, self.table )
            post( model._extend_cache, self.update_delegates )

    @gui_function
    def activate_editor( self, _row ):
        # return
        # Activating this code can cause segfaults
        # see ticket 765 in web issues
        #
        # The segfault seems no longer there after disabling the
        # editor before setting a new model, but the code below
        # seems to have no effect.
        index = self.model.index( _row, 0 )
        self.table.scrollToBottom()
        self.table.setCurrentIndex( index )
        self.table.edit( index )

    def newRow( self ):
        """Create a new related entity, inline in the table or in a new form
        window, unless a blocking new_message was configured."""
        from camelot.view.workspace import show_top_level
        if self._new_message:
            QtGui.QMessageBox.information(self, _('New'), self._new_message)
            return
        if self.create_inline:
            @model_function
            def create():
                o = self.admin.entity()
                row = self.model.insertEntityInstance( 0, o )
                return row
            post( create, self.activate_editor )
        else:
            form = self.admin.create_new_view( parent = None,
                                               oncreate = lambda o: self.model.insertEntityInstance( 0, o ),
                                               onexpunge = lambda o: self.model.remove_objects( o ) )
            show_top_level( form, self )

    def copy_selected_rows( self ):
        """Copy the selected rows in this tableview"""
        # NOTE(review): the debug message below says 'delete' — it looks like
        # a copy/paste from deleteSelectedRows; left unchanged here.
        LOGGER.debug( 'delete selected rows called' )
        for row in set( map( lambda x: x.row(), self.table.selectedIndexes() ) ):
            self.model.copy_row( row )

    def deleteSelectedRows( self ):
        """Delete the selected rows in this tableview"""
        LOGGER.debug( 'delete selected rows called' )
        self.table.close_editor()
        self.model.remove_rows( set( map( lambda x: x.row(), self.table.selectedIndexes() ) ) )

    def createFormForIndex( self, index ):
        """Open a form window for the row at *index*, backed by a single-row
        CollectionProxy over the same collection."""
        from camelot.view.workspace import show_top_level
        from camelot.view.proxy.collection_proxy import CollectionProxy
        model = CollectionProxy( self.admin,
                                 self.model.collection_getter,
                                 self.admin.get_fields,
                                 max_number_of_rows = 1,
                                 edits = None )
        form = self.admin.create_form_view( u'', model, self.model.map_to_source(index) )
        show_top_level( form, self )
|
kurtraschke/camelot
|
camelot/view/controls/editors/one2manyeditor.py
|
Python
|
gpl-2.0
| 9,012
|
[
"VisIt"
] |
43f3744dd9d936f0f14cc4c4b9ad5421369ad737d7ae287ab445afa1ce2d1b1a
|
# IPython log file
# cd ~/projects/play
# make sure cache.py is around
# Run cache.py in the interactive namespace and inspect the numba typing of
# its _correlate_sparse_offsets function.
get_ipython().run_line_magic('run', '-i cache.py')
_correlate_sparse_offsets.inspect_types()
_correlate_sparse_offsets.inspect_types(pretty=True)
get_ipython().run_line_magic('run', '-i cache.py')
get_ipython().run_line_magic('pinfo', '_correlate_sparse_offsets.inspect_types')
_correlate_sparse_offsets.inspect_types()
import numpy as np
from numba import jit
def find_instr(func, keyword, sig=0, limit=5):
    """Print up to *limit* assembly lines of a compiled function that
    contain *keyword*.

    func: compiled (e.g. numba-jitted) function exposing ``signatures``
        and ``inspect_asm``.
    keyword: substring to search for in each assembly line.
    sig: index into ``func.signatures`` selecting the compiled variant.
    limit: maximum number of matching lines to print.

    Prints 'No instructions found' when nothing matches.
    """
    asm = func.inspect_asm(func.signatures[sig])
    matches = 0
    for line in asm.split('\n'):
        if keyword not in line:
            continue
        matches += 1
        print(line)
        if matches >= limit:
            break
    if not matches:
        print('No instructions found')
@jit(nopython=True)
def sqdiff(x, y):
    """Element-wise squared difference (x - y)**2, compiled by numba."""
    out = np.empty_like(x)
    for i in range(x.shape[0]):
        out[i] = (x[i] - y[i])**2
    return out
# Trigger compilation for float32 and float64 inputs, then compare their
# timings and generated assembly ('subp*' = packed/SIMD subtraction).
x32 = np.linspace(1, 2, 10000, dtype=np.float32)
y32 = np.linspace(2, 3, 10000, dtype=np.float32)
sqdiff(x32, y32)
x64 = x32.astype(np.float64)
y64 = y32.astype(np.float64)
sqdiff(x64, y64)
sqdiff.signatures
get_ipython().run_line_magic('timeit', 'sqdiff(x32, y32)')
get_ipython().run_line_magic('timeit', 'sqdiff(x64, y64)')
print('float32:')
find_instr(sqdiff, keyword='subp', sig=0)
print('---\nfloat64:')
find_instr(sqdiff, keyword='subp', sig=1)
@jit(nopython=True)
def frac_diff1(x, y):
    """Element-wise fractional difference 2*(x-y)/(x+y), default error model."""
    out = np.empty_like(x)
    for i in range(x.shape[0]):
        out[i] = 2 * (x[i] - y[i]) / (x[i] + y[i])
    return out
# Check whether the division still vectorizes under the default error model.
frac_diff1(x32, y32)
find_instr(frac_diff1, keyword='subp', sig=0)
@jit(nopython=True, error_model='numpy')
def frac_diff2(x, y):
    """Same as frac_diff1 but with error_model='numpy', which removes the
    zero-division branch and can enable vectorization."""
    out = np.empty_like(x)
    for i in range(x.shape[0]):
        out[i] = 2 * (x[i] - y[i]) / (x[i] + y[i])
    return out
frac_diff2(x32, y32)
find_instr(frac_diff2, keyword='subp', sig=0)
frac_diff2(x64, y64)
get_ipython().run_line_magic('timeit', 'frac_diff2(x32, y32)')
get_ipython().run_line_magic('timeit', 'frac_diff2(x64, y64)')
frac_diff2.inspect_types(pretty=True)
@jit(nopython=True, error_model='numpy')
def frac_diff3(x, y):
    """Like frac_diff2, but casts the constant 2 to the input dtype so the
    float32 path is not promoted to float64."""
    out = np.empty_like(x)
    dt = x.dtype # Cast the constant using the dtype of the input
    for i in range(x.shape[0]):
        # Could also use np.float32(2) to always use same type, regardless of input
        out[i] = dt.type(2) * (x[i] - y[i]) / (x[i] + y[i])
    return out
frac_diff3(x32, y32)
frac_diff3(x64, y64)
get_ipython().run_line_magic('timeit', 'frac_diff3(x32, y32)')
get_ipython().run_line_magic('timeit', 'frac_diff3(x64, y64)')
# Normalization constant of the Gaussian kernel, hoisted out of the loop.
SQRT_2PI = np.sqrt(2 * np.pi)
@jit(nopython=True, error_model='numpy', fastmath=True)
def kde(x, means, widths):
    '''Compute value of gaussian kernel density estimate.
    x - location of evaluation
    means - array of kernel means
    widths - array of kernel widths
    '''
    # fastmath=True allows the reduction over acc to be reordered/vectorized.
    n = means.shape[0]
    acc = 0.
    for i in range(n):
        acc += np.exp( -0.5 * ((x - means[i]) / widths[i])**2 ) / widths[i]
    return acc / SQRT_2PI / n
@jit(nopython=True)
def sqdiff_indirect(x, y, indirection):
    """sqdiff with all accesses routed through an index array, to see how
    indirect addressing affects the generated code."""
    out = np.empty_like(x)
    for i in range(x.shape[0]):
        out[indirection[i]] = (x[indirection[i]] - y[indirection[i]])**2
    return out
# The identity permutation: same arithmetic as sqdiff, but indirect.
indirection = np.arange(x32.size)
get_ipython().run_line_magic('timeit', 'sqdiff_indirect(x32, y32, indirection)')
get_ipython().run_line_magic('timeit', 'sqdiff_indirect(x64, y64, indirection)')
print('float32:')
find_instr(sqdiff_indirect, keyword='subp', sig=0)
print('---\nfloat64:')
find_instr(sqdiff_indirect, keyword='subp', sig=1)
# Iterate on cache.py, checking after each edit whether packed ('subp',
# 'mulp', 'pd' = packed double, 'ps' = packed single) instructions appear.
get_ipython().run_line_magic('run', '-i cache.py')
get_ipython().run_line_magic('run', '-i cache.py')
get_ipython().run_line_magic('run', '-i cache.py')
find_instr(_correlate_sparse_offsets, keyword='subp', sig=0)
get_ipython().run_line_magic('run', '-i cache.py')
get_ipython().run_line_magic('run', '-i cache.py')
find_instr(_correlate_sparse_offsets, keyword='subp', sig=0)
_correlate_sparse_offsets.inspect_types()
get_ipython().run_line_magic('run', '-i cache.py')
get_ipython().run_line_magic('run', '-i cache.py')
find_instr(_correlate_sparse_offsets, keyword='subp', sig=0)
find_instr(_correlate_sparse_offsets, keyword='mulp', sig=0)
find_instr(_correlate_sparse_offsets, keyword='p', sig=0)
find_instr(_correlate_sparse_offsets, keyword='pd', sig=1)
find_instr(_correlate_sparse_offsets, keyword='pd', sig=0)
find_instr(_correlate_sparse_offsets, keyword='pf', sig=1)
find_instr(_correlate_sparse_offsets, keyword='ps', sig=1)
get_ipython().run_line_magic('run', '-i cache.py')
get_ipython().run_line_magic('run', '-i cache.py')
find_instr(_correlate_sparse_offsets, keyword='pd', sig=0)
find_instr(_correlate_sparse_offsets, keyword='ps', sig=1)
# Dump the raw assembly for manual inspection.
_correlate_sparse_offsets.inspect_asm
_correlate_sparse_offsets.inspect_asm()
get_ipython().run_line_magic('pinfo', '_correlate_sparse_offsets.inspect_asm')
_correlate_sparse_offsets.inspect_asm(0)
# Stray pager keystroke captured by the log; evaluates the name 'q'.
q
_correlate_sparse_offsets.inspect_asm()[0]
_correlate_sparse_offsets.inspect_asm().keys()
print(list(_correlate_sparse_offsets.inspect_asm().values())[0])
|
jni/useful-histories
|
numba-experiments.py
|
Python
|
bsd-3-clause
| 5,075
|
[
"Gaussian"
] |
cce029aa5e991b39dad332d18ad1536e128c5d6150bf82a431166ad50aace066
|
# Author: Ilan Schnell <ischnell@enthought.com>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
# TODO: should derive from HasTraits
import csv
# FIXME: see loadtxt.py (should really be the loadtxt from numpy)
from mayavi.tools.data_wizards.loadtxt import loadtxt
class Sniff(object):
    """ Sniff a CSV file and determine some of its properties.
    The properties determined here allow a CSV of unknown format
    to be read by numpy.loadtxt, i.e. the methods and attributes
    are suitable to determine required keyword arguments for
    numpy.loadtxt
    Example::
        s = Sniff('mydata.csv')
        print repr(s.delimiter())      # ','
        print s.skiprows()             # 2
        a = s.loadtxt()                # a is now the array
        from numpy import loadtxt      # make sure it's numpy 1.1.0 or
                                       # higher
        b = loadtxt('mydata.csv', **s.kwds())
    """
    def __init__(self, filename):
        self._filename = filename
        self._lines = self._read_few_lines()
        # Non-blank sample lines; the dialect is sniffed from the last one,
        # which is most likely to be a data row rather than a header.
        self._reallines = [line for line in self._lines if line.strip()]
        self._dialect = csv.Sniffer().sniff(self._reallines[-1])
        self._get_comment()
        # An alphanumeric "delimiter" means the sniffer mis-detected a data
        # character; fall back to whitespace splitting with a single column.
        if self._dialect.delimiter.isalnum():
            self._usePySplit = True
            self._numcols = 1
        else:
            self._usePySplit = not self._dialect.delimiter.strip()
            self._numcols = len(self._split(self._reallines[-1]))
        # Per-column types (float or str) inferred from the last data line.
        self._datatypes = self._datatypes_of_line(self._reallines[-1])

    def _get_comment(self):
        # Determine the comment character ('#' by default, or '%' when the
        # first line starts with it) and strip comments from the sample.
        self._comment = '#'
        line0 = self._reallines[0]
        if line0.startswith('#') or line0.startswith('%'):
            self._comment = line0[0]
            self._reallines[0] = self._dialect.delimiter.join(
                line0.split()[1:])
            for i in xrange(1, len(self._reallines)):
                self._reallines[i] = \
                    self._reallines[i].split(self._comment)[0]

    def _read_few_lines(self):
        # Read at most 21 lines as a representative sample of the file.
        res = []
        f = open(self._filename, 'rb')
        for line in f:
            line = line.strip()
            res.append(line)
            if len(res) > 20:
                break
        f.close()
        return res

    def _split(self, line):
        # Split a line into fields, on whitespace or with the sniffed
        # csv dialect.
        if self._usePySplit:
            return line.split()
        else:
            return csv.reader([line], self._dialect).next()

    def _names(self):
        # Return column names: the first sample line whose fields are all
        # strings (a header row), or generated 'Column i' names otherwise.
        if self._datatypes != self._numcols * (str,):
            for line in self._reallines:
                if len(self._split(line)) != self._numcols:
                    continue
                if self._datatypes_of_line(line) != self._numcols * (str,):
                    continue
                return tuple(t.strip('"\' \t') for t in self._split(line))
        return tuple('Column %i' % (i + 1) for i in xrange(self._numcols))

    def _formats(self):
        # Build the numpy format for each column: 'S<maxlen>' for string
        # columns (minimum length 1), plain float for numeric ones.
        res = []
        for c, t in enumerate(self._datatypes):
            if t == str:
                items = [len(self._split(l)[c]) for l in self._reallines[1:]
                         if self._datatypes_of_line(l) == self._datatypes]
                items.append(1)
                res.append('S%i' % max(items))
            elif t == float:
                res.append(t)
            else:
                raise TypeError("Hmm, did not expect: %r" % t)
        return tuple(res)

    def _datatypes_of_line(self, line):
        # Classify each field of the line as float or str.
        def isFloat(s):
            try:
                float(s)
                return True
            except ValueError:
                return False
        res = []
        for s in self._split(line):
            if isFloat(s):
                res.append(float)
            else:
                res.append(str)
        return tuple(res)

    def _debug(self):
        # Print every sniffed property (Python 2 print statements).
        print '===== Sniffed information for file %r:' % self._filename
        print 'delimiter = %r' % self.delimiter()
        print 'comments = %r' % self.comments()
        print 'dtype = %r' % self.dtype()
        print 'skiprows = %r' % self.skiprows()

    #-----------------------------------------------------------------------
    # Public API:
    #-----------------------------------------------------------------------
    def comments(self):
        """ Return the character used for comments (usually '#').
        """
        return self._comment

    def delimiter(self):
        """ Return the delimiter string.
        When whitespace is used as the delimiter, None is returned.
        """
        if self._usePySplit:
            return None
        else:
            return self._dialect.delimiter

    def skiprows(self):
        """ The number (int) of rows from the top to skip.
        """
        # The first sample line whose field types match the inferred data
        # types marks the start of the data.
        for n, line in enumerate(self._lines):
            if self._datatypes == self._datatypes_of_line(line):
                return n
        return 0

    def dtype(self):
        """ Return a dict suitable to be used as the dtype keyword
        argument of loadtxt.
        """
        return {'names': self._names(),
                'formats': self._formats()}

    def kwds(self):
        """ Return a dict of the keyword argument needed by numpy.loadtxt
        """
        return {'comments': self.comments(),
                'delimiter': self.delimiter(),
                'skiprows': self.skiprows(),
                'dtype': self.dtype()}

    def loadtxt(self):
        """ Return the array (by using numpy.loadtxt), using the sniffed
        information in the keyword arguments.
        """
        return loadtxt(self._filename, **self.kwds())
def loadtxt_unknown(filename, verbose=0):
    """Load *filename* into an array like numpy.loadtxt, but sniff the file
    format first so the required keyword arguments are found automatically.

    When *verbose* is truthy, the sniffed properties are printed.
    """
    sniffer = Sniff(filename)
    if verbose:
        sniffer._debug()
    return sniffer.loadtxt()
def array2dict(arr):
    """Convert a structured (named-dtype) array to a dict that maps each
    field name to the corresponding data as a 1d array.
    """
    return dict((name, arr[name]) for name in arr.dtype.names)
|
liulion/mayavi
|
mayavi/tools/data_wizards/csv_sniff.py
|
Python
|
bsd-3-clause
| 6,242
|
[
"Mayavi"
] |
bd60ca450f99e6c98a883b33af478d36e8f9930f114085866cf1a0e9a1951ac9
|
# -*- coding: utf-8 -*-
#@author: ilyass.tabiai@polymtl.ca
#@author: rolland.delorme@polymtl.ca
#@author: patrickdiehl@lsu.edu
import pkgutil

# vtk is an optional dependency: probe for it before importing so the rest
# of the module can degrade gracefully when it is absent.
vtk_loader = pkgutil.find_loader('vtk')
found_vtk = vtk_loader is not None
if found_vtk:  # idiom fix: was 'found_vtk == True'
    import vtk
## Handles the output of the simulation results to vtk unstructured grids
class vtk_writer():

    if found_vtk:

        ## Constructor
        #
        # @param path The path where the output is written
        # @param types The simulation attributes considered as output
        # @param slice_length Every n-th time step is written
        def __init__(self, path="", types="", slice_length=-1):
            ## Is vtk enabled
            self.vtk_enabled = True
            ## Path for the output
            self.path = path
            ## Types of the attributes
            self.types = types
            ## Slice for the time steps
            self.slice_length = slice_length

        ## Writes the data to vtk unstructured grid files
        #
        # @param deck The deck with the input from the yaml file
        # @param problem The problem contains the simulation results
        # @param ccm_class The results from the computation from ccm
        def write_data(self, deck, problem, ccm_class):
            num_nodes = deck.num_nodes
            # One .vtu file per written time step.
            for t in range(0, deck.time_steps, self.slice_length):
                writer = vtk.vtkXMLUnstructuredGridWriter()
                writer.SetFileName(self.path + "output_" + str(t) + ".vtu")
                grid = vtk.vtkUnstructuredGrid()
                points = vtk.vtkPoints()
                points.SetNumberOfPoints(num_nodes)
                points.SetDataTypeToDouble()
                # Node positions at time t; missing dimensions padded with 0.
                for i in range(0, num_nodes):
                    act = problem.y
                    if deck.dim == 1:
                        points.InsertPoint(i, act[i][0][t], 0., 0.)
                    if deck.dim == 2:
                        points.InsertPoint(i, act[i][0][t], act[i][1][t], 0.)
                    if deck.dim == 3:
                        points.InsertPoint(i, act[i][0][t], act[i][1][t], act[i][2][t])
                grid.SetPoints(points)
                dataOut = grid.GetPointData()
                for out_type in self.types:
                    if out_type == "Displacement":
                        array = vtk.vtkDoubleArray()
                        array.SetName("Displacement")
                        array.SetNumberOfComponents(deck.dim)
                        array.SetNumberOfTuples(num_nodes)
                        act = problem.y
                        for i in range(num_nodes):
                            if deck.dim == 1:
                                array.SetTuple1(i, act[i][0][t] - deck.geometry.nodes[i][0])
                            if deck.dim == 2:
                                array.SetTuple2(i, act[i][0][t] - deck.geometry.nodes[i][0], act[i][1][t] - deck.geometry.nodes[i][1])
                                array.SetComponentName(0, "d_x")
                                array.SetComponentName(1, "d_y")
                        dataOut.AddArray(array)
                    if out_type == "Neighbors":
                        array = vtk.vtkIntArray()
                        array.SetName("Neighbors")
                        array.SetNumberOfComponents(1)
                        array.SetNumberOfTuples(num_nodes)
                        for i in range(num_nodes):
                            array.SetTuple1(i, len(problem.neighbors.get_index_x_family(i)))
                        dataOut.AddArray(array)
                    if out_type == "Force":
                        array = vtk.vtkDoubleArray()
                        array.SetName("Volume_Force")
                        array.SetNumberOfComponents(deck.dim)
                        array.SetNumberOfTuples(num_nodes)
                        force = problem.force_int
                        for i in range(num_nodes):
                            if deck.dim == 1:
                                array.SetTuple1(i, force[i][0][t])
                            if deck.dim == 2:
                                array.SetTuple2(i, force[i][0][t], force[i][1][t])
                                array.SetComponentName(0, "f_x")
                                array.SetComponentName(1, "f_y")
                        dataOut.AddArray(array)
                    if out_type == "Conditions":
                        # One 0/1 indicator array per boundary condition.
                        for con in deck.conditions:
                            array = vtk.vtkIntArray()
                            array.SetName("Condition_" + con.type + "_" + str(con.value) + "_" + str(con.direction))
                            array.SetNumberOfComponents(1)
                            array.SetNumberOfTuples(num_nodes)
                            for i in range(num_nodes):
                                if i not in con.id:
                                    array.SetTuple1(i, 0)
                                else:
                                    array.SetTuple1(i, 1)
                            dataOut.AddArray(array)
                    if out_type == "Volume_Force":
                        force = problem.force_int
                        for con in deck.conditions:
                            if con.type == "Force":
                                # Accumulate the resultant volume force over
                                # all nodes subject to this condition.
                                result_x = 0.
                                result_y = 0.
                                result_z = 0.
                                for i in con.id:
                                    index = int(i)
                                    if deck.dim >= 1:
                                        result_x += force[index][0][t] * deck.geometry.volumes[index]
                                    if deck.dim >= 2:
                                        result_y += force[index][1][t] * deck.geometry.volumes[index]
                                    if deck.dim >= 3:
                                        result_z += force[index][2][t] * deck.geometry.volumes[index]
                                array = vtk.vtkDoubleArray()
                                array.SetName("Volume_" + con.type + "_" + str(con.value) + "_" + str(con.direction))
                                array.SetNumberOfComponents(deck.dim)
                                array.SetNumberOfTuples(num_nodes)
                                # Bug fix: the component names used to be set
                                # inside the accumulation loop, on the stale
                                # array of a previous output type, before this
                                # array existed. Name the new array instead.
                                array.SetComponentName(0, "f_x")
                                if deck.dim >= 2:
                                    array.SetComponentName(1, "f_y")
                                if deck.dim >= 3:
                                    array.SetComponentName(2, "f_z")
                                for i in range(num_nodes):
                                    if i in con.id:
                                        if deck.dim == 1:
                                            array.SetTuple1(i, result_x)
                                        if deck.dim == 2:
                                            array.SetTuple2(i, result_x, result_y)
                                        if deck.dim == 3:
                                            array.SetTuple3(i, result_x, result_y, result_z)
                                    else:
                                        if deck.dim == 1:
                                            array.SetTuple1(i, 0.)
                                        if deck.dim == 2:
                                            array.SetTuple2(i, 0., 0.)
                                        if deck.dim == 3:
                                            array.SetTuple3(i, 0., 0., 0.)
                                dataOut.AddArray(array)
                    if out_type == "Strain":
                        # Strain in Voigt-like component order per dimension.
                        array = vtk.vtkDoubleArray()
                        array.SetName("Strain")
                        if deck.dim == 1:
                            array.SetNumberOfComponents(1)
                        if deck.dim == 2:
                            array.SetNumberOfComponents(3)
                        if deck.dim == 3:
                            array.SetNumberOfComponents(6)
                        array.SetNumberOfTuples(num_nodes)
                        strain = ccm_class.global_strain[:, :, 1]
                        for i in range(num_nodes):
                            if deck.dim == 1:
                                array.SetComponentName(0, "epsil_xx")
                                array.SetTuple1(i, strain[i, 0])
                            if deck.dim == 2:
                                xx = strain[i * deck.dim, 0]
                                xy = strain[i * deck.dim, 1]
                                yy = strain[i * deck.dim + 1, 1]
                                array.SetTuple3(i, xx, yy, xy)
                                array.SetComponentName(0, "epsil_xx")
                                array.SetComponentName(1, "epsil_yy")
                                array.SetComponentName(2, "epsil_xy")
                            if deck.dim == 3:
                                xx = strain[i * deck.dim, 0]
                                xy = strain[i * deck.dim, 1]
                                yy = strain[i * deck.dim + 1, 1]
                                yz = strain[i * deck.dim + 1, 2]
                                xz = strain[i * deck.dim, 2]
                                zz = strain[i * deck.dim + 2, 2]
                                array.SetTuple6(i, xx, yy, zz, yz, xz, xy)
                                array.SetComponentName(0, "epsil_xx")
                                array.SetComponentName(1, "epsil_yy")
                                array.SetComponentName(2, "epsil_zz")
                                array.SetComponentName(3, "epsil_yz")
                                array.SetComponentName(4, "epsil_xz")
                                array.SetComponentName(5, "epsil_xy")
                        dataOut.AddArray(array)
                    if out_type == "Stress":
                        array = vtk.vtkDoubleArray()
                        array.SetName("Stress")
                        if deck.dim == 1:
                            array.SetNumberOfComponents(1)
                        if deck.dim == 2:
                            array.SetNumberOfComponents(3)
                        if deck.dim == 3:
                            array.SetNumberOfComponents(6)
                        array.SetNumberOfTuples(num_nodes)
                        stress = ccm_class.global_stress[:, :, 1]
                        for i in range(num_nodes):
                            if deck.dim == 1:
                                array.SetComponentName(0, "sigma_xx")
                                # Bug fix: this used to read 'strain[i,0]',
                                # which is undefined here and, in any case,
                                # the wrong field for the stress output.
                                array.SetTuple1(i, stress[i, 0])
                            if deck.dim == 2:
                                xx = stress[i * deck.dim, 0]
                                xy = stress[i * deck.dim, 1]
                                yy = stress[i * deck.dim + 1, 1]
                                array.SetTuple3(i, xx, yy, xy)
                                array.SetComponentName(0, "sigma_xx")
                                array.SetComponentName(1, "sigma_yy")
                                array.SetComponentName(2, "sigma_xy")
                            if deck.dim == 3:
                                xx = stress[i * deck.dim, 0]
                                xy = stress[i * deck.dim, 1]
                                yy = stress[i * deck.dim + 1, 1]
                                yz = stress[i * deck.dim + 1, 2]
                                xz = stress[i * deck.dim, 2]
                                zz = stress[i * deck.dim + 2, 2]
                                array.SetTuple6(i, xx, yy, zz, yz, xz, xy)
                                array.SetComponentName(0, "sigma_xx")
                                array.SetComponentName(1, "sigma_yy")
                                array.SetComponentName(2, "sigma_zz")
                                array.SetComponentName(3, "sigma_yz")
                                array.SetComponentName(4, "sigma_xz")
                                array.SetComponentName(5, "sigma_xy")
                        dataOut.AddArray(array)
                    if out_type == "Strain_DIC":
                        # Strain measured by digital image correlation (2D).
                        array = vtk.vtkDoubleArray()
                        array.SetName("Strain_DIC")
                        array.SetNumberOfComponents(3)
                        array.SetNumberOfTuples(num_nodes)
                        for i in range(num_nodes):
                            xx = deck.geometry.strain[i][0]
                            xy = deck.geometry.strain[i][2]
                            yy = deck.geometry.strain[i][1]
                            array.SetTuple3(i, xx, yy, xy)
                            array.SetComponentName(0, "epsil_xx")
                            array.SetComponentName(1, "epsil_yy")
                            array.SetComponentName(2, "epsil_xy")
                        dataOut.AddArray(array)
                    if out_type == "Strain_Error":
                        # Absolute difference between measured and computed
                        # strain components (2D).
                        array = vtk.vtkDoubleArray()
                        array.SetName("Strain_Error")
                        array.SetNumberOfComponents(3)
                        array.SetNumberOfTuples(num_nodes)
                        strain = ccm_class.global_strain[:, :, 1]
                        for i in range(num_nodes):
                            xx = abs(deck.geometry.strain[i][0] - strain[i * deck.dim, 0])
                            xy = abs(deck.geometry.strain[i][2] - strain[i * deck.dim, 1])
                            yy = abs(deck.geometry.strain[i][1] - strain[i * deck.dim + 1, 1])
                            array.SetTuple3(i, xx, yy, xy)
                            array.SetComponentName(0, "error_xx")
                            array.SetComponentName(1, "error_yy")
                            array.SetComponentName(2, "error_xy")
                        dataOut.AddArray(array)
                    if out_type == "Strain_Energy":
                        array = vtk.vtkDoubleArray()
                        array.SetName("Strain_Energy")
                        array.SetNumberOfComponents(1)
                        array.SetNumberOfTuples(num_nodes)
                        strain_energy = problem.strain_energy
                        for i in range(num_nodes):
                            array.SetTuple1(i, strain_energy[i])
                        dataOut.AddArray(array)
                    if out_type == "Volume":
                        array = vtk.vtkDoubleArray()
                        array.SetName("Volume")
                        array.SetNumberOfComponents(1)
                        array.SetNumberOfTuples(num_nodes)
                        for i in range(num_nodes):
                            array.SetTuple1(i, deck.geometry.volumes[i])
                        dataOut.AddArray(array)
                writer.SetInputData(grid)
                # Uncompressed ASCII output eases manual inspection.
                writer.GetCompressor().SetCompressionLevel(0)
                writer.SetDataModeToAscii()
                writer.Write()

    else:

        ## Constructor
        #
        # @param path The path where the output is written
        # @param types The simulation attributes considered as output
        # @param slice_length Every n-th time step is written
        def __init__(self, path="", types="", slice_length=-1):
            # vtk could not be imported: mark the writer as disabled.
            self.vtk_enabled = False
|
lm2-poly/PeriPyDIC
|
peripydic/IO/vis.py
|
Python
|
gpl-3.0
| 15,318
|
[
"VTK"
] |
75bdfcab07c720c727d379b218a49a53708056fc5be3968e591b69d12ae66463
|
# -*- coding: utf-8 -*-
"""Module modeling a Multilayer Perceptron, featuring a tiny example
Example
-------
Executing this script will run a test consisting on training two linearly
separable set of points
$ python mlp.py
On the other hand, we can import this module and instanciating an Multi-
layer Perceptron in the following way:
mlp = MLP(K_list, activation_functions, diff_activation_functions)
Then, we train the MLP in this fashion:
mlp.train(x_data, t_data,
epochs=1000, batch_size=20, initialize_weights=False,
method='adam', eta=0.1, beta=0, gamma=0.9, beta_1=0.9,
beta_2=0.999, epsilon=1e-8, print_cost=True)
These arguments are widely explained in the MLP 'train' method
"""
from __future__ import division, print_function
import sys
import numpy as np
import mlpOptimizer as mlpo
__author__ = "Ignacio Casso, Daniel Gamo, Gwydion J. Martín, Alberto Terceño"
class MLP(object):
"""Class that models a Multilayer Perceptron (MLP)
Here they are some notation and assumptions that have been made throughout
this module:
- N: number of input data examples
- R: number of layers (the input layer is not considered, and it will
be named as the 0-layer since it doesn't have
activation functions nor weights matrix)
- Dk: number of neurons on k-layer
- A weights matrix on each layer has dimension (Dk, Dk+1). An element
placed in the i-th row, j-th columns can be seen as the i-th weight
of the j-th neuron on layer k+1. Hence, units will multiply the
weights matrixes by their left hand side
- The matrix which groups the N different input data examples places
each one of them in rows. Matrix containing the activations and units
for each layer will be computed and saved in the same way
- Weights and biases matrixes for the k-th layer can be found in the
(k-1)-th index of the lists which hold all these matrixes. It is
important to keep this gap in mind
Attributes
----------
K_list : [int]
List containing (in order) the number of neurons on each layer
(including the input and the output layer)
nb_layers : int
Number of layers in the neuronal networks (excluding the input one)
activation_functions : [function]
Ordered list of the activation functions used on each layer
diff_activation_functions : [function]
Ordered list holding the derivatives functions of the corresponding
activation ones used on each layer
init_seed : int
Seed used in order to initialize weights
weights_list : np.array
List which holds in its (k-1)-th index the weights matrix corres-
ponding to the k-th layer
biases_list : np.array
List which holds in its (k-1)-th index the bias vector corres-
ponding to the k-th layer
y : [np.array]
Multilayer Perceptron outputs
reg_method : string
Indicates the regularization method to be used. There have been
implemented: 'L1', 'L2' and 'Elastic_Net' regularizations
beta : int
Regularization parameter
"""
def __init__(self, K_list,
             activation_functions, diff_activation_functions,
             init_seed=None):
    """Set up the hyperparameters of the network and initialize weights.

    Parameters
    ----------
    K_list : [int]
        Number of neurons on each layer, input and output included.
    activation_functions : [function]
        Activation function of each layer, in order.
    diff_activation_functions : [function]
        Derivative of the activation function of each layer, in order.
    init_seed : int
        Seed used for the random weight initialization.
    """
    # Architecture (the input layer carries no weights, hence the -1).
    self.K_list = K_list
    self.nb_layers = len(K_list) - 1
    self.activation_functions = activation_functions
    self.diff_activation_functions = diff_activation_functions

    # Parameters, created by init_weights below.
    self.init_seed = init_seed
    self.weights_list = None
    self.biases_list = None

    # No forward pass has been run yet.
    self.y = None

    # Regularization settings are configured later, at training time.
    self.beta = None
    self.reg_method = None

    self.init_weights()
# %% definition of activation functions and derivatives
@staticmethod
def sigmoid(z):
"""Numerically stable implementation of the sigmoid function
Parameters
----------
z : np.array
Matrix containing activations for each data sample. The activation
for the i-th data sample is stored in the i-th row, as we have
assumed
Returns
-------
np.array
Element-wise sigmoid function applied to parameter z
"""
y = np.zeros(z.shape)
masc1 = z >= 0
masc2 = z < 0
y[masc1] = 1 / (1 + np.exp(-z[masc1]))
y[masc2] = np.exp(z[masc2]) / (np.exp(z[masc2]) + 1)
return y
@staticmethod
def dsigmoid(z):
"""Implementation of the derivative sigmoid function
Parameters
----------
z : np.array
Matrix containing activations for each data sample. The activation
for the i-th data sample is stored in the i-th row, as we have
assumed
Returns
-------
np.array
Element-wise derivative sigmoid function applied to parameter z
"""
return MLP.sigmoid(z) * (1 - MLP.sigmoid(z))
@staticmethod
def dtanh(z):
"""Implementation of the derivative hyperbolic tangent function
Parameters
----------
z : np.array
Matrix containing activations for each data sample. The activation
for the i-th data sample is stored in the i-th row, as we have
assumed
Returns
-------
np.array
Element-wise derivative hyperbolic tangent function applied
to parameter z
"""
return 1 - np.tanh(z)**2
@staticmethod
def relu(z):
"""Implementation of the rectifier activation function
Parameters
----------
z : np.array
Matrix containing activations for each data sample. The activation
for the i-th data sample is stored in the i-th row, as we have
assumed
Notes
----------
This implementation has been choosen since it is been proved to be a
fast way
Source: https://goo.gl/QIiHFP
Returns
-------
np.array
Element-wise rectifier activation function applied to parameter z
"""
return z * (z > 0)
# return np.maximum(z, 0)
@staticmethod
def drelu(z):
"""Implementation of the derivative rectifier function
Parameters
----------
z : np.array
Matrix containing activations for each data sample. The activation
for the i-th data sample is stored in the i-th row, as we have
assumed
Notes
----------
We have decided to define drelu(0) = 0
Returns
-------
np.array
Element-wise derivative rectifier function applied to parameter z
"""
return np.where(z > 0, 1, 0)
@staticmethod
def identity(z):
"""Implementation of the identitity function
Parameters
----------
z : np.array
Matrix containing activations for each data sample. The activation
for the i-th data sample is stored in the i-th row, as we have
assumed
Returns
-------
np.array
Element-wise identity function applied to parameter z
"""
return z
@staticmethod
def didentity(z):
"""Implementation of the derivative identitity function
Parameters
----------
z : np.array
Matrix containing activations for each data sample. The activation
for the i-th data sample is stored in the i-th row, as we have
assumed
Notes
----------
This method only works with numpy arrays
Returns
-------
np.array
Matrix filled with ones (ie, derivative identity function). It has
the exact shape than the input parameter
"""
return np.ones(z.shape)
@staticmethod
def softmax(z):
"""Numerically stable implementation of the softmax function
Parameters
----------
z : np.array
Matrix containing activations for each data sample. The activation
for the i-th data sample is stored in the i-th row, as we have
assumed
Returns
-------
np.array
Row-wise softmax function applied to parameter z
"""
max_values = np.amax(z, axis=1).reshape(z.shape[0], 1)
x = z - max_values
sum_exp = np.sum(np.exp(x), axis=1).reshape(z.shape[0], 1)
return np.exp(x) / sum_exp
# %% cost functions
@staticmethod
def binary_cross_entropy(y, t_data):
"""Numerically stable implementation of the Binary Cross entropy
cost function
Parameters
----------
y : np.array
(N,Dr) matrix which contains the Multilayer Perceptron outputs
for the input data samples labeled with t_data
t_data : np.array
(N,Dr) matrix representing labels for each data sample. If these
labels correspond to a binary classification problems (Dr = 1),
there are as many labels as input data samples. On the other hand,
if we have a multiclass classification problem (Dr > 1),
labels for each sample are row-wise stacked
Returns
-------
int
Binary cross entropy function applied to the MLP outputs and labels
"""
x = np.maximum(y, 10**-15)
x = np.minimum(x, 1 - 10**-15)
return -np.sum(t_data * np.log(x) + (1 - t_data) * np.log(1 - x))
@staticmethod
def softmax_cross_entropy(y, t_data):
"""Numerically stable implementation of the Softmax Cross entropy
cost function
Parameters
----------
y : np.array
(N,Dr) matrix which contains the Multilayer Perceptron outputs
for the input data samples labeled with t_data
t_data : np.array
(N,Dr) matrix representing labels for each data sample. If these
labels correspond to a binary classification problems (Dr = 1),
there are as many labels as input data samples. On the other hand,
if we have a multiclass classification problem (Dr > 1),
labels for each sample are row-wise stacked
Returns
-------
int
Softmax cross entropy function applied to the MLP outputs and labels
"""
x = np.maximum(y, 10**-15)
return -np.sum(t_data * np.log(x))
@staticmethod
def cost_L2(y, t_data):
"""Implementation of the sum squared error cost function
Notes
----------
Preferably used for one-variable function regression problems
Parameters
----------
y : np.array
(N,Dr) matrix which contains the Multilayer Perceptron outputs
for the input data samples labeled with t_data
t_data : np.array
(N,Dr) matrix representing labels for each data sample. If these
labels correspond to a binary classification problems (Dr = 1),
there are as many labels as input data samples. On the other hand,
if we have a multiclass classification problem (Dr > 1),
labels for each sample are row-wise stacked
Returns
-------
int
Sum squared error cost function applied to the MLP outputs and
labels
"""
return 0.5 * np.sum((y - t_data)**2)
# %% simple weights initialization
def init_weights(self):
"""Random weight initialization
Initialize random weights and biases for the Multilayer Perceptron.
If the initial seed has been set then it is used for comparing perfor-
mances with another multilayer perceptrons.
Returns
-------
None
"""
if self.init_seed:
np.random.seed(self.init_seed)
weights_list = []
biases_list = []
for layer in range(self.nb_layers):
new_W = np.random.randn(self.K_list[layer], self.K_list[layer + 1])
new_b = np.zeros(self.K_list[layer + 1])
weights_list.append(new_W)
biases_list.append(new_b)
self.weights_list = weights_list
self.biases_list = biases_list
# %% feed forward pass
def get_activations_and_units(self, x, wb=None):
"""Computes activations and units on each layer until it gets the
final outputs
Parameters
----------
x : np.array
(N,D0) matrix holding each input data sample. The i-th data sample
is stored in the i-th row.
wb : tuple
Tuple consisting of a weights and a biases list to be used for
computing the result. If this parameter is not passed then the MLP
own weights are used for the computations
Returns
-------
Tuple
Tuple consisting of the list of activations and units for each
layer
"""
if wb is None:
weights_list, biases_list = self.weights_list, self.biases_list
else:
weights_list, biases_list = wb
activations = [x]
units = [x]
z = x
for i in range(self.nb_layers):
# Note that the biases_list dimension is runtime resized so as to
# compute the desired operation
a = z.dot(weights_list[i]) + biases_list[i]
activations.append(a)
z = self.activation_functions[i](a)
units.append(z)
self.y = z
return activations, units
# %% backpropagation
def get_gradients(self, x, t, beta=None, wb=None):
"""Backpropagation algorithm computing the gradient for both the
weights and biases on each layer
Parameters
----------
x : np.array
(N,D0) matrix holding each input data sample.
The i-th data sample is stored in the i-th row
t : np.array
(N,Dr) matrix representing labels for each data sample. If these
labels correspond to a binary classification problems (Dr = 1),
there are as many labels as input data samples. On the other hand,
if we have a multiclass classification problem (Dr > 1),
labels for each sample are row-wise stacked
beta : int
Regularization parameter. If the parameter is not passed, then we
use the MLP attribute which has been previously set
wb : tuple
Tuple consisting of a weights and a biases list to be used for
computing the feed forward result. If this parameter is not passed
then the MLP own weights are used for the computations
Notes
----------
Slightly different from the class notes due to the separation of bs
and Ws and the change made to index the weights.
delta_k matrixes have shape (N,Dk)
Returns
-------
Tuple
Tuple consisting of the list of gradients and biases for each layer
Note that k-th index is (k+1)-th layer gradient since layer 0
(input) has no Ws
"""
if wb is None:
weights_list, biases_list = self.weights_list, self.biases_list
else:
weights_list, biases_list = wb
activations, units = self.get_activations_and_units(x, wb)
N = x.shape[0]
grad_w_list = [0] * self.nb_layers
grad_b_list = [0] * self.nb_layers
delta_k1 = None # delta value for the next layer
ks = range(1, self.nb_layers + 1)
ks.reverse()
for k in ks: # r, ..., 1
# Computing new delta values
if k < self.nb_layers:
# Weights of the (k+1)-th layer
w = weights_list[k]
# Obtain derivative activation function on layer k
dh = self.diff_activation_functions[k - 1]
# activations from layer k
a = activations[k]
delta_k = (delta_k1.dot(w.T)) * dh(a)
else:
# We can assume the derivative of En respect to the last
# activations layer is y-t
delta_k = units[k] - t
if beta is None:
b = self.beta
else:
b = beta
# Adding the regularization term for the Ws gradient
reg_term = None
if self.reg_method is None:
reg_term = 0
elif self.reg_method == 'L1':
reg_term = (b * np.sign(weights_list[k - 1]))
elif self.reg_method == 'L2':
reg_term = (b * weights_list[k - 1])
# Elastic net regularization
elif self.reg_method == 'Elastic_Net':
reg_term = (b * (np.sign(weights_list[k - 1]) +
weights_list[k - 1]))
# Thanks to the einsum function we can avoid using another for loop
# See that gradients are averaged because they are computed at the
# for all the input data
grad_wk = (np.einsum(
'ij,ik', units[k - 1], delta_k) / N) + reg_term
grad_w_list[k - 1] = grad_wk
grad_bk = np.sum(delta_k, axis=0) / N
grad_b_list[k - 1] = grad_bk
delta_k1 = delta_k
return grad_w_list, grad_b_list
# %% training method for the MLP
def train(self, x_data, t_data, epochs, batch_size,
initialize_weights=False, print_cost=False, beta=0,
reg_method=None, **opt_args):
"""Trains the Multilayer Perceptron using certain hyperparameters
Parameters
----------
x_data : np.array
(N,D0) matrix holding each input data sample. The i-th data sample
is stored in the i-th row.
t_data : np.array
(N,Dr) matrix representing labels for each data sample. If these
labels correspond to a binary classification problems (Dr = 1),
there are as many labels as input data samples. On the other hand,
if we have a multiclass classification problem (Dr > 1),
labels for each sample are row-wise stacked
epochs : int
Number of epochs to be used to train the model
batch_size : int
Number of data samples to be considered in an epoch
initialize_weights : bool
Boolean flag which decides if a weight initialization has to be
performed
print_cost : bool
Boolean flag which can be used to display the current cost during
the train proccess
beta : int
Regularization parameter. Its default value is 0 (ie, no regulari-
zation)
reg_method : string
Indicates the regularization method to be used. There have been
implemented: 'L1', 'L2' and 'Elastic_Net' regularizations
Returns
-------
None
"""
self.beta = beta
self.reg_method = reg_method
opt = mlpo.Optimizer.get_optimizer(self, **opt_args)
if initialize_weights:
self.init_weights()
nb_data = x_data.shape[0]
index_list = np.arange(nb_data)
nb_batches = int(nb_data / batch_size)
for _ in range(epochs):
np.random.shuffle(index_list)
for batch in range(nb_batches):
indexes = index_list[batch *
batch_size:(batch + 1) * batch_size]
opt.process_batch(x_data[indexes], t_data[indexes])
if print_cost:
x_batch = x_data
t_batch = t_data
self.get_activations_and_units(x_batch)
if self.activation_functions[-1] == MLP.sigmoid:
sys.stdout.write('cost = %f\r' %
MLP.binary_cross_entropy(self.y, t_batch))
sys.stdout.flush()
elif self.activation_functions[-1] == MLP.softmax:
sys.stdout.write('cost = %f\r' %
MLP.softmax_cross_entropy(
self.y, t_batch))
sys.stdout.flush()
else:
sys.stdout.write('cost = %f\r' %
MLP.cost_L2(self.y, t_batch))
sys.stdout.flush()
# %% let's experiment
if __name__ == '__main__':
    # %% Create data
    # Toy binary-classification set: two well-separated Gaussian clusters.
    # np.random.seed(5)
    nb_black = 50
    nb_red = 50
    nb_data = nb_black + nb_red
    x_data_black = np.random.randn(nb_black, 2) + np.array([0, 0])
    x_data_red = np.random.randn(nb_red, 2) + np.array([10, 10])
    x_data = np.vstack((x_data_black, x_data_red))
    # Labels: 0 for the "black" cluster, 1 for the "red" one.
    t_data = np.asarray([0] * nb_black + [1] * nb_red).reshape(nb_data, 1)
    # %% Net structure
    # Single-layer (logistic regression) network: D inputs -> 1 sigmoid unit.
    D = x_data.shape[1]  # initial dimension
    K = 1  # final dimension
    K_list = [D, K]  # list of dimensions
    activation_functions = [MLP.sigmoid]
    diff_activation_functions = [MLP.dsigmoid]
    # %%
    mlp = MLP(K_list, activation_functions, diff_activation_functions)
    # %% Train begins
    # Optimizer hyperparameters (method/eta/gamma/beta_1/beta_2/epsilon)
    # are forwarded to the optimizer factory via **opt_args.
    mlp.train(x_data, t_data,
              epochs=1000, batch_size=20, initialize_weights=False,
              method='adam',
              eta=0.1,
              beta=0,
              gamma=0.9,
              beta_1=0.9,
              beta_2=0.999,
              epsilon=1e-8,
              print_cost=True)
|
zentonllo/gcom
|
pr3/mlp.py
|
Python
|
mit
| 23,483
|
[
"NEURON"
] |
93564df92683400ca227e90ca307b0fcb3240dcaab64b3d3938f59b1ca3ea204
|
from Firefly.helpers.automation.automation import Automation
|
Firefly-Automation/Firefly
|
Firefly/helpers/automation/__init__.py
|
Python
|
apache-2.0
| 60
|
[
"Firefly"
] |
b86a03eaadbbbf9658aa4501a561971a68179ad37f6d4d61f8140f71570e62ce
|
#!/usr/bin/env python
import argparse
import euston.io as io
import euston.helper as helper
# Command-line interface: a single positional argument naming the CP2K log.
parser = argparse.ArgumentParser(description='Performance analysis of CP2K output files.')
parser.add_argument('input', type=str, help='CP2K log file')
def main(parser):
    """Parse the CLI arguments and print timing statistics for a CP2K log.

    Reads core count, SCF step count, SCF time and MD step count from the
    parsed log and prints per-MD-step and per-SCF-step averages.

    Note: Python 2 only (uses ``print`` statements).
    """
    args = parser.parse_args()
    cp2k = io.Cp2kLog(args.input)
    # Aggregate counters extracted from the parsed log.
    cores = cp2k.get_num_cores()
    spc = cp2k.get_num_spc()
    time = cp2k.get_time_spc()
    mds = cp2k.get_num_md()
    print 'Number of cores: {0:>25}'.format(cores)
    print 'Number of SCF steps: {0:>25}'.format(spc)
    print 'SCF time: {0:>25} ({1}s)'.format(helper.human_readable_time(time), time)
    print 'MD steps: {0:>25}'.format(mds)
    print
    # The 1.0 factor forces float division under Python 2.
    print 'SCF steps / MD step: {0:>25}'.format(spc * 1.0 / mds)
    # NOTE(review): the ratios below assume ``time`` is a float -- with an
    # integer ``time`` these would truncate under Python 2; confirm.
    timepermd = time / mds
    print 'SCF time / MD step: {0:>25} ({1}s)'.format(helper.human_readable_time(timepermd), timepermd)
    timeperscf = time / spc
    print 'SCF time / SCF step: {0:>25} ({1}s)'.format(helper.human_readable_time(timeperscf), timeperscf)
# Script entry point.
if __name__ == '__main__':
    main(parser)
|
ferchault/euston
|
src/tools/es_cp2kperf.py
|
Python
|
lgpl-3.0
| 1,047
|
[
"CP2K"
] |
481d9ed11d3c053d372718a20f79bcdfebffcea0dfd3ca4d5946b7a6177e625f
|
# -*- coding: utf-8 -*-
"""
==========================================================
Create a new coordinate class (for the Sagittarius stream)
==========================================================
This document describes in detail how to subclass and define a custom spherical
coordinate frame, as discussed in :ref:`astropy-coordinates-design` and the
docstring for `~astropy.coordinates.BaseCoordinateFrame`. In this example, we
will define a coordinate system defined by the plane of orbit of the Sagittarius
Dwarf Galaxy (hereafter Sgr; as defined in Majewski et al. 2003). The Sgr
coordinate system is often referred to in terms of two angular coordinates,
:math:`\Lambda,B`.
To do this, we need to define a subclass of
`~astropy.coordinates.BaseCoordinateFrame` that knows the names and units of the
coordinate system angles in each of the supported representations. In this case
we support `~astropy.coordinates.SphericalRepresentation` with "Lambda" and
"Beta". Then we have to define the transformation from this coordinate system to
some other built-in system. Here we will use Galactic coordinates, represented
by the `~astropy.coordinates.Galactic` class.
See Also
--------
* The `gala package <http://gala.adrian.pw/>`_, which defines a number of
Astropy coordinate frames for stellar stream coordinate systems.
* Majewski et al. 2003, "A Two Micron All Sky Survey View of the Sagittarius
Dwarf Galaxy. I. Morphology of the Sagittarius Core and Tidal Arms",
https://arxiv.org/abs/astro-ph/0304198
* Law & Majewski 2010, "The Sagittarius Dwarf Galaxy: A Model for Evolution in a
Triaxial Milky Way Halo", https://arxiv.org/abs/1003.1132
* David Law's Sgr info page http://www.stsci.edu/~dlaw/Sgr/
*By: Adrian Price-Whelan, Erik Tollerud*
*License: BSD*
"""
##############################################################################
# Make `print` work the same in all versions of Python, set up numpy,
# matplotlib, and use a nicer set of plot parameters:
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Import the packages necessary for coordinates
from astropy.coordinates import frame_transform_graph
from astropy.coordinates.matrix_utilities import rotation_matrix, matrix_product, matrix_transpose
import astropy.coordinates as coord
import astropy.units as u
##############################################################################
# The first step is to create a new class, which we'll call
# ``Sagittarius`` and make it a subclass of
# `~astropy.coordinates.BaseCoordinateFrame`:
class Sagittarius(coord.BaseCoordinateFrame):
    """
    A Heliocentric spherical coordinate system defined by the orbit
    of the Sagittarius dwarf galaxy, as described in
    http://adsabs.harvard.edu/abs/2003ApJ...599.1082M
    and further explained in
    http://www.stsci.edu/~dlaw/Sgr/.
    Parameters
    ----------
    representation : `~astropy.coordinates.BaseRepresentation` or None
        A representation object or None to have no data (or use the other keywords)
    Lambda : `~astropy.coordinates.Angle`, optional, must be keyword
        The longitude-like angle corresponding to Sagittarius' orbit.
    Beta : `~astropy.coordinates.Angle`, optional, must be keyword
        The latitude-like angle corresponding to Sagittarius' orbit.
    distance : `Quantity`, optional, must be keyword
        The Distance for this object along the line-of-sight.
    pm_Lambda_cosBeta : :class:`~astropy.units.Quantity`, optional, must be keyword
        The proper motion along the stream in ``Lambda`` (including the
        ``cos(Beta)`` factor) for this object (``pm_Beta`` must also be given).
    pm_Beta : :class:`~astropy.units.Quantity`, optional, must be keyword
        The proper motion in ``Beta`` for this object
        (``pm_Lambda_cosBeta`` must also be given).
    radial_velocity : :class:`~astropy.units.Quantity`, optional, must be keyword
        The radial velocity of this object.
    """
    # Spherical angles + cos(lat)-corrected differentials by default.
    default_representation = coord.SphericalRepresentation
    default_differential = coord.SphericalCosLatDifferential
    # Expose the spherical lon/lat under the stream-specific names.
    frame_specific_representation_info = {
        coord.SphericalRepresentation: [
            coord.RepresentationMapping('lon', 'Lambda'),
            coord.RepresentationMapping('lat', 'Beta'),
            coord.RepresentationMapping('distance', 'distance')]
    }
##############################################################################
# Breaking this down line-by-line, we define the class as a subclass of
# `~astropy.coordinates.BaseCoordinateFrame`. Then we include a descriptive
# docstring. The final lines are class-level attributes that specify the
# default representation for the data, default differential for the velocity
# information, and mappings from the attribute names used by representation
# objects to the names that are to be used by the ``Sagittarius`` frame. In this
# case we override the names in the spherical representations but don't do
# anything with other representations like cartesian or cylindrical.
#
# Next we have to define the transformation from this coordinate system to some
# other built-in coordinate system; we will use Galactic coordinates. We can do
# this by defining functions that return transformation matrices, or by simply
# defining a function that accepts a coordinate and returns a new coordinate in
# the new system. Because the transformation to the Sagittarius coordinate
# system is just a spherical rotation from Galactic coordinates, we'll just
# define a function that returns this matrix. We'll start by constructing the
# transformation matrix using pre-determined Euler angles and the
# ``rotation_matrix`` helper function:
SGR_PHI = (180 + 3.75) * u.degree # Euler angles (from Law & Majewski 2010)
SGR_THETA = (90 - 13.46) * u.degree
SGR_PSI = (180 + 14.111534) * u.degree
# Generate the rotation matrix using the x-convention (see Goldstein)
D = rotation_matrix(SGR_PHI, "z")
C = rotation_matrix(SGR_THETA, "x")
B = rotation_matrix(SGR_PSI, "z")
# Reflection matrix: flips the sign of the z-axis.
A = np.diag([1.,1.,-1.])
SGR_MATRIX = matrix_product(A, B, C, D)
##############################################################################
# Since we already constructed the transformation (rotation) matrix above, and
# the inverse of a rotation matrix is just its transpose, the required
# transformation functions are very simple:
@frame_transform_graph.transform(coord.StaticMatrixTransform, coord.Galactic, Sagittarius)
def galactic_to_sgr():
    """ Compute the transformation matrix from Galactic spherical to
    heliocentric Sgr coordinates.

    Returns
    -------
    np.ndarray
        The precomputed 3x3 matrix ``SGR_MATRIX`` (A @ B @ C @ D above).
    """
    return SGR_MATRIX
##############################################################################
# The decorator ``@frame_transform_graph.transform(coord.StaticMatrixTransform,
# coord.Galactic, Sagittarius)`` registers this function on the
# ``frame_transform_graph`` as a coordinate transformation. Inside the function,
# we simply return the previously defined rotation matrix.
#
# We then register the inverse transformation by using the transpose of the
# rotation matrix (which is faster to compute than the inverse):
@frame_transform_graph.transform(coord.StaticMatrixTransform, Sagittarius, coord.Galactic)
def sgr_to_galactic():
    """ Compute the transformation matrix from heliocentric Sgr coordinates to
    spherical Galactic.

    Returns
    -------
    np.ndarray
        The transpose of ``SGR_MATRIX`` (the inverse of a rotation matrix
        is its transpose, and cheaper to compute).
    """
    return matrix_transpose(SGR_MATRIX)
##############################################################################
# Now that we've registered these transformations between ``Sagittarius`` and
# `~astropy.coordinates.Galactic`, we can transform between *any* coordinate
# system and ``Sagittarius`` (as long as the other system has a path to
# transform to `~astropy.coordinates.Galactic`). For example, to transform from
# ICRS coordinates to ``Sagittarius``, we would do:
# A single ICRS position transformed into the Sagittarius frame.
icrs = coord.ICRS(280.161732*u.degree, 11.91934*u.degree)
sgr = icrs.transform_to(Sagittarius)
print(sgr)
##############################################################################
# Or, to transform from the ``Sagittarius`` frame to ICRS coordinates (in this
# case, a line along the ``Sagittarius`` x-y plane):
# A ring of 128 points along the Sgr equator (Beta = 0), converted to ICRS.
sgr = Sagittarius(Lambda=np.linspace(0, 2*np.pi, 128)*u.radian,
                  Beta=np.zeros(128)*u.radian)
icrs = sgr.transform_to(coord.ICRS)
print(icrs)
##############################################################################
# As an example, we'll now plot the points in both coordinate systems:
# Two all-sky (Aitoff) panels: the same circle in Sgr coordinates (where
# it lies along Beta = 0) and in ICRS.
fig, axes = plt.subplots(2, 1, figsize=(8, 10),
                         subplot_kw={'projection': 'aitoff'})
axes[0].set_title("Sagittarius")
axes[0].plot(sgr.Lambda.wrap_at(180*u.deg).radian, sgr.Beta.radian,
             linestyle='none', marker='.')
axes[1].set_title("ICRS")
axes[1].plot(icrs.ra.wrap_at(180*u.deg).radian, icrs.dec.radian,
             linestyle='none', marker='.')
plt.show()
##############################################################################
# This particular transformation is just a spherical rotation, which is a
# special case of an Affine transformation with no vector offset. The
# transformation of velocity components is therefore natively supported as
# well:
# Same ring of points, now with random proper motions along the stream;
# the velocity components are transformed to ICRS along with the positions.
sgr = Sagittarius(Lambda=np.linspace(0, 2*np.pi, 128)*u.radian,
                  Beta=np.zeros(128)*u.radian,
                  pm_Lambda_cosBeta=np.random.uniform(-5, 5, 128)*u.mas/u.yr,
                  pm_Beta=np.zeros(128)*u.mas/u.yr)
icrs = sgr.transform_to(coord.ICRS)
print(icrs)
# Proper motions in the Sgr frame vs. the two ICRS components.
fig, axes = plt.subplots(3, 1, figsize=(8, 10), sharex=True)
axes[0].set_title("Sagittarius")
axes[0].plot(sgr.Lambda.degree,
             sgr.pm_Lambda_cosBeta.value,
             linestyle='none', marker='.')
axes[0].set_xlabel(r"$\Lambda$ [deg]")
axes[0].set_ylabel(r"$\mu_\Lambda \, \cos B$ [{0}]"
                   .format(sgr.pm_Lambda_cosBeta.unit.to_string('latex_inline')))
axes[1].set_title("ICRS")
axes[1].plot(icrs.ra.degree, icrs.pm_ra_cosdec.value,
             linestyle='none', marker='.')
axes[1].set_ylabel(r"$\mu_\alpha \, \cos\delta$ [{0}]"
                   .format(icrs.pm_ra_cosdec.unit.to_string('latex_inline')))
axes[2].set_title("ICRS")
axes[2].plot(icrs.ra.degree, icrs.pm_dec.value,
             linestyle='none', marker='.')
axes[2].set_xlabel("RA [deg]")
axes[2].set_ylabel(r"$\mu_\delta$ [{0}]"
                   .format(icrs.pm_dec.unit.to_string('latex_inline')))
plt.show()
|
MSeifert04/astropy
|
examples/coordinates/plot_sgr-coordinate-frame.py
|
Python
|
bsd-3-clause
| 10,549
|
[
"Galaxy"
] |
c88d69822c26010cca4285308b165b3d5efdab79c8d300ef36f315f194552d65
|
# (C) British Crown Copyright 2010 - 2014, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Test the io/__init__.py module.
"""
from __future__ import (absolute_import, division, print_function)
# import iris tests first so that some things can be initialised before importing anything else
import iris.tests as tests
import unittest
from io import BytesIO
import iris.fileformats as iff
import iris.io
class TestDecodeUri(unittest.TestCase):
    """Checks iris.io.decode_uri splits URIs into (scheme, part) pairs."""

    def test_decode_uri(self):
        # FIX: the Windows path is now a raw string. In a plain literal,
        # sequences like '\d' and '\P' are invalid escapes -- they happen
        # to pass through unchanged today, but raise SyntaxWarning on
        # modern Python and are slated to become errors. The string
        # contents are byte-identical to before.
        tests = {
            '/data/local/someDir/PP/COLPEX/COLPEX_16a_pj001.pp': (
                'file', '/data/local/someDir/PP/COLPEX/COLPEX_16a_pj001.pp'
            ),
            r'C:\data\local\someDir\PP\COLPEX\COLPEX_16a_pj001.pp': (
                'file', r'C:\data\local\someDir\PP\COLPEX\COLPEX_16a_pj001.pp'
            ),
            'file:///data/local/someDir/PP/COLPEX/COLPEX_16a_pj001.pp': (
                'file', '///data/local/someDir/PP/COLPEX/COLPEX_16a_pj001.pp'
            ),
            'http://www.somehost.com:8080/resource/thing.grib': (
                'http', '//www.somehost.com:8080/resource/thing.grib'
            ),
            # A colon inside a path component must not be mistaken for a
            # scheme separator.
            '/data/local/someDir/2013-11-25T13:49:17.632797': (
                'file', '/data/local/someDir/2013-11-25T13:49:17.632797'
            ),
        }
        for uri, pair in tests.items():
            self.assertEqual(pair, iris.io.decode_uri(uri))
class TestFileFormatPicker(tests.IrisTest):
    """Checks that iff.FORMAT_AGENT identifies file formats correctly."""

    def test_known_formats(self):
        # The set of registered loaders is pinned by a reference file.
        self.assertString(str(iff.FORMAT_AGENT),
                          tests.get_result_path(('file_load',
                                                 'known_loaders.txt')))

    @tests.skip_data
    def test_format_picker(self):
        # ways to test the format picker = list of (format-name, file-spec)
        test_specs = [
            ('NetCDF',
             ['NetCDF', 'global', 'xyt', 'SMALL_total_column_co2.nc']),
            ('NetCDF 64 bit offset format',
             ['NetCDF', 'global', 'xyt', 'SMALL_total_column_co2.nc.k2']),
            ('NetCDF_v4',
             ['NetCDF', 'global', 'xyt', 'SMALL_total_column_co2.nc4.k3']),
            ('NetCDF_v4',
             ['NetCDF', 'global', 'xyt', 'SMALL_total_column_co2.nc4.k4']),
            ('UM Fieldsfile (FF) post v5.2',
             ['FF', 'n48_multi_field']),
            ('GRIB',
             ['GRIB', 'grib1_second_order_packing', 'GRIB_00008_FRANX01']),
            ('GRIB',
             ['GRIB', 'jpeg2000', 'file.grib2']),
            ('UM Post Processing file (PP)',
             ['PP', 'simple_pp', 'global.pp']),
            ('UM Fieldsfile (FF) ancillary',
             ['FF', 'ancillary_fixed_length_header']),
            # ('BUFR',
            #  ['BUFR', 'mss', 'BUFR_Samples',
            #   'JUPV78_EGRR_121200_00002501']),
            ('NIMROD',
             ['NIMROD', 'uk2km', 'WO0000000003452',
              '201007020900_u1096_ng_ey00_visibility0180_screen_2km']),
            # ('NAME',
            #  ['NAME', '20100509_18Z_variablesource_12Z_VAAC',
            #   'Fields_grid1_201005110000.txt']),
        ]
        # test that each filespec is identified as the expected format
        for (expected_format_name, file_spec) in test_specs:
            test_path = tests.get_data_path(file_spec)
            # BUG FIX: open in binary mode -- format detection sniffs magic
            # bytes, and text mode fails on non-UTF-8 data under Python 3.
            with open(test_path, 'rb') as test_file:
                a = iff.FORMAT_AGENT.get_spec(test_path, test_file)
                self.assertEqual(a.name, expected_format_name)

    def test_format_picker_nodata(self):
        # The following is to replace the above at some point as no real files
        # are required.
        # Packaged grib, magic number offset by set length, this length is
        # specific to WMO bulletin headers
        class NamedBytesIO(BytesIO):
            # Plain BytesIO instances reject arbitrary attribute
            # assignment, so a trivial subclass carries the fake ``name``.
            pass

        header_lengths = [21, 80, 41, 42]
        for header_length in header_lengths:
            # BUG FIX: BytesIO holds bytes -- the original built a str
            # payload and seeded BytesIO with the text 'rw', both of
            # which raise TypeError on Python 3.
            binary_string = header_length * b'\x00' + b'GRIB' + b'\x00' * 100
            with NamedBytesIO(binary_string) as bh:
                bh.name = 'fake_file_handle'
                a = iff.FORMAT_AGENT.get_spec(bh.name, bh)
                self.assertEqual(a.name, 'GRIB')

    def test_open_dap(self):
        # tests that *ANY* http or https URL is seen as an OPeNDAP service.
        # This may need to change in the future if other protocols are
        # supported.
        DAP_URI = 'http://geoport.whoi.edu/thredds/dodsC/bathy/gom15'
        a = iff.FORMAT_AGENT.get_spec(DAP_URI, None)
        self.assertEqual(a.name, 'NetCDF OPeNDAP')
@tests.skip_data
class TestFileExceptions(tests.IrisTest):
    """Checks that loading malformed files raises the expected errors."""

    def test_pp_little_endian(self):
        # Loading a little-endian PP file is expected to fail.
        filename = tests.get_data_path(
            ('PP', 'aPPglob1', 'global_little_endian.pp'))
        with self.assertRaises(ValueError):
            iris.load_cube(filename)
# Allow running this test module directly.
if __name__ == '__main__':
    tests.main()
|
Jozhogg/iris
|
lib/iris/tests/test_io_init.py
|
Python
|
lgpl-3.0
| 5,523
|
[
"NetCDF"
] |
463c87297f66f3e2f2c5e62af5cae85d130aeb666ffa8bd994037cf8bdcb496a
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements the graph generation for computation of gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import warnings
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_grad # pylint: disable=unused-import
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_grad # pylint: disable=unused-import
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import image_grad # pylint: disable=unused-import
from tensorflow.python.ops import linalg_grad # pylint: disable=unused-import
from tensorflow.python.ops import linalg_ops # pylint: disable=unused-import
from tensorflow.python.ops import logging_ops # pylint: disable=unused-import
from tensorflow.python.ops import math_grad # pylint: disable=unused-import
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import spectral_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import tf_logging as logging
# Warn the user if we convert a sparse representation to dense with at
# least this number of elements.
# Used by _IndexedSlicesToTensor below when an IndexedSlices value is
# densified.
_LARGE_SPARSE_NUM_ELEMENTS = 100000000
def _IndexedSlicesToTensor(value, dtype=None, name=None, as_ref=False):
  """Converts an IndexedSlices object `value` to a Tensor.

  NOTE(mrry): This function is potentially expensive.

  Args:
    value: An ops.IndexedSlices object.
    dtype: The dtype of the Tensor to be returned.
    name: Optional name to use for the returned Tensor.
    as_ref: True if a ref is requested.

  Returns:
    A dense Tensor representing the values in the given IndexedSlices.

  Raises:
    ValueError: If the IndexedSlices does not have the same dtype.
  """
  del as_ref  # Unused: the dense conversion never produces a ref.
  if dtype and not dtype.is_compatible_with(value.dtype):
    raise ValueError(
        "Tensor conversion requested dtype %s for IndexedSlices with dtype %s" %
        (dtype.name, value.dtype.name))
  if value.dense_shape is None:
    raise ValueError(
        "Tensor conversion requested for IndexedSlices without dense_shape: %s"
        % str(value))
  # TODO(mrry): Consider adding static shape information to
  # IndexedSlices, to avoid using numpy here.
  # Warn when densification is provably (or possibly) large.
  static_shape = tensor_util.constant_value(value.dense_shape)
  if static_shape is None:
    warnings.warn(
        "Converting sparse IndexedSlices to a dense Tensor of unknown shape. "
        "This may consume a large amount of memory.")
  else:
    num_elements = np.prod(static_shape)
    if num_elements >= _LARGE_SPARSE_NUM_ELEMENTS:
      warnings.warn(
          "Converting sparse IndexedSlices to a dense Tensor with %d elements. "
          "This may consume a large amount of memory." % num_elements)
  return math_ops.unsorted_segment_sum(
      value.values, value.indices, value.dense_shape[0], name=name)


ops.register_tensor_conversion_function(ops.IndexedSlices,
                                        _IndexedSlicesToTensor)
def _MarkReachedOps(from_ops, reached_ops):
"""Mark all ops reached from "from_ops".
Args:
from_ops: list of Operations.
reached_ops: list of booleans, indexed by operation id.
"""
queue = collections.deque()
queue.extend(from_ops)
while queue:
op = queue.popleft()
if not reached_ops[op._id]:
reached_ops[op._id] = True
for output in op.outputs:
queue.extend(output.consumers())
def _GatherInputs(to_ops, reached_ops):
"""List all inputs of to_ops that are in reached_ops.
Args:
to_ops: list of Operations.
reached_ops: list of booleans, indexed by operation id.
Returns:
The list of all inputs of to_ops that are in reached_ops.
That list includes all elements of to_ops.
"""
inputs = []
queue = collections.deque()
queue.extend(to_ops)
while queue:
op = queue.popleft()
# We are interested in this op.
if reached_ops[op._id]:
inputs.append(op)
# Clear the boolean so we won't add the inputs again.
reached_ops[op._id] = False
for inp in op.inputs:
queue.append(inp.op)
return inputs
def _PendingCount(graph, to_ops, from_ops, colocate_gradients_with_ops):
  """Initialize the pending count for ops between two lists of Operations.

  'pending_count[op._id]' indicates the number of backprop inputs
  to this operation.

  Args:
    graph: a Graph.
    to_ops: list of Operations.
    from_ops: list of Operations.
    colocate_gradients_with_ops: Python bool. See docstring of gradients().

  Returns:
    A tuple containing: (1) a list of integers indexed by operation id,
    indicating the number of backprop inputs to this operation, and (2)
    a ControlFlowState object which is not None if the ops between from_ops
    and to_ops contain control flow loops.
  """
  num_ops = graph._last_id + 1  # pylint: disable=protected-access
  # Forward pass: mark everything reachable from from_ops.  The to_ops are
  # pre-marked so the backward walk below always has somewhere to land.
  reached_ops = [False] * num_ops
  for op in to_ops:
    reached_ops[op._id] = True  # pylint: disable=protected-access
  _MarkReachedOps(from_ops, reached_ops)
  # Backward pass from to_ops: any reached op lies between the two sets.
  between_ops = [False] * num_ops
  between_op_list = []
  queue = collections.deque(to_ops)
  while queue:
    op = queue.popleft()
    if reached_ops[op._id]:  # pylint: disable=protected-access
      between_ops[op._id] = True
      between_op_list.append(op)
      # Clear the flag so the op is expanded only once.
      reached_ops[op._id] = False
      for inp in op.inputs:
        queue.append(inp.op)
  # 'loop_state' is None if there are no while loops.
  loop_state = control_flow_ops.MaybeCreateControlFlowState(
      between_op_list, between_ops, colocate_gradients_with_ops)
  # Each between-op contributes one backprop input to each of its inputs
  # that is itself a between-op.
  pending_count = [0] * num_ops
  for op in between_op_list:
    for x in op.inputs:
      if between_ops[x.op._id]:  # pylint: disable=protected-access
        pending_count[x.op._id] += 1
  return pending_count, loop_state
def _AsList(x):
return x if isinstance(x, (list, tuple)) else [x]
def _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops):
  """Fill in default values for grad_ys.

  Args:
    grad_ys: List of gradients, can contain None.
    ys: List of tensors.
    colocate_gradients_with_ops: If True, try colocating gradients with
      the corresponding op.

  Returns:
    A list of gradients to use, without None.

  Raises:
    ValueError: If sizes of gradients and inputs don't match
    TypeError: If type of any gradient is not valid for its input.
  """
  if len(grad_ys) != len(ys):
    raise ValueError("Passed %d grad_ys for %d ys" % (len(grad_ys), len(ys)))
  grad_ys = ops.convert_n_to_tensor_or_indexed_slices(grad_ys, name="grad_y")
  new_grad_ys = []
  for i in xrange(len(grad_ys)):
    grad_y = grad_ys[i]
    y = ys[i]
    with _maybe_colocate_with(y.op, colocate_gradients_with_ops):
      if grad_y is None:
        # Default gradient: a tensor of ones shaped like y.  Complex ys have
        # no well-defined default, so the caller must supply grad_ys.
        if y.dtype.is_complex:
          raise TypeError(
              "Gradients of complex tensors must set grad_ys (y.dtype = %r)" %
              y.dtype)
        new_grad_ys.append(array_ops.fill(
            array_ops.shape(y), constant_op.constant(
                1, dtype=y.dtype, name="grad_ys_%d" % i)))
        continue
      # Validate a user-supplied grad_y against the dtype family of its y.
      if y.dtype.is_floating or y.dtype.is_integer:
        if not grad_y.dtype.is_floating and not grad_y.dtype.is_integer:
          raise TypeError("Gradient type %s generated for real or "
                          "integer-valued tensor %s with type %s must be "
                          "real or integer" %
                          (dtypes.as_dtype(grad_y.dtype).name, y,
                           dtypes.as_dtype(y.dtype).name))
      elif y.dtype.is_complex:
        if not grad_y.dtype.is_complex:
          # Bug fix: the message previously claimed the gradient "must be
          # real", but this branch requires a *complex* gradient for a
          # complex-valued tensor (the check is `not is_complex`).
          raise TypeError("Gradient type %s generated for complex-valued "
                          "tensor %s with type %s must be complex" %
                          (dtypes.as_dtype(grad_y.dtype).name, y,
                           dtypes.as_dtype(y.dtype).name))
      else:
        raise TypeError("Tensor %s with type %s must be numeric "
                        "to obtain a default gradient" %
                        (y, dtypes.as_dtype(y.dtype).name))
      # Create a grad_y tensor in the name scope of the gradient.
      # Required for TensorArrays to identify which gradient call a
      # grad_y value is coming from.
      if isinstance(grad_y, ops.IndexedSlices):
        new_grad_ys.append(
            ops.IndexedSlices(
                indices=(array_ops.identity(
                    grad_y.indices, name="grad_ys_%d_indices" % i)
                         if isinstance(grad_y.indices, ops.Tensor) else
                         grad_y.indices),
                values=(array_ops.identity(
                    grad_y.values, name="grad_ys_%d_values" % i) if isinstance(
                        grad_y.values, ops.Tensor) else grad_y.values),
                dense_shape=(array_ops.identity(
                    grad_y.dense_shape, name="grad_ys_%d_shape" % i)
                             if isinstance(grad_y.dense_shape, ops.Tensor) else
                             grad_y.dense_shape)))
      else:
        new_grad_ys.append(array_ops.identity(grad_y, name="grad_ys_%d" % i))
  return new_grad_ys
def _IsTrainable(tensor):
  """Return True iff `tensor` has a floating-point or complex base dtype."""
  base = dtypes.as_dtype(tensor.dtype).base_dtype
  return base in (dtypes.float16, dtypes.float32, dtypes.float64,
                  dtypes.complex64, dtypes.complex128)
def _VerifyGeneratedGradients(grads, op):
"""Verify that gradients are valid in number and type.
Args:
grads: List of generated gradients.
op: Operation for which the gradients where generated.
Raises:
ValueError: if sizes of gradients and inputs don't match.
TypeError: if type of any gradient is not valid for its input.
"""
if len(grads) != len(op.inputs):
raise ValueError("Num gradients %d generated for op %s do not match num "
"inputs %d" % (len(grads), op.node_def, len(op.inputs)))
def _StopOps(from_ops, stop_gradient_ops, pending_count):
"""The set of ops that terminate the gradient computation.
This computes the frontier of the forward graph *before* which backprop
should stop. Operations in the returned set will not be differentiated.
This set is defined as the subset of `from_ops` containing ops that have
no predecessor in `from_ops`. `pending_count` is the result of
`_PendingCount(g, xs, from_ops)`. An 'op' has predecessors in `from_ops`
iff pending_count[op._id] > 0.
In addition, none of `stop_gradient_ops` will be differentiated.
Args:
from_ops: list of Operations.
stop_gradient_ops: list of Operations never to backprop through.
pending_count: List of integers, indexed by operation id.
Returns:
The set of operations.
"""
stop_ops = set()
for op in from_ops:
is_stop_op = True
for inp in op.inputs:
if pending_count[inp.op._id] > 0:
is_stop_op = False
break
if is_stop_op:
stop_ops.add(op._id)
stop_ops.update(op._id for op in stop_gradient_ops) # pylint: disable=protected-access
return stop_ops
@contextlib.contextmanager
def _maybe_colocate_with(op, colocate_gradients_with_ops):
"""Context to colocate with `op` if `colocate_gradients_with_ops`."""
if colocate_gradients_with_ops:
with ops.colocate_with(op):
yield
else:
yield
def _SymGrad(op, out_grads):
  """Backprop through a function call node op given its outputs' gradients."""
  # SymbolicGradient takes the forward inputs followed by the output
  # gradients, and yields one gradient per forward input.
  f_in = list(op.inputs) + out_grads
  f_types = [x.dtype for x in op.inputs]
  # Describe the function being differentiated, carrying over its attrs.
  f = attr_value_pb2.NameAttrList(name=op.type)
  for k in op.node_def.attr:
    f.attr[k].CopyFrom(op.node_def.attr[k])
  # pylint: disable=protected-access
  return functional_ops._symbolic_gradient(input=f_in, Tout=f_types, f=f)
  # pylint: enable=protected-access
def _MaybeCompile(scope, op, func, grad_fn):
  """Compile the calculation in grad_fn if op was marked as compiled.

  Args:
    scope: Name scope of the gradient computation; used to derive the
      `_XlaScope` when gradients are compiled separately.
    func: Function object for a function call node, else None; its
      `.definition` carries the XLA attrs probed below.
    op: The forward Operation whose gradient is being constructed.
    grad_fn: Zero-argument callable that builds and returns the gradient ops.

  Returns:
    The result of `grad_fn()`, possibly built under an XLA attr scope.
  """
  scope = scope.rstrip("/").replace("/", "_")
  if func is not None:
    # Function call node: XLA attrs live on the function definition.
    xla_compile = func.definition.attr["_XlaCompile"].b
    xla_separate_compiled_gradients = func.definition.attr[
        "_XlaSeparateCompiledGradients"].b
    xla_scope = func.definition.attr["_XlaScope"].s.decode()
  else:
    try:
      xla_compile = op.get_attr("_XlaCompile")
      xla_separate_compiled_gradients = op.get_attr(
          "_XlaSeparateCompiledGradients")
      xla_scope = op.get_attr("_XlaScope").decode()
    except ValueError:
      # Op carries no XLA attrs: build the gradient uncompiled.
      return grad_fn()  # Exit early
  if not xla_compile:
    return grad_fn()  # Exit early
  # If the gradients are supposed to be compiled separately, we give them a
  # _XlaScope name that is based on the name_scope of the gradients. Otherwise
  # they just inherit the existing _XlaScope name, which lets them be merged
  # together with the non-gradient computation.
  if xla_separate_compiled_gradients:
    xla_grad_scope = "%s_grad_%s" % (xla_scope, scope)
  else:
    xla_grad_scope = xla_scope
  attrs = {
      "_XlaCompile": attr_value_pb2.AttrValue(b=xla_compile),
      "_XlaScope": attr_value_pb2.AttrValue(s=xla_grad_scope.encode())
  }
  # Build the gradient ops with the XLA attrs attached.
  with ops.get_default_graph()._attr_scope(attrs):  # pylint: disable=protected-access
    return grad_fn()
def gradients(ys,
              xs,
              grad_ys=None,
              name="gradients",
              colocate_gradients_with_ops=False,
              gate_gradients=False,
              aggregation_method=None,
              stop_gradients=None):
  """Constructs symbolic derivatives of sum of `ys` w.r.t. x in `xs`.

  `ys` and `xs` are each a `Tensor` or a list of tensors.  `grad_ys`
  is a list of `Tensor`, holding the gradients received by the
  `ys`. The list must be the same length as `ys`.

  `gradients()` adds ops to the graph to output the derivatives of `ys` with
  respect to `xs`.  It returns a list of `Tensor` of length `len(xs)` where
  each tensor is the `sum(dy/dx)` for y in `ys`.

  `grad_ys` is a list of tensors of the same length as `ys` that holds
  the initial gradients for each y in `ys`.  When `grad_ys` is None,
  we fill in a tensor of '1's of the shape of y for each y in `ys`.  A
  user can provide their own initial `grad_ys` to compute the
  derivatives using a different initial gradient for each y (e.g., if
  one wanted to weight the gradient differently for each value in
  each y).

  `stop_gradients` is a `Tensor` or a list of tensors to be considered constant
  with respect to all `xs`. These tensors will not be backpropagated through,
  as though they had been explicitly disconnected using `stop_gradient`.  Among
  other things, this allows computation of partial derivatives as opposed to
  total derivatives. For example:

    a = tf.constant(0.)
    b = 2 * a
    g = tf.gradients(a + b, [a, b], stop_gradients=[a, b])

  Here the partial derivatives `g` evaluate to `[1.0, 1.0]`, compared to the
  total derivatives `tf.gradients(a + b, [a, b])`, which take into account the
  influence of `a` on `b` and evaluate to `[3.0, 1.0]`.  Note that the above is
  equivalent to:

    a = tf.stop_gradient(tf.constant(0.))
    b = tf.stop_gradient(2 * a)
    g = tf.gradients(a + b, [a, b])

  `stop_gradients` provides a way of stopping gradient after the graph has
  already been constructed, as compared to `tf.stop_gradient` which is used
  during graph construction.  When the two approaches are combined,
  backpropagation stops at both `tf.stop_gradient` nodes and nodes in
  `stop_gradients`, whichever is encountered first.

  Args:
    ys: A `Tensor` or list of tensors to be differentiated.
    xs: A `Tensor` or list of tensors to be used for differentiation.
    grad_ys: Optional. A `Tensor` or list of tensors the same size as
      `ys` and holding the gradients computed for each y in `ys`.
    name: Optional name to use for grouping all the gradient ops together.
      defaults to 'gradients'.
    colocate_gradients_with_ops: If True, try colocating gradients with
      the corresponding op.
    gate_gradients: If True, add a tuple around the gradients returned
      for an operations.  This avoids some race conditions.
    aggregation_method: Specifies the method used to combine gradient terms.
      Accepted values are constants defined in the class `AggregationMethod`.
    stop_gradients: Optional. A `Tensor` or list of tensors not to differentiate
      through.

  Returns:
    A list of `sum(dy/dx)` for each x in `xs`.

  Raises:
    LookupError: if one of the operations between `x` and `y` does not
      have a registered gradient function.
    ValueError: if the arguments are invalid.
    RuntimeError: if called in Eager mode.
  """
  # Graph-mode only: eager uses the tape-based machinery instead.
  if context.in_eager_mode():
    raise RuntimeError("tf.gradients not supported in EAGER mode. Use "
                       "functions in tf.contrib.eager.backprop instead.")
  # Normalize all tensor arguments to lists.
  ys = _AsList(ys)
  xs = _AsList(xs)
  stop_gradients = [] if stop_gradients is None else _AsList(stop_gradients)
  if grad_ys is None:
    grad_ys = [None] * len(ys)
  else:
    grad_ys = _AsList(grad_ys)
  with ops.name_scope(
      name, "gradients",
      list(ys) + list(xs) + list(stop_gradients) + list(grad_ys)) as grad_scope:
    ys = ops.convert_n_to_tensor_or_indexed_slices(ys, name="y")
    # Resource variables are differentiated through their underlying handle.
    xs = [x.handle if isinstance(x, resource_variable_ops.ResourceVariable)
          else x
          for x in xs]
    xs = ops.internal_convert_n_to_tensor_or_indexed_slices(xs, name="x",
                                                            as_ref=True)
    grad_ys = _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops)

    # The approach we take here is as follows: Create a list of all ops in the
    # subgraph between the ys and xs.  Visit these ops in reverse order of ids
    # to ensure that when we visit an op the gradients w.r.t its outputs have
    # been collected.  Then aggregate these gradients if needed, call the op's
    # gradient function, and add the generated gradients to the gradients for
    # its input.

    # Initialize the pending count for ops in the connected subgraph from ys
    # to the xs.
    if len(ys) > 1:
      # Insert identities so that each y has its own op, avoiding shared
      # producers among the ys.
      ys = [array_ops.identity(y) if y.consumers() else y for y in ys]
    to_ops = [t.op for t in ys]
    from_ops = [t.op for t in xs]
    stop_gradient_ops = [t.op for t in stop_gradients]
    pending_count, loop_state = _PendingCount(ops.get_default_graph(), to_ops,
                                              from_ops,
                                              colocate_gradients_with_ops)

    # Iterate over the collected ops.
    #
    # grads: op => list of gradients received on each output endpoint of the
    # op.  The gradients for each endpoint are initially collected as a list.
    # When it is time to call the op's gradient function, for each endpoint we
    # aggregate the list of received gradients into a Add() Operation if there
    # is more than one.
    grads = {}

    # Add the initial gradients for the ys.
    for y, grad_y in zip(ys, grad_ys):
      _SetGrad(grads, y, grad_y)

    # Initialize queue with to_ops.
    queue = collections.deque()
    # Add the ops in 'to_ops' into the queue.
    to_ops_set = set()
    for op in to_ops:
      # 'ready' handles the case where one output gradient relies on
      # another output's gradient.
      # pylint: disable=protected-access
      ready = (pending_count[op._id] == 0)
      if ready and op._id not in to_ops_set:
        to_ops_set.add(op._id)
        queue.append(op)
      # pylint: enable=protected-access

    if loop_state:
      # Seed zero gradients for trainable loop exits that received none.
      loop_exits = loop_state.ProcessUnusedLoopExits(pending_count, to_ops_set)
      for y in loop_exits:
        if _IsTrainable(y):
          _SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
          queue.append(y.op)

    stop_ops = _StopOps(from_ops, stop_gradient_ops, pending_count)
    while queue:
      # generate gradient subgraph for op.
      op = queue.popleft()
      with _maybe_colocate_with(op, colocate_gradients_with_ops):
        if loop_state:
          loop_state.EnterGradWhileContext(op, before=True)
        out_grads = _AggregatedGrads(grads, op, loop_state, aggregation_method)
        if loop_state:
          loop_state.ExitGradWhileContext(op, before=True)

        grad_fn = None
        # pylint: disable=protected-access
        func_call = None
        is_func_call = ops.get_default_graph()._is_function(op.type)
        has_out_grads = any(isinstance(g, ops.Tensor) or g for g in out_grads)
        if has_out_grads and (op._id not in stop_ops):
          if is_func_call:
            # Function call node: prefer an explicit python_grad_func.
            func_call = ops.get_default_graph()._get_function(op.type)
            grad_fn = func_call.python_grad_func
            # pylint: enable=protected-access
          else:
            # A grad_fn must be defined, either as a function or as None
            # for ops that do not have gradients.
            try:
              grad_fn = ops.get_gradient_function(op)
            except LookupError:
              raise LookupError(
                  "No gradient defined for operation '%s' (op type: %s)" %
                  (op.name, op.type))
        if loop_state:
          loop_state.EnterGradWhileContext(op, before=False)
        if (grad_fn or is_func_call) and has_out_grads:
          # NOTE: If _AggregatedGrads didn't compute a value for the i'th
          # output, it means that the cost does not depend on output[i],
          # therefore dC/doutput[i] is 0.
          for i, out_grad in enumerate(out_grads):
            if (not isinstance(out_grad, ops.Tensor) and
                not out_grad) and ((not grad_fn and is_func_call) or
                                   _IsTrainable(op.outputs[i])):
              # Only trainable outputs or outputs for a function call that
              # will use SymbolicGradient get a zero gradient. Gradient
              # functions should ignore the gradient for other outputs.
              # TODO(apassos) gradients of resource handles might be an
              # issue here because of zeros.
              if loop_state:
                out_grads[i] = loop_state.ZerosLike(op, i)
              else:
                out_grads[i] = control_flow_ops.ZerosLikeOutsideLoop(op, i)
          with ops.name_scope(op.name + "_grad"):
            # pylint: disable=protected-access
            with ops.get_default_graph()._original_op(op):
              # pylint: enable=protected-access
              if grad_fn:
                # If grad_fn was found, do not use SymbolicGradient even for
                # functions.
                in_grads = _MaybeCompile(
                    grad_scope, op, func_call, lambda: grad_fn(op, *out_grads))
              else:
                # For function call ops, we add a 'SymbolicGradient'
                # node to the graph to compute gradients.
                in_grads = _MaybeCompile(
                    grad_scope, op, func_call, lambda: _SymGrad(op, out_grads))
              in_grads = _AsList(in_grads)
              _VerifyGeneratedGradients(in_grads, op)
              if gate_gradients and len(
                  [x for x in in_grads if x is not None]) > 1:
                in_grads = control_flow_ops.tuple(in_grads)
          _LogOpGradients(op, out_grads, in_grads)
        else:
          # If no grad_fn is defined or none of out_grads is available,
          # just propagate a list of None backwards.
          in_grads = [None] * len(op.inputs)
        # Record the computed input gradients, shape-checking dense ones.
        for i, (t_in, in_grad) in enumerate(zip(op.inputs, in_grads)):
          if in_grad is not None:
            if (isinstance(in_grad, ops.Tensor) and
                t_in.dtype != dtypes.resource):
              try:
                in_grad.set_shape(t_in.get_shape())
              except ValueError:
                raise ValueError(
                    "Incompatible shapes between op input and calculated "
                    "input gradient. Forward operation: %s. Input index: %d. "
                    "Original input shape: %s. "
                    "Calculated input gradient shape: %s"
                    % (op.name, i, t_in.shape, in_grad.shape))
            _SetGrad(grads, t_in, in_grad)
        if loop_state:
          loop_state.ExitGradWhileContext(op, before=False)

      # Update pending count for the inputs of op and enqueue ready ops.
      _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state)

  if loop_state:
    loop_state.PostProcessing()
  return [_GetGrad(grads, x) for x in xs]
def _HasAnyNotNoneGrads(grads, op):
  """Return true iff op has real gradient."""
  for candidate in _GetGrads(grads, op):
    # A bare Tensor/IndexedSlices is a real gradient.
    if isinstance(candidate, (ops.Tensor, ops.IndexedSlices)):
      return True
    # A non-empty accumulation list counts if any element is not None.
    if candidate and isinstance(candidate, collections.Sequence):
      if any(g is not None for g in candidate):
        return True
  return False
def _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state):
  """Update pending count for the inputs of op and enqueue ready ops.

  Args:
    grads: The map of memoized gradients (op => per-output gradient lists).
    op: The Operation whose gradient was just generated.
    queue: The deque of ops awaiting gradient generation; ready input ops
      are appended here.
    pending_count: List of integers, indexed by operation id, counting the
      backprop inputs not yet received for each op.
    loop_state: ControlFlowState for while loops, or None if there are none.
  """
  for x in op.inputs:
    # pylint: disable=protected-access
    pending_count[x.op._id] -= 1
    ready = (pending_count[x.op._id] == 0)
    if loop_state and not ready:
      # Loop Switch ops may be processed before all their backprop inputs
      # have arrived.
      ready = (pending_count[x.op._id] > 0 and
               control_flow_ops.IsLoopSwitch(x.op))
    # pylint: enable=protected-access
    if ready:
      if control_flow_ops.IsLoopExit(x.op):
        # if x is an exit without real gradient, defer processing them.
        grad_state = loop_state.GetGradState(x.op, before=False)
        grad_state.deferred_exits.append(x)
        grad_state.pending_exits_count -= 1
        if grad_state.pending_exits_count == 0:
          # We now have all the exits so process them.
          has_not_none_grad = False
          for y in grad_state.deferred_exits:
            if _HasAnyNotNoneGrads(grads, y.op):
              has_not_none_grad = True
              queue.append(y.op)
            else:
              grad_state.unused_exits.append(y)
          if has_not_none_grad:
            # For an unused exit, if it has trainable outputs, backprop
            # a zero gradient. Otherwise, just ignore it.
            for y in grad_state.unused_exits:
              if _IsTrainable(y):
                _SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
              queue.append(y.op)
          else:
            # All exits are "unused" so use None as gradient.
            for y in grad_state.unused_exits:
              queue.append(y.op)
      else:
        queue.append(x.op)
def _SetGrad(grads, t, grad):
  """Sets gradient "grad" in "grads" for tensor "t"."""
  op = t.op
  op_grads = grads.get(op)
  if not op_grads:
    # One gradient accumulation list per output of the op.
    op_grads = [[] for _ in op.outputs]
    grads[op] = op_grads
  t_grads = op_grads[t.value_index]
  if isinstance(t_grads, list):
    t_grads.append(grad)
  else:
    # The list may have been replaced by a single gradient, which only
    # happens for a loop Switch op.
    assert control_flow_ops.IsLoopSwitch(op)
    op_grads[t.value_index] = grad
def _GetGrad(grads, t):
"""Gets gradient for tensor "t"."""
op = t.op
op_grads = grads.get(op)
if not op_grads:
return None
t_grad = op_grads[t.value_index]
assert not isinstance(t_grad, list), (
"gradients list should have been aggregated by now.")
return t_grad
def _GetGrads(grads, op):
"""Gets all gradients for op."""
if op in grads:
return grads[op]
else:
return [[] for _ in xrange(len(op.outputs))]
def _HandleNestedIndexedSlices(grad):
  """Flatten an IndexedSlices whose values may themselves be IndexedSlices."""
  assert isinstance(grad, ops.IndexedSlices)
  if isinstance(grad.values, ops.Tensor):
    return grad
  # Recursively flatten the inner slices, then compose the two levels of
  # indices with a gather.
  assert isinstance(grad.values, ops.IndexedSlices)
  inner = _HandleNestedIndexedSlices(grad.values)
  return ops.IndexedSlices(inner.values,
                           array_ops.gather(grad.indices, inner.indices),
                           inner.dense_shape)
def _AccumulatorShape(inputs):
  """Merge the static shapes of all Tensor elements of `inputs`."""
  merged = tensor_shape.unknown_shape()
  for item in inputs:
    if isinstance(item, ops.Tensor):
      merged = merged.merge_with(item.get_shape())
  return merged
def _LogOpGradients(op, out_grads, in_grads):
  """Log the in and out grads of an op."""
  logging.vlog(1, "Gradient for '" + op.name + "'")

  def _FilterGrad(x):
    # Hide Nones and empty accumulation lists from the log.
    if x is None:
      return False
    if isinstance(x, (list, tuple)):
      return bool(x)
    return True

  logging.vlog(1, " in --> %s",
               ", ".join([x.name for x in out_grads if _FilterGrad(x)]))
  logging.vlog(1, " out --> %s",
               ", ".join([x.name for x in in_grads if _FilterGrad(x)]))
def _MultiDeviceAddN(tensor_list):
  """Adds tensors from potentially multiple devices."""
  # Basic function structure comes from control_flow_ops.group().
  # Group the tensors by the device that produced them.
  tensors_on_device = collections.defaultdict(list)
  for tensor in tensor_list:
    tensors_on_device[tensor.device].append(tensor)
  # For each device, add the tensors on that device first.
  # Then gather the partial sums from multiple devices.
  # TODO(sjhwang): Create hierarchical aggregation tree as pbar's suggestion.
  # E.g., aggregate per GPU, then per task, and so on.
  summands = []

  def _DeviceKey(dev):
    # None sorts as the empty string.
    return "" if dev is None else dev

  for dev in sorted(tensors_on_device, key=_DeviceKey):
    device_tensors = tensors_on_device[dev]
    with ops.colocate_with(device_tensors[0].op, ignore_existing=True):
      summands.append(math_ops.add_n(device_tensors))
  return math_ops.add_n(summands)
class AggregationMethod(object):
  """A class listing aggregation methods used to combine gradients.

  Computing partial derivatives can require aggregating gradient
  contributions. This class lists the various methods that can
  be used to combine gradients in the graph:

  * `ADD_N`: All of the gradient terms are summed as part of one
    operation using the "AddN" op. It has the property that all
    gradients must be ready before any aggregation is performed.
  * `DEFAULT`: The system-chosen default aggregation method.
  """
  ADD_N = 0
  DEFAULT = ADD_N
  # The following are experimental and may not be supported in future releases.
  # EXPERIMENTAL_TREE sums pairwise so inputs can be released earlier;
  # EXPERIMENTAL_ACCUMULATE_N uses accumulate_n when the accumulator shape is
  # fully defined and there are more than two terms (see _AggregatedGrads).
  EXPERIMENTAL_TREE = 1
  EXPERIMENTAL_ACCUMULATE_N = 2
def _AggregatedGrads(grads, op, loop_state, aggregation_method=None):
  """Get the aggregated gradients for op.

  Args:
    grads: The map of memoized gradients.
    op: The op to get gradients for.
    loop_state: An object for maintaining the state of the while loops in the
                graph. It is of type ControlFlowState. None if the graph
                contains no while loops.
    aggregation_method: Specifies the method used to combine gradient terms.
      Accepted values are constants defined in the class `AggregationMethod`.

  Returns:
    A list of gradients, one per each output of `op`. If the gradients
    for a particular output is a list, this function aggregates it
    before returning.

  Raises:
    TypeError: if the incoming grads are not Tensors or IndexedSlices.
    ValueError: if the arguments are invalid.
  """
  if aggregation_method is None:
    aggregation_method = AggregationMethod.DEFAULT
  if aggregation_method not in [
      AggregationMethod.ADD_N, AggregationMethod.EXPERIMENTAL_TREE,
      AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
  ]:
    raise ValueError("Invalid aggregation_method specified %s." %
                     aggregation_method)
  out_grads = _GetGrads(grads, op)
  for i, out_grad in enumerate(out_grads):
    if loop_state:
      # A single (already-aggregated) gradient only occurs for a loop Switch.
      if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
        assert control_flow_ops.IsLoopSwitch(op)
        continue
    # Grads have to be Tensors or IndexedSlices
    if (isinstance(out_grad, collections.Sequence) and not all([
        isinstance(g, (ops.Tensor, ops.IndexedSlices)) for g in out_grad
        if g is not None
    ])):
      raise TypeError("gradients have to be either all Tensors "
                      "or all IndexedSlices")
    # Aggregate multiple gradients, and convert [] to None.
    if out_grad:
      if len(out_grad) < 2:
        # Single contribution: no aggregation needed.
        used = "nop"
        out_grads[i] = out_grad[0]
      elif all([isinstance(g, ops.Tensor) for g in out_grad if g is not None]):
        tensor_shape = _AccumulatorShape(out_grad)
        if (aggregation_method == AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
            and len(out_grad) > 2 and tensor_shape.is_fully_defined()):
          # The benefit of using AccumulateN is that its inputs can be combined
          # in any order and this can allow the expression to be evaluated with
          # a smaller memory footprint. When used with gpu_allocator_retry,
          # it is possible to compute a sum of terms which are much larger than
          # total GPU memory.
          # AccumulateN can currently only be used if we know the shape for
          # an accumulator variable. If this is not known, or if we only have
          # 2 grads then we fall through to the "tree" case below.
          used = "accumulate_n"
          out_grads[i] = math_ops.accumulate_n(out_grad)
        elif aggregation_method in [
            AggregationMethod.EXPERIMENTAL_TREE,
            AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
        ]:
          # Aggregate all gradients by doing pairwise sums: this may
          # reduce performance, but it can improve memory because the
          # gradients can be released earlier.
          #
          # TODO(vrv): Consider replacing this with a version of
          # tf.AddN() that eagerly frees its inputs as soon as they are
          # ready, so the order of this tree does not become a problem.
          used = "tree"
          with ops.name_scope(op.name + "_gradient_sum"):
            running_sum = out_grad[0]
            for grad in out_grad[1:]:
              running_sum = math_ops.add_n([running_sum, grad])
            out_grads[i] = running_sum
        else:
          # Default: a single AddN over the (possibly multi-device) terms.
          used = "add_n"
          out_grads[i] = _MultiDeviceAddN(out_grad)
        logging.vlog(2, " _AggregatedGrads %d x %s using %s",
                     len(out_grad), tensor_shape, used)
      else:
        # At least one IndexedSlices: concatenate all contributions sparsely.
        out_grad = math_ops._as_indexed_slices_list(
            [g for g in out_grad if g is not None])
        out_grad = [_HandleNestedIndexedSlices(x) for x in out_grad]
        # Form IndexedSlices out of the concatenated values and
        # indices.
        out_grads[i] = ops.IndexedSlices(
            array_ops.concat([x.values for x in out_grad], 0),
            array_ops.concat([x.indices for x in out_grad], 0),
            out_grad[0].dense_shape)
    else:  # not out_grad
      # out_grads[i] is [], thus its aggregation is simply None.
      out_grads[i] = None
  return out_grads
# TODO(vrv): Make this available when we want to make it public.
def _hessian_vector_product(ys, xs, v):
  """Multiply the Hessian of `ys` wrt `xs` by `v`.

  This is an efficient construction that uses a backprop-like approach
  to compute the product between the Hessian and another vector. The
  Hessian is usually too large to be explicitly computed or even
  represented, but this method allows us to at least multiply by it
  for the same big-O cost as backprop.

  Implicit Hessian-vector products are the main practical, scalable way
  of using second derivatives with neural networks. They allow us to
  do things like construct Krylov subspaces and approximate conjugate
  gradient descent.

  Example: if `y` = 1/2 `x`^T A `x`, then `hessian_vector_product(y,
  x, v)` will return an expression that evaluates to the same values
  as (A + A.T) `v`.

  Args:
    ys: A scalar value, or a tensor or list of tensors to be summed to
        yield a scalar.
    xs: A list of tensors that we should construct the Hessian over.
    v: A list of tensors, with the same shapes as xs, that we want to
       multiply by the Hessian.

  Returns:
    A list of tensors (or if the list would be length 1, a single tensor)
    containing the product between the Hessian and `v`.

  Raises:
    ValueError: `xs` and `v` have different length.
  """
  # Validate the input
  if len(v) != len(xs):
    raise ValueError("xs and v must have the same length.")
  # First backprop
  grads = gradients(ys, xs)
  assert len(grads) == len(xs)
  # v enters as a constant: stop_gradient keeps the second backprop from
  # differentiating through it.
  elemwise_products = [
      math_ops.multiply(grad_elem, array_ops.stop_gradient(v_elem))
      for grad_elem, v_elem in zip(grads, v)
      if grad_elem is not None
  ]
  # Second backprop
  return gradients(elemwise_products, xs)
def hessians(ys, xs, name="hessians", colocate_gradients_with_ops=False,
             gate_gradients=False, aggregation_method=None):
  """Constructs the Hessian of sum of `ys` with respect to `x` in `xs`.

  `hessians()` adds ops to the graph to output the Hessian matrix of `ys`
  with respect to `xs`.  It returns a list of `Tensor` of length `len(xs)`
  where each tensor is the Hessian of `sum(ys)`. This function currently
  only supports evaluating the Hessian with respect to (a list of) one-
  dimensional tensors.

  The Hessian is a matrix of second-order partial derivatives of a scalar
  tensor (see https://en.wikipedia.org/wiki/Hessian_matrix for more details).

  Args:
    ys: A `Tensor` or list of tensors to be differentiated.
    xs: A `Tensor` or list of tensors to be used for differentiation.
    name: Optional name to use for grouping all the gradient ops together.
      defaults to 'hessians'.
    colocate_gradients_with_ops: See `gradients()` documentation for details.
    gate_gradients: See `gradients()` documentation for details.
    aggregation_method: See `gradients()` documentation for details.

  Returns:
    A list of Hessian matrices of `sum(ys)` for each `x` in `xs`.

  Raises:
    LookupError: if one of the operations between `xs` and `ys` does not
      have a registered gradient function.
  """
  xs = _AsList(xs)
  kwargs = {
      'colocate_gradients_with_ops': colocate_gradients_with_ops,
      'gate_gradients': gate_gradients,
      'aggregation_method': aggregation_method
  }
  # Compute first-order derivatives and iterate for each x in xs.
  hessians = []
  _gradients = gradients(ys, xs, **kwargs)
  for i, (_gradient, x) in enumerate(zip(_gradients, xs)):
    # Ensure that x is a vector.
    check_rank = check_ops.assert_rank(
        x, 1, message='Cannot compute Hessian because element %d of `xs` does '
        'not have rank one.' % i
    )
    with ops.control_dependencies([check_rank]):
      # Declare an iterator and tensor array loop variables for the gradients.
      n = array_ops.size(x)
      loop_vars = [
          # Use constant_op.constant (imported at the top of this file and
          # used elsewhere in it) rather than relying on array_ops
          # re-exporting `constant`.
          constant_op.constant(0, dtypes.int32),
          tensor_array_ops.TensorArray(x.dtype, n)
      ]
      # Iterate over all elements of the gradient and compute second order
      # derivatives: row j of the Hessian is d(_gradient[j])/dx.
      _, hessian = control_flow_ops.while_loop(
          lambda j, _: j < n,
          lambda j, result: (j + 1,
                             result.write(j, gradients(_gradient[j], x)[0])),
          loop_vars
      )
      hessians.append(hessian.stack())
  return hessians
|
ychfan/tensorflow
|
tensorflow/python/ops/gradients_impl.py
|
Python
|
apache-2.0
| 40,251
|
[
"VisIt"
] |
357944735e0cc5d189f43e31f5ee2f090c680274a563d1140436731a45679897
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import random
import shutil
import subprocess
import time
from pathlib import Path
from shlex import split
from subprocess import check_call, check_output
from subprocess import CalledProcessError
from socket import gethostname, getfqdn
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import endpoint_from_flag
from charms.reactive import set_state, remove_state, is_state
from charms.reactive import when, when_any, when_not, when_none
from charms.kubernetes.common import get_version
from charms.reactive.helpers import data_changed, any_file_changed
from charms.templating.jinja2 import render
from charmhelpers.core import hookenv, unitdata
from charmhelpers.core.host import service_stop, service_restart
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
# Raw string: '\.' is an invalid escape sequence in a plain str literal
# (DeprecationWarning since Python 3.6); the value is unchanged.
nrpe.Check.shortname_re = r'[\.A-Za-z0-9-_]+$'

# Well-known paths for the kubeconfig files generated by this charm.
kubeconfig_path = '/root/cdk/kubeconfig'
kubeproxyconfig_path = '/root/cdk/kubeproxyconfig'
kubeclientconfig_path = '/root/.kube/config'
gcp_creds_env_key = 'GOOGLE_APPLICATION_CREDENTIALS'

# Make the snap-installed binaries (kubelet, kubectl, ...) resolvable.
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')

# Unit-local key/value store used to persist credentials and label state.
db = unitdata.kv()
@hook('upgrade-charm')
def upgrade_charm():
    """Reset worker state on charm upgrade so the unit fully reconverges."""
    # Trigger removal of PPA docker installation if it was previously set.
    set_state('config.changed.install_from_upstream')
    hookenv.atexit(remove_state, 'config.changed.install_from_upstream')

    cleanup_pre_snap_services()
    check_resources_for_upgrade_needed()

    # Remove the RC for nginx ingress if it exists
    if hookenv.config().get('ingress'):
        kubectl_success('delete', 'rc', 'nginx-ingress-controller')

    # Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
    # since they can differ between k8s versions
    if is_state('kubernetes-worker.gpu.enabled'):
        remove_state('kubernetes-worker.gpu.enabled')
        try:
            disable_gpu()
        except ApplyNodeLabelFailed:
            # Removing node label failed. Probably the master is unavailable.
            # Proceed with the upgrade in hope GPUs will still be there.
            hookenv.log('Failed to remove GPU labels. Proceed with upgrade.')

    # Clear derived state so the reactive handlers rebuild the config,
    # ingress manifests, and auth credentials from scratch.
    remove_state('kubernetes-worker.cni-plugins.installed')
    remove_state('kubernetes-worker.config.created')
    remove_state('kubernetes-worker.ingress.available')
    remove_state('worker.auth.bootstrapped')
    set_state('kubernetes-worker.restart-needed')
def check_resources_for_upgrade_needed():
    """Flag a snap upgrade if any attached resource changed on disk."""
    hookenv.status_set('maintenance', 'Checking resources')
    resource_paths = [
        hookenv.resource_get(name)
        for name in ('kubectl', 'kubelet', 'kube-proxy')
    ]
    if any_file_changed(resource_paths):
        set_upgrade_needed()
def set_upgrade_needed():
    """Mark the snaps as needing an upgrade, honouring manual-upgrade mode."""
    set_state('kubernetes-worker.snaps.upgrade-needed')
    config = hookenv.config()
    previous_channel = config.previous('channel')
    require_manual = config.get('require-manual-upgrade')
    # Proceed automatically on first install (no previous channel) or when
    # the operator has not asked for manual upgrades.
    if not require_manual or previous_channel is None:
        set_state('kubernetes-worker.snaps.upgrade-specified')
def cleanup_pre_snap_services():
    """Remove state, services, and files left over from pre-snap installs."""
    # Drop the obsolete reactive state.
    remove_state('kubernetes-worker.components.installed')

    # Stop the legacy (non-snap) services.
    for legacy_service in ('kubelet', 'kube-proxy'):
        hookenv.log('Stopping {0} service.'.format(legacy_service))
        service_stop(legacy_service)

    # Delete files and directories installed by the old packaging.
    stale_paths = [
        "/lib/systemd/system/kubelet.service",
        "/lib/systemd/system/kube-proxy.service",
        "/etc/default/kube-default",
        "/etc/default/kubelet",
        "/etc/default/kube-proxy",
        "/srv/kubernetes",
        "/usr/local/bin/kubectl",
        "/usr/local/bin/kubelet",
        "/usr/local/bin/kube-proxy",
        "/etc/kubernetes"
    ]
    for path in stale_paths:
        if os.path.isdir(path):
            hookenv.log("Removing directory: " + path)
            shutil.rmtree(path)
        elif os.path.isfile(path):
            hookenv.log("Removing file: " + path)
            os.remove(path)
@when('config.changed.channel')
def channel_changed():
    """A new snap channel was configured; schedule an upgrade."""
    set_upgrade_needed()
@when('kubernetes-worker.snaps.upgrade-needed')
@when_not('kubernetes-worker.snaps.upgrade-specified')
def upgrade_needed_status():
    """Block until the operator runs the upgrade action."""
    hookenv.status_set('blocked',
                       'Needs manual upgrade, run the upgrade action')
@when('kubernetes-worker.snaps.upgrade-specified')
def install_snaps():
    """Install (or refresh) the worker snaps from the configured channel."""
    check_resources_for_upgrade_needed()
    channel = hookenv.config('channel')
    for snap_name in ('kubectl', 'kubelet', 'kube-proxy'):
        hookenv.status_set('maintenance',
                           'Installing {} snap'.format(snap_name))
        snap.install(snap_name, channel=channel, classic=True)
    set_state('kubernetes-worker.snaps.installed')
    set_state('kubernetes-worker.restart-needed')
    remove_state('kubernetes-worker.snaps.upgrade-needed')
    remove_state('kubernetes-worker.snaps.upgrade-specified')
@hook('stop')
def shutdown():
    """Deregister this node and stop the worker daemons on unit teardown."""
    try:
        # Only attempt deregistration if we ever wrote a kubeconfig.
        if os.path.isfile(kubeconfig_path):
            kubectl('delete', 'node', get_node_name())
    except CalledProcessError:
        hookenv.log('Failed to unregister node.')
    for daemon in ('snap.kubelet.daemon', 'snap.kube-proxy.daemon'):
        service_stop(daemon)
@when('docker.available')
@when_not('kubernetes-worker.cni-plugins.installed')
def install_cni_plugins():
    ''' Unpack the cni-plugins resource '''
    charm_dir = os.getenv('CHARM_DIR')

    # Get the resource via resource_get
    try:
        resource_name = 'cni-{}'.format(arch())
        archive = hookenv.resource_get(resource_name)
    except Exception:
        message = 'Error fetching the cni resource.'
        hookenv.log(message)
        hookenv.status_set('blocked', message)
        return

    if not archive:
        hookenv.log('Missing cni resource.')
        hookenv.status_set('blocked', 'Missing cni resource.')
        return

    # Handle null resource publication, we check if filesize < 1mb
    filesize = os.stat(archive).st_size
    if filesize < 1000000:
        hookenv.status_set('blocked', 'Incomplete cni resource.')
        return

    hookenv.status_set('maintenance', 'Unpacking cni resource.')
    unpack_path = '{}/files/cni'.format(charm_dir)
    os.makedirs(unpack_path, exist_ok=True)
    cmd = ['tar', 'xfvz', archive, '-C', unpack_path]
    hookenv.log(cmd)
    check_call(cmd)

    # Install each unpacked binary into its destination with `install -D`
    # (creates leading directories as needed).
    apps = [
        {'name': 'loopback', 'path': '/opt/cni/bin'}
    ]
    for app in apps:
        unpacked = '{}/{}'.format(unpack_path, app['name'])
        app_path = os.path.join(app['path'], app['name'])
        install = ['install', '-v', '-D', unpacked, app_path]
        hookenv.log(install)
        check_call(install)

    # Used by the "registry" action. The action is run on a single worker, but
    # the registry pod can end up on any worker, so we need this directory on
    # all the workers.
    os.makedirs('/srv/registry', exist_ok=True)
    set_state('kubernetes-worker.cni-plugins.installed')
@when('kubernetes-worker.snaps.installed')
def set_app_version():
    ''' Declare the application version to juju '''
    version = check_output(['kubelet', '--version'])
    # Output looks like b'Kubernetes v1.10.0'; keep only the portion after
    # ' v'. Decode so juju receives a str rather than a raw bytes object.
    hookenv.application_version_set(
        version.split(b' v')[-1].rstrip().decode('utf-8'))
@when('kubernetes-worker.snaps.installed')
@when_not('kube-control.dns.available')
def notify_user_transient_status():
    """Report that the application is still converging.

    During deployment the worker has to start kubelet without cluster dns
    configured. If this is the first unit online in a service pool waiting
    to self host the dns pod, it will configure itself to query the dns
    service declared in the kube-system namespace once that is available.
    """
    hookenv.status_set('waiting', 'Waiting for cluster DNS.')
@when('kubernetes-worker.snaps.installed',
      'kube-control.dns.available')
@when_not('kubernetes-worker.snaps.upgrade-needed')
def charm_status(kube_control):
    """Refresh the unit status from the current kubelet state."""
    update_kubelet_status()
def update_kubelet_status():
    """Set the unit status based on which worker daemons are running."""
    stopped = [
        svc for svc in ('kubelet', 'kube-proxy')
        if not _systemctl_is_active('snap.{}.daemon'.format(svc))
    ]
    if stopped:
        hookenv.status_set(
            'waiting', 'Waiting for {} to start.'.format(','.join(stopped)))
    else:
        hookenv.status_set('active', 'Kubernetes worker running.')
def get_ingress_address(relation):
    """Return the address peers should use to reach this unit."""
    try:
        network_info = hookenv.network_get(relation.relation_name)
    except NotImplementedError:
        # Older juju has no network_get; fall through to the legacy path.
        network_info = []

    if network_info and 'ingress-addresses' in network_info:
        # just grab the first one for now, maybe be more robust here?
        return network_info['ingress-addresses'][0]
    # If they don't have ingress-addresses they are running a juju that
    # doesn't support spaces, so just return the private address.
    return hookenv.unit_get('private-address')
@when('certificates.available', 'kube-control.connected')
def send_data(tls, kube_control):
    """Send the data required to create a server certificate for this unit."""
    # Use the public ip of this unit as the Common Name for the certificate.
    common_name = hookenv.unit_public_ip()

    # SANs the tls layer will embed in the server cert.
    sans = [
        common_name,
        get_ingress_address(kube_control),
        get_node_name(),
    ]

    # A path-safe name derived from the unit name.
    certificate_name = hookenv.local_unit().replace('/', '_')

    tls.request_server_cert(common_name, sans, certificate_name)
@when('kube-api-endpoint.available', 'kube-control.dns.available',
      'cni.available')
def watch_for_changes(kube_api, kube_control, cni):
    """Signal a restart when API servers, DNS, or the cluster CIDR change."""
    servers = get_kube_api_servers(kube_api)
    dns = kube_control.get_dns()
    cluster_cidr = cni.get_config()['cidr']

    # data_changed records each value; keep the short-circuiting `or` so the
    # recording behaviour matches previous runs.
    changed = (data_changed('kube-api-servers', servers) or
               data_changed('kube-dns', dns) or
               data_changed('cluster-cidr', cluster_cidr))
    if changed:
        set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.snaps.installed', 'kube-api-endpoint.available',
      'tls_client.ca.saved', 'tls_client.client.certificate.saved',
      'tls_client.client.key.saved', 'tls_client.server.certificate.saved',
      'tls_client.server.key.saved',
      'kube-control.dns.available', 'kube-control.auth.available',
      'cni.available', 'kubernetes-worker.restart-needed',
      'worker.auth.bootstrapped')
def start_worker(kube_api, kube_control, auth_control, cni):
    ''' Start kubelet using the provided API and DNS info.'''
    servers = get_kube_api_servers(kube_api)
    # Note that the DNS server doesn't necessarily exist at this point. We know
    # what its IP will eventually be, though, so we can go ahead and configure
    # kubelet with that info. This ensures that early pods are configured with
    # the correct DNS even though the server isn't ready yet.

    dns = kube_control.get_dns()
    ingress_ip = get_ingress_address(kube_control)
    cluster_cidr = cni.get_config()['cidr']

    if cluster_cidr is None:
        hookenv.log('Waiting for cluster cidr.')
        return

    # Credentials were cached by catch_change_in_creds; record them so a
    # later change re-triggers this handler via restart-needed.
    creds = db.get('credentials')
    data_changed('kube-control.creds', creds)

    # Write kubeconfigs, then (re)configure and restart the daemons.
    create_config(random.choice(servers), creds)
    configure_kubelet(dns, ingress_ip)
    configure_kube_proxy(servers, cluster_cidr)
    set_state('kubernetes-worker.config.created')
    restart_unit_services()
    update_kubelet_status()
    set_state('kubernetes-worker.label-config-required')
    remove_state('kubernetes-worker.restart-needed')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
    """Tell the CNI subordinate we are a worker and where our kubeconfig is."""
    cni.set_config(is_master=False, kubeconfig_path=kubeconfig_path)
@when('config.changed.ingress')
def toggle_ingress_state():
    """Ingress is a toggled option; drop the available state so the
    render_and_launch_ingress handler reconverges."""
    remove_state('kubernetes-worker.ingress.available')
@when('docker.sdn.configured')
def sdn_changed():
    """Restart the worker daemons after a Software Defined Network change."""
    restart_unit_services()
    update_kubelet_status()
    remove_state('docker.sdn.configured')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.ingress.available')
def render_and_launch_ingress():
    """Launch ingress and the default http backend when enabled in config;
    otherwise attempt to delete them and close the ports."""
    if hookenv.config().get('ingress'):
        launch_default_ingress_controller()
        return

    hookenv.log('Deleting the http backend and ingress.')
    kubectl_manifest('delete',
                     '/root/cdk/addons/default-http-backend.yaml')
    kubectl_manifest('delete',
                     '/root/cdk/addons/ingress-daemon-set.yaml')  # noqa
    hookenv.close_port(80)
    hookenv.close_port(443)
@when('config.changed.labels')
def handle_labels_changed():
    """Re-apply node labels when the 'labels' config option changes."""
    set_state('kubernetes-worker.label-config-required')
@when('kubernetes-worker.label-config-required',
      'kubernetes-worker.config.created')
def apply_node_labels():
    ''' Parse the labels configuration option and apply the labels to the
    node. '''
    # Get the user's configured labels. Use partition rather than split so a
    # value that itself contains '=' is kept intact, and tolerate an unset
    # or empty option instead of crashing on None.
    config = hookenv.config()
    user_labels = {}
    for item in (config.get('labels') or '').split(' '):
        if not item:
            continue
        key, sep, val = item.partition('=')
        if sep:
            user_labels[key] = val
        else:
            hookenv.log('Skipping malformed option: {}.'.format(item))

    # Collect the current label state.
    current_labels = db.get('current_labels') or {}

    # Remove any labels that the user has removed from the config.
    for key in list(current_labels.keys()):
        if key not in user_labels:
            try:
                remove_label(key)
                del current_labels[key]
                db.set('current_labels', current_labels)
            except ApplyNodeLabelFailed as e:
                hookenv.log(str(e))
                return

    # Add any new labels.
    for key, val in user_labels.items():
        try:
            set_label(key, val)
            current_labels[key] = val
            db.set('current_labels', current_labels)
        except ApplyNodeLabelFailed as e:
            hookenv.log(str(e))
            return

    # Set the juju-application label.
    try:
        set_label('juju-application', hookenv.service_name())
    except ApplyNodeLabelFailed as e:
        hookenv.log(str(e))
        return

    # Label configuration complete.
    remove_state('kubernetes-worker.label-config-required')
@when_any('config.changed.kubelet-extra-args',
          'config.changed.proxy-extra-args')
def extra_args_changed():
    """Extra daemon arguments changed; schedule a service restart."""
    set_state('kubernetes-worker.restart-needed')
@when('config.changed.docker-logins')
def docker_logins_changed():
    """Flag new docker login options.

    If docker daemon options have also changed, request a daemon restart
    first so the login runs against the restarted daemon.
    """
    config = hookenv.config()
    if data_changed('docker-opts', config['docker-opts']):
        hookenv.log('Found new docker daemon options. Requesting a restart.')
        # State will be removed by layer-docker after restart
        set_state('docker.restart')
    set_state('kubernetes-worker.docker-login')
@when('kubernetes-worker.docker-login')
@when_not('docker.restart')
def run_docker_login():
    """Login to a docker registry with configured credentials."""
    config = hookenv.config()

    previous_logins = config.previous('docker-logins')
    logins = config['docker-logins']
    logins = json.loads(logins)

    # Log out of any registry that was configured before but no longer is.
    if previous_logins:
        previous_logins = json.loads(previous_logins)
        next_servers = {login['server'] for login in logins}
        previous_servers = {login['server'] for login in previous_logins}
        servers_to_logout = previous_servers - next_servers
        for server in servers_to_logout:
            cmd = ['docker', 'logout', server]
            subprocess.check_call(cmd)

    for login in logins:
        server = login['server']
        username = login['username']
        password = login['password']
        # NOTE(review): passing the password via -p exposes it in the process
        # list; docker's --password-stdin would avoid this — consider fixing.
        cmd = ['docker', 'login', server, '-u', username, '-p', password]
        subprocess.check_call(cmd)

    remove_state('kubernetes-worker.docker-login')
    set_state('kubernetes-worker.restart-needed')
def arch():
    '''Return the package architecture as a string. Raise an exception if
    the dpkg query fails.'''
    # Ask dpkg for this system's architecture and decode the bytes result.
    raw = check_output(['dpkg', '--print-architecture'])
    return raw.rstrip().decode('utf-8')
def create_config(server, creds):
    '''Create a kubernetes configuration for the worker unit.'''
    # Get the options from the tls-client layer.
    layer_options = layer.options('tls-client')
    # Get all the paths to the tls information required for kubeconfig.
    ca = layer_options.get('ca_certificate_path')

    # Create kubernetes configuration in the default location for ubuntu.
    create_kubeconfig('/home/ubuntu/.kube/config', server, ca,
                      token=creds['client_token'], user='ubuntu')
    # Make the config dir readable by the ubuntu users so juju scp works.
    cmd = ['chown', '-R', 'ubuntu:ubuntu', '/home/ubuntu/.kube']
    check_call(cmd)

    # Create kubernetes configuration in the default location for root.
    create_kubeconfig(kubeclientconfig_path, server, ca,
                      token=creds['client_token'], user='root')

    # Create kubernetes configuration for kubelet, and kube-proxy services.
    create_kubeconfig(kubeconfig_path, server, ca,
                      token=creds['kubelet_token'], user='kubelet')
    create_kubeconfig(kubeproxyconfig_path, server, ca,
                      token=creds['proxy_token'], user='kube-proxy')
def parse_extra_args(config_key):
    """Parse a space-separated 'key=value' config option into a dict.

    Bare tokens with no '=' are treated as boolean flags set to 'true'.
    """
    parsed = {}
    for token in hookenv.config().get(config_key, '').split():
        name, sep, value = token.partition('=')
        parsed[name] = value if sep else 'true'
    return parsed
def configure_kubernetes_service(service, base_args, extra_args_key):
    """Apply snap config for *service*, clearing args from the previous run."""
    store = unitdata.kv()
    prev_args_key = 'kubernetes-worker.prev_args.' + service
    prev_args = store.get(prev_args_key) or {}

    # Start by nulling every previously-set argument, then overlay the base
    # args and finally any operator-supplied extra args.
    args = {arg: 'null' for arg in prev_args}
    args.update(base_args)
    args.update(parse_extra_args(extra_args_key))

    cmd = ['snap', 'set', service] + ['%s=%s' % kv for kv in args.items()]
    check_call(cmd)

    store.set(prev_args_key, args)
def configure_kubelet(dns, ingress_ip):
    """Write kubelet snap configuration (applied via `snap set`).

    :param dns: mapping with 'domain', 'enable-kube-dns' and 'sdn-ip' keys
        (as read below) from the kube-control relation.
    :param ingress_ip: address advertised as this node's IP.
    """
    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')
    server_cert_path = layer_options.get('server_certificate_path')
    server_key_path = layer_options.get('server_key_path')

    kubelet_opts = {}
    kubelet_opts['require-kubeconfig'] = 'true'
    kubelet_opts['kubeconfig'] = kubeconfig_path
    kubelet_opts['network-plugin'] = 'cni'
    kubelet_opts['v'] = '0'
    kubelet_opts['address'] = '0.0.0.0'
    kubelet_opts['port'] = '10250'
    kubelet_opts['cluster-domain'] = dns['domain']
    kubelet_opts['anonymous-auth'] = 'false'
    kubelet_opts['client-ca-file'] = ca_cert_path
    kubelet_opts['tls-cert-file'] = server_cert_path
    kubelet_opts['tls-private-key-file'] = server_key_path
    kubelet_opts['logtostderr'] = 'true'
    kubelet_opts['fail-swap-on'] = 'false'
    kubelet_opts['node-ip'] = ingress_ip

    if (dns['enable-kube-dns']):
        kubelet_opts['cluster-dns'] = dns['sdn-ip']

    # set --allow-privileged flag for kubelet
    kubelet_opts['allow-privileged'] = set_privileged()

    if is_state('kubernetes-worker.gpu.enabled'):
        hookenv.log('Adding '
                    '--feature-gates=DevicePlugins=true '
                    'to kubelet')
        kubelet_opts['feature-gates'] = 'DevicePlugins=true'

    # Cloud-provider integration. NOTE(review): _cloud_config_path is defined
    # elsewhere in this file; presumably the config file it names is rendered
    # by the cloud endpoint handlers — confirm.
    if is_state('endpoint.aws.ready'):
        kubelet_opts['cloud-provider'] = 'aws'
    elif is_state('endpoint.gcp.ready'):
        cloud_config_path = _cloud_config_path('kubelet')
        kubelet_opts['cloud-provider'] = 'gce'
        kubelet_opts['cloud-config'] = str(cloud_config_path)
    elif is_state('endpoint.openstack.ready'):
        cloud_config_path = _cloud_config_path('kubelet')
        kubelet_opts['cloud-provider'] = 'openstack'
        kubelet_opts['cloud-config'] = str(cloud_config_path)

    configure_kubernetes_service('kubelet', kubelet_opts, 'kubelet-extra-args')
def configure_kube_proxy(api_servers, cluster_cidr):
    """Write kube-proxy snap configuration (applied via `snap set`)."""
    kube_proxy_opts = {}
    kube_proxy_opts['cluster-cidr'] = cluster_cidr
    kube_proxy_opts['kubeconfig'] = kubeproxyconfig_path
    kube_proxy_opts['logtostderr'] = 'true'
    kube_proxy_opts['v'] = '0'
    # Pick one API server at random for this proxy instance.
    kube_proxy_opts['master'] = random.choice(api_servers)
    kube_proxy_opts['hostname-override'] = get_node_name()

    # Inside an LXC container conntrack cannot be tuned, so disable the
    # per-core conntrack limit. NOTE(review): shell=True with a fixed command
    # string; this raises if virt-what is missing — confirm it is installed
    # on all supported bases.
    if b'lxc' in check_output('virt-what', shell=True):
        kube_proxy_opts['conntrack-max-per-core'] = '0'

    configure_kubernetes_service('kube-proxy', kube_proxy_opts,
                                 'proxy-extra-args')
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
                      user='ubuntu', context='juju-context',
                      cluster='juju-cluster', password=None, token=None):
    '''Create a configuration for Kubernetes based on path using the supplied
    arguments for values of the Kubernetes server, CA, key, certificate, user
    context and cluster.

    Raises ValueError when no credential is supplied, or when both token and
    password are supplied.
    '''
    # At least one authentication mechanism (key+cert, password, or token)
    # must be present.
    if not key and not certificate and not password and not token:
        raise ValueError('Missing authentication mechanism.')

    # token and password are mutually exclusive. Error early if both are
    # present. The developer has requested an impossible situation.
    # see: kubectl config set-credentials --help
    if token and password:
        raise ValueError('Token and Password are mutually exclusive.')
    # Create the config file with the address of the master server.
    cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
          '--server={2} --certificate-authority={3} --embed-certs=true'
    check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
    # Delete old users
    cmd = 'kubectl config --kubeconfig={0} unset users'
    check_call(split(cmd.format(kubeconfig)))
    # Create the credentials using the client flags.
    cmd = 'kubectl config --kubeconfig={0} ' \
          'set-credentials {1} '.format(kubeconfig, user)

    if key and certificate:
        cmd = '{0} --client-key={1} --client-certificate={2} '\
              '--embed-certs=true'.format(cmd, key, certificate)
    if password:
        cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
    # This is mutually exclusive from password. They will not work together.
    if token:
        cmd = "{0} --token={1}".format(cmd, token)
    check_call(split(cmd))
    # Create a default context with the cluster.
    cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
          '--cluster={2} --user={3}'
    check_call(split(cmd.format(kubeconfig, context, cluster, user)))
    # Make the config use this new context.
    cmd = 'kubectl config --kubeconfig={0} use-context {1}'
    check_call(split(cmd.format(kubeconfig, context)))
@when_any('config.changed.default-backend-image',
          'config.changed.ingress-ssl-chain-completion',
          'config.changed.nginx-image')
@when('kubernetes-worker.config.created')
def launch_default_ingress_controller():
    ''' Launch the Kubernetes ingress controller & default backend (404) '''
    config = hookenv.config()

    # need to test this in case we get in
    # here from a config change to the image
    if not config.get('ingress'):
        return

    context = {}
    context['arch'] = arch()
    addon_path = '/root/cdk/addons/{}'

    # Pick a default-backend image matching the machine architecture when
    # the operator asked for the default ("" or "auto").
    context['defaultbackend_image'] = config.get('default-backend-image')
    if (context['defaultbackend_image'] == "" or
            context['defaultbackend_image'] == "auto"):
        if context['arch'] == 's390x':
            context['defaultbackend_image'] = \
                "k8s.gcr.io/defaultbackend-s390x:1.4"
        elif context['arch'] == 'arm64':
            context['defaultbackend_image'] = \
                "k8s.gcr.io/defaultbackend-arm64:1.4"
        else:
            context['defaultbackend_image'] = \
                "k8s.gcr.io/defaultbackend:1.4"

    # Render the default http backend (404) replicationcontroller manifest
    manifest = addon_path.format('default-http-backend.yaml')
    render('default-http-backend.yaml', manifest, context)
    hookenv.log('Creating the default http backend.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create default-http-backend. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    # Render the ingress daemon set controller manifest
    context['ssl_chain_completion'] = config.get(
        'ingress-ssl-chain-completion')
    context['ingress_image'] = config.get('nginx-image')
    if context['ingress_image'] == "" or context['ingress_image'] == "auto":
        images = {'amd64': 'quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.15.0',  # noqa
                  'arm64': 'quay.io/kubernetes-ingress-controller/nginx-ingress-controller-arm64:0.15.0',  # noqa
                  's390x': 'quay.io/kubernetes-ingress-controller/nginx-ingress-controller-s390x:0.15.0',  # noqa
                  'ppc64el': 'quay.io/kubernetes-ingress-controller/nginx-ingress-controller-ppc64le:0.15.0',  # noqa
                  }
        context['ingress_image'] = images.get(context['arch'], images['amd64'])
    # DaemonSet moved API groups in Kubernetes 1.9.
    if get_version('kubelet') < (1, 9):
        context['daemonset_api_version'] = 'extensions/v1beta1'
    else:
        context['daemonset_api_version'] = 'apps/v1beta2'
    context['juju_application'] = hookenv.service_name()
    manifest = addon_path.format('ingress-daemon-set.yaml')
    render('ingress-daemon-set.yaml', manifest, context)
    hookenv.log('Creating the ingress daemon set.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        hookenv.log(e)
        hookenv.log('Failed to create ingress controller. Will attempt again next update.')  # noqa
        hookenv.close_port(80)
        hookenv.close_port(443)
        return

    set_state('kubernetes-worker.ingress.available')
    hookenv.open_port(80)
    hookenv.open_port(443)
def restart_unit_services():
    """Restart the kubelet and kube-proxy snap daemons."""
    hookenv.log('Restarting kubelet and kube-proxy.')
    for svc in ('kube-proxy', 'kubelet'):
        service_restart('snap.%s.daemon' % svc)
def get_kube_api_servers(kube_api):
    '''Return the kubernetes api server address and port for this
    relationship.'''
    # Flatten every host of every service on the relation into https URLs.
    return ['https://{0}:{1}'.format(host['hostname'], host['port'])
            for service in kube_api.services()
            for host in service['hosts']]
def kubectl(*args):
    """Run a kubectl command against the root kubeconfig.

    Returns stdout; raises CalledProcessError if the command fails.
    """
    cmd = ['kubectl', '--kubeconfig=' + kubeclientconfig_path] + list(args)
    hookenv.log('Executing {}'.format(cmd))
    return check_output(cmd)
def kubectl_success(*args):
    """Run kubectl with *args*; True on success, False otherwise."""
    try:
        kubectl(*args)
    except CalledProcessError:
        return False
    return True
def kubectl_manifest(operation, manifest):
    """Run `kubectl <operation> -f <manifest>`; return True on success.

    :param operation: one of get, create, delete, replace
    :param manifest: filepath to the manifest
    """
    if operation == 'delete':
        # Deletions are a special case: remove requested resources
        # immediately with --now.
        return kubectl_success(operation, '-f', manifest, '--now')
    if operation == 'create' and kubectl_success('get', '-f', manifest):
        # The definition already exists, so it's probably safe to assume
        # creation already succeeded; guard against re-creating it.
        hookenv.log('Skipping definition for {}'.format(manifest))
        return True
    # Execute the requested command that did not match any of the special
    # cases above.
    return kubectl_success(operation, '-f', manifest)
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
    """Perform the one-time nagios (NRPE) setup."""
    set_state('nrpe-external-master.initial-config')
    update_nrpe_config(nagios)
@when('kubernetes-worker.config.created')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
          'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
    """(Re)write NRPE service checks for the worker daemons."""
    monitored = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
    nrpe_setup = nrpe.NRPE(hostname=nrpe.get_nagios_hostname())
    nrpe.add_init_service_checks(nrpe_setup, monitored,
                                 nrpe.get_nagios_unit_name())
    nrpe_setup.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
    """Drop the NRPE checks when the nagios relation goes away."""
    remove_state('nrpe-external-master.initial-config')
    # The current nrpe-external-master interface doesn't handle a lot of
    # logic; use the charm-helpers code for now.
    nrpe_setup = nrpe.NRPE(hostname=nrpe.get_nagios_hostname())
    # Remove the checks for each monitored systemd service.
    for check in ('snap.kubelet.daemon', 'snap.kube-proxy.daemon'):
        nrpe_setup.remove_check(shortname=check)
def set_privileged():
    """Return 'true' if privileged containers are needed.

    This is when a) the user requested them
                 b) user does not care (auto) and GPUs are available in a
                    pre 1.9 era
    """
    privileged = hookenv.config('allow-privileged').lower()
    gpu_needs_privileged = (is_state('kubernetes-worker.gpu.enabled') and
                            get_version('kubelet') < (1, 9))

    if privileged == 'auto':
        privileged = 'true' if gpu_needs_privileged else 'false'

    if privileged == 'false' and gpu_needs_privileged:
        # The operator explicitly disabled privileged mode while GPUs (which
        # require it pre-1.9) are enabled, so GPU support must be turned off.
        disable_gpu()
        remove_state('kubernetes-worker.gpu.enabled')
        # No need to restart kubernetes (set the restart-needed state)
        # because set-privileged is already in the restart path
    return privileged
@when('config.changed.allow-privileged')
@when('kubernetes-worker.config.created')
def on_config_allow_privileged_change():
    """React to a changed 'allow-privileged' config value."""
    set_state('kubernetes-worker.restart-needed')
    remove_state('config.changed.allow-privileged')
@when('nvidia-docker.installed')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.gpu.enabled')
def enable_gpu():
    """Enable GPU usage on this node by labelling it and flagging a restart.

    Requires kubelet >= 1.9 (DevicePlugins feature gate).
    """
    if get_version('kubelet') < (1, 9):
        hookenv.status_set(
            'active',
            # Typo fix: was "suppport".
            'Upgrade to snap channel >= 1.9/stable to enable GPU support.'
        )
        return

    hookenv.log('Enabling gpu mode')
    try:
        # Not sure why this is necessary, but if you don't run this, k8s will
        # think that the node has 0 gpus (as shown by the output of
        # `kubectl get nodes -o yaml`
        check_call(['nvidia-smi'])
    except CalledProcessError as cpe:
        hookenv.log('Unable to communicate with the NVIDIA driver.')
        hookenv.log(cpe)
        return

    set_label('gpu', 'true')
    set_label('cuda', 'true')

    set_state('kubernetes-worker.gpu.enabled')
    set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when_not('nvidia-docker.installed')
@when_not('kubernetes-worker.restart-needed')
def nvidia_departed():
    """CUDA departed, probably because the docker layer switched to a
    non nvidia-docker runtime; turn GPU support back off."""
    disable_gpu()
    remove_state('kubernetes-worker.gpu.enabled')
    set_state('kubernetes-worker.restart-needed')
def disable_gpu():
    """Disable GPU usage on this node by removing the gpu/cuda node labels."""
    hookenv.log('Disabling gpu mode')
    for label in ('gpu', 'cuda'):
        remove_label(label)
@when('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_enabled(kube_control):
    """Tell kubernetes-master this worker is gpu-enabled."""
    kube_control.set_gpu(True)
@when_not('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_not_enabled(kube_control):
    """Tell kubernetes-master this worker is not gpu-enabled."""
    kube_control.set_gpu(False)
@when('kube-control.connected')
def request_kubelet_and_proxy_credentials(kube_control):
    """Request kubelet node authorization with a well formed kubelet user.

    This also implies that we are requesting kube-proxy auth.
    """
    # The kube-control interface is created to support RBAC.
    # At this point we might as well do the right thing and return the
    # hostname even if it will only be used when we enable RBAC.
    nodeuser = 'system:node:{}'.format(get_node_name().lower())
    kube_control.set_auth_request(nodeuser)
@when('kube-control.connected')
def catch_change_in_creds(kube_control):
    """Request a service restart in case credential updates were detected."""
    nodeuser = 'system:node:{}'.format(get_node_name().lower())
    creds = kube_control.get_auth_credentials(nodeuser)
    if creds and creds['user'] == nodeuser:
        # We need to cache the credentials here because if the
        # master changes (master leader dies and replaced by a new one)
        # the new master will have no recollection of our certs.
        db.set('credentials', creds)
        set_state('worker.auth.bootstrapped')
        # Only restart the services when the credentials actually differ
        # from the recorded ones.
        if data_changed('kube-control.creds', creds):
            set_state('kubernetes-worker.restart-needed')
@when_not('kube-control.connected')
def missing_kube_control():
    """Inform the operator they need to add the kube-control relation.

    If deploying via bundle this won't happen, but if the operator is
    upgrading a charm in a deployment that pre-dates the kube-control
    relation, it'll be missing.
    """
    try:
        goal_state = hookenv.goal_state()
    except NotImplementedError:
        goal_state = {}

    if 'kube-control' in goal_state.get('relations', {}):
        status, message = (
            'waiting', 'Waiting for kubernetes-master to become ready')
    else:
        status, message = (
            'blocked',
            'Relate {}:kube-control kubernetes-master:kube-control'.format(
                hookenv.service_name()))
    hookenv.status_set(status, message)
@when('docker.ready')
def fix_iptables_for_docker_1_13():
    """Reset the iptables FORWARD policy to ACCEPT, which Docker >= 1.13
    flips to DROP and thereby breaks pod networking.

    https://github.com/kubernetes/kubernetes/issues/40182
    https://github.com/kubernetes/kubernetes/issues/39823
    """
    check_call(['iptables', '-w', '300', '-P', 'FORWARD', 'ACCEPT'])
def _systemctl_is_active(application):
''' Poll systemctl to determine if the application is running '''
cmd = ['systemctl', 'is-active', application]
try:
raw = check_output(cmd)
return b'active' in raw
except Exception:
return False
def get_node_name():
    """Return this node's registered name: the FQDN on AWS, the plain
    hostname everywhere else."""
    # An explicit cloud-provider in kubelet-extra-args is overridden by
    # whichever cloud integration endpoint is actually ready.
    kubelet_extra_args = parse_extra_args('kubelet-extra-args')
    cloud_provider = kubelet_extra_args.get('cloud-provider', '')
    for flag, provider in (('endpoint.aws.ready', 'aws'),
                           ('endpoint.gcp.ready', 'gce'),
                           ('endpoint.openstack.ready', 'openstack')):
        if is_state(flag):
            cloud_provider = provider
            break
    if cloud_provider == 'aws':
        return getfqdn().lower()
    return gethostname().lower()
class ApplyNodeLabelFailed(Exception):
    """Raised when kubectl could not apply/remove a node label after retrying."""
    pass
def persistent_call(cmd, retry_message):
    """Run ``cmd`` once a second until it exits 0 or 180 seconds elapse.

    Returns True on success, False once the deadline passes.
    """
    deadline = time.time() + 180
    while time.time() < deadline:
        if subprocess.call(cmd) == 0:
            return True
        hookenv.log(retry_message)
        time.sleep(1)
    return False
def set_label(label, value):
    """Apply ``label=value`` to this node via kubectl, retrying on failure.

    Raises ApplyNodeLabelFailed when retries are exhausted.
    """
    cmd = ('kubectl --kubeconfig={0} label node {1} {2}={3} --overwrite'
           .format(kubeconfig_path, get_node_name(), label, value)).split()
    retry = 'Failed to apply label %s=%s. Will retry.' % (label, value)
    if not persistent_call(cmd, retry):
        raise ApplyNodeLabelFailed(retry)
def remove_label(label):
    """Remove ``label`` from this node via kubectl, retrying on failure.

    Raises ApplyNodeLabelFailed when retries are exhausted.
    """
    cmd = ('kubectl --kubeconfig={0} label node {1} {2}-'
           .format(kubeconfig_path, get_node_name(), label)).split()
    retry = 'Failed to remove label {0}. Will retry.'.format(label)
    if not persistent_call(cmd, retry):
        raise ApplyNodeLabelFailed(retry)
@when_any('endpoint.aws.joined',
          'endpoint.gcp.joined')
@when('kube-control.cluster_tag.available')
@when_not('kubernetes-worker.cloud-request-sent')
def request_integration():
    """Ask the related cloud integrator for the tags / permissions this
    worker needs, once per deployment."""
    hookenv.status_set('maintenance', 'requesting cloud integration')
    kube_control = endpoint_from_flag('kube-control.cluster_tag.available')
    cluster_tag = kube_control.get_cluster_tag()
    if is_state('endpoint.aws.joined'):
        cloud = endpoint_from_flag('endpoint.aws.joined')
        # AWS wants the cluster tag on the instance, its security group
        # and its subnet.
        owned = {'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned'}
        cloud.tag_instance(dict(owned))
        cloud.tag_instance_security_group(dict(owned))
        cloud.tag_instance_subnet(dict(owned))
        cloud.enable_object_storage_management(['kubernetes-*'])
    elif is_state('endpoint.gcp.joined'):
        cloud = endpoint_from_flag('endpoint.gcp.joined')
        cloud.label_instance({'k8s-io-cluster-name': cluster_tag})
        cloud.enable_object_storage_management()
        cloud.enable_instance_inspection()
        cloud.enable_dns_management()
    set_state('kubernetes-worker.cloud-request-sent')
    hookenv.status_set('waiting', 'waiting for cloud integration')
@when_none('endpoint.aws.joined',
           'endpoint.gcp.joined')
def clear_requested_integration():
    """Forget the outstanding cloud request once no integrator is related."""
    remove_state('kubernetes-worker.cloud-request-sent')
@when_any('endpoint.aws.ready',
          'endpoint.gcp.ready',
          'endpoint.openstack.ready')
@when_not('kubernetes-worker.restarted-for-cloud')
def restart_for_cloud():
    """Write cloud-specific kubelet snap config, then schedule one restart."""
    # No extra snap config is written for AWS; GCP and OpenStack need it.
    if is_state('endpoint.gcp.ready'):
        _write_gcp_snap_config('kubelet')
    elif is_state('endpoint.openstack.ready'):
        _write_openstack_snap_config('kubelet')
    set_state('kubernetes-worker.restarted-for-cloud')
    set_state('kubernetes-worker.restart-needed')
def _snap_common_path(component):
return Path('/var/snap/{}/common'.format(component))
def _cloud_config_path(component):
    """Location of the cloud-config file consumed by ``component``."""
    return _snap_common_path(component) / 'cloud-config.conf'
def _gcp_creds_path(component):
    """Location of the GCP credentials file for ``component``."""
    return _snap_common_path(component) / 'gcp-creds.json'
def _daemon_env_path(component):
    """Location of the daemon environment file for ``component``."""
    return _snap_common_path(component) / 'environment'
def _write_gcp_snap_config(component):
    """Write GCP credentials and cloud-config for ``component``'s snap."""
    # GCP requires additional credentials setup on disk.
    gcp = endpoint_from_flag('endpoint.gcp.ready')
    creds_path = _gcp_creds_path(component)
    with creds_path.open('w') as fp:
        # Credentials are sensitive; restrict permissions before writing.
        os.fchmod(fp.fileno(), 0o600)
        fp.write(gcp.credentials)
    # token-url = nil makes the services use the creds env var instead of
    # the metadata server; multizone makes the cluster multizone.
    _cloud_config_path(component).write_text('[Global]\n'
                                             'token-url = nil\n'
                                             'multizone = true\n')
    # Export the creds location into the daemon environment file, once.
    daemon_env_path = _daemon_env_path(component)
    if daemon_env_path.exists():
        daemon_env = daemon_env_path.read_text()
        if not daemon_env.endswith('\n'):
            daemon_env += '\n'
    else:
        daemon_env = ''
    if gcp_creds_env_key not in daemon_env:
        daemon_env += '{}={}\n'.format(gcp_creds_env_key, creds_path)
        daemon_env_path.parent.mkdir(parents=True, exist_ok=True)
        daemon_env_path.write_text(daemon_env)
def _write_openstack_snap_config(component):
    """Write the OpenStack cloud-config for ``component``'s snap."""
    # OpenStack requires additional credentials setup.
    openstack = endpoint_from_flag('endpoint.openstack.ready')
    lines = [
        '[Global]',
        'auth-url = {}'.format(openstack.auth_url),
        'username = {}'.format(openstack.username),
        'password = {}'.format(openstack.password),
        'tenant-name = {}'.format(openstack.project_name),
        'domain-name = {}'.format(openstack.user_domain_name),
    ]
    _cloud_config_path(component).write_text('\n'.join(lines))
def get_first_mount(mount_relation):
    """Return the first NFS mount dict from the mount relation, or None.

    The mount relation list is a list of mount-layer relations; each one
    may carry a list of mounts.  The nfs charm only supports a single
    mount for now, so the first nfs-typed entry wins.
    """
    mount_relation_list = mount_relation.mounts()
    if not mount_relation_list:
        return None
    for mount in mount_relation_list:
        # Guard against a missing or empty 'mounts' list (the original
        # indexed [0] unconditionally, raising IndexError) and against a
        # mount entry without an 'fstype' key.
        mounts = mount.get('mounts') or []
        if mounts and mounts[0].get('fstype') == 'nfs':
            return mounts[0]
    return None
@when('nfs.available')
def nfs_state_control(mount):
    ''' Determine if we should remove the state that controls the re-render
    and execution of the nfs-relation-changed event because there
    are changes in the relationship data, and we should re-render any
    configs '''
    mount_data = get_first_mount(mount)
    if not mount_data:
        return
    nfs_relation_data = {
        'options': mount_data['options'],
        'host': mount_data['hostname'],
        'mountpoint': mount_data['mountpoint'],
        'fstype': mount_data['fstype']
    }
    # Re-execute the rendering if the data has changed.
    if data_changed('nfs-config', nfs_relation_data):
        hookenv.log('reconfiguring nfs')
        remove_state('nfs.configured')
@when('nfs.available')
@when_not('nfs.configured')
def nfs_storage(mount):
    '''NFS on kubernetes requires nfs config rendered into a deployment of
    the nfs client provisioner. That will handle the persistent volume claims
    with no persistent volume to back them.'''
    mount_data = get_first_mount(mount)
    if not mount_data:
        return
    # Render the provisioner manifest into the cluster addons directory.
    manifest = '/root/cdk/addons/{}'.format('nfs-provisioner.yaml')
    render('nfs-provisioner.yaml', manifest, mount_data)
    hookenv.log('Creating the nfs provisioner.')
    try:
        kubectl('apply', '-f', manifest)
    except CalledProcessError as e:
        # Log and bail; the next update run will retry.
        hookenv.log(e)
        hookenv.log('Failed to create nfs provisioner. Will attempt again next update.')  # noqa
        return
    set_state('nfs.configured')
|
vfreex/kubernetes
|
cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py
|
Python
|
apache-2.0
| 46,014
|
[
"CDK"
] |
7ea40b6a9a5b07b16571e15445b7bf817fad9560f1853b588586d65cba052a73
|
"""
Acceptance tests for the teams feature.
"""
from ..helpers import UniqueCourseTest
from ...pages.lms.teams import TeamsPage
from nose.plugins.attrib import attr
from ...fixtures.course import CourseFixture
from ...pages.lms.tab_nav import TabNavPage
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.course_info import CourseInfoPage
@attr('shard_5')
class TeamsTabTest(UniqueCourseTest):
    """
    Tests verifying when the Teams tab is present.
    """
    def setUp(self):
        super(TeamsTabTest, self).setUp()
        # Page objects shared by every scenario.
        self.tab_nav = TabNavPage(self.browser)
        self.course_info_page = CourseInfoPage(self.browser, self.course_id)
        self.teams_page = TeamsPage(self.browser, self.course_id)
        # Minimal topic payload for configurations that need a topic.
        self.test_topic = {u"name": u"a topic", u"description": u"test topic", u"id": 0}

    def set_team_configuration(self, configuration, enroll_in_course=True, global_staff=False):
        """
        Sets team configuration on the course and calls auto-auth on the user.
        """
        #pylint: disable=attribute-defined-outside-init
        self.course_fixture = CourseFixture(**self.course_info)
        if configuration:
            self.course_fixture.add_advanced_settings(
                {u"teams_configuration": {u"value": configuration}}
            )
        self.course_fixture.install()
        # Log in via auto-auth, optionally enrolling in the course just built.
        enroll_course_id = self.course_id if enroll_in_course else None
        AutoAuthPage(self.browser, course_id=enroll_course_id, staff=global_staff).visit()
        self.course_info_page.visit()

    def verify_teams_present(self, present):
        """
        Verifies whether or not the teams tab is present. If it should be present, also
        checks the text on the page (to ensure view is working).
        """
        if present:
            self.assertIn("Teams", self.tab_nav.tab_names)
            self.teams_page.visit()
            self.assertEqual("This is the new Teams tab.", self.teams_page.get_body_text())
        else:
            self.assertNotIn("Teams", self.tab_nav.tab_names)

    def test_teams_not_enabled(self):
        """
        Scenario: teams tab should not be present if no team configuration is set
        Given I am enrolled in a course without team configuration
        When I view the course info page
        Then I should not see the Teams tab
        """
        self.set_team_configuration(None)
        self.verify_teams_present(False)

    def test_teams_not_enabled_no_topics(self):
        """
        Scenario: teams tab should not be present if team configuration does not specify topics
        Given I am enrolled in a course with no topics in the team configuration
        When I view the course info page
        Then I should not see the Teams tab
        """
        self.set_team_configuration({u"max_team_size": 10, u"topics": []})
        self.verify_teams_present(False)

    def test_teams_not_enabled_not_enrolled(self):
        """
        Scenario: teams tab should not be present if student is not enrolled in the course
        Given there is a course with team configuration and topics
        And I am not enrolled in that course, and am not global staff
        When I view the course info page
        Then I should not see the Teams tab
        """
        self.set_team_configuration({u"max_team_size": 10, u"topics": [self.test_topic]}, enroll_in_course=False)
        self.verify_teams_present(False)

    def test_teams_enabled(self):
        """
        Scenario: teams tab should be present if user is enrolled in the course and it has team configuration
        Given I am enrolled in a course with team configuration and topics
        When I view the course info page
        Then I should see the Teams tab
        And the correct content should be on the page
        """
        self.set_team_configuration({u"max_team_size": 10, u"topics": [self.test_topic]})
        self.verify_teams_present(True)

    def test_teams_enabled_global_staff(self):
        """
        Scenario: teams tab should be present if user is not enrolled in the course, but is global staff
        Given there is a course with team configuration
        And I am not enrolled in that course, but am global staff
        When I view the course info page
        Then I should see the Teams tab
        And the correct content should be on the page
        """
        self.set_team_configuration(
            {u"max_team_size": 10, u"topics": [self.test_topic]}, enroll_in_course=False, global_staff=True
        )
        self.verify_teams_present(True)
|
shubhdev/openedx
|
common/test/acceptance/tests/lms/test_teams.py
|
Python
|
agpl-3.0
| 4,564
|
[
"VisIt"
] |
e7a014e23777dea6f74a55c7af21982c5ecf8cbbd8940d8502ef3f86426132ec
|
# Expectation Maximization Lecture
# NOTE: this file is a linearized notebook; the %matplotlib line below is
# IPython magic, not valid plain-Python syntax.
%matplotlib inline
import itertools
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import scipy
import scipy.spatial
from scipy import linalg
from sklearn.cluster import KMeans
from sklearn import mixture

np.random.seed( 2507365 ) # We'll set the random number generator's seed so everyone generates the exact same dataset

# Plot a Gaussian
import math
import matplotlib.mlab as mlab

mean = 0
variance = 1
sigma = math.sqrt(variance)
x = np.linspace(-3,3,100)
# NOTE(review): mlab.normpdf was removed in matplotlib >= 3.1 -- confirm the
# pinned matplotlib version before running.
plt.plot(x,mlab.normpdf(x,mean,sigma))
plt.savefig('../images/example_gaussian_1D.svg')

# Flipping coins: pick coin A or B uniformly at random (the latent variable
# z), then flip it num_flips times and record the number of heads.
thetaA = 0.4
thetaB = 0.6
z = []      # latent coin identity per trial
heads = []  # observed head counts per trial
num_flips = 10
for i in range(5):
    if np.random.rand() > 0.5:
        theta = thetaA
        z += ['a']
    else:
        theta = thetaB
        z += ['b']
    heads += [sum( np.random.rand(num_flips) < theta )]

# Scikit Learn GMM + EM example:
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
          .7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Show K-Means performance
def run_with_k( k ):
    """Fit k-means with ``k`` clusters on the global dataset ``X`` and
    return the fitted estimator.

    Side effect: clears the current matplotlib axes (plt.cla()) so any
    subsequent plotting starts from a clean figure.
    """
    est = KMeans(n_clusters=k)
    plt.cla()
    est.fit( np.array(X) )
    # The label/colour computations previously done here were dead code:
    # their only consumer (a scatter call) was commented out.
    return est
# Find clusters with k=2
k = 2
# NOTE(review): color_map must be built after k is assigned; the original
# referenced k before its definition, which raises NameError when this
# notebook export is run top-to-bottom as a script.
color_map = [ np.random.rand(3) for _ in range( k ) ]
est = run_with_k(k)
kmean_labels = est.labels_
kmean_centers = est.cluster_centers_

# Normalize per-cluster distances into RGBA tuples for colouring.
dists = est.transform( np.array(X) )
mx0 = np.max( dists[:,0] )
mx1 = np.max( dists[:,1] )
dists = [ np.array([r[0]/mx0, 0, r[1]/mx1, 1]) for r in dists ]

ax = plt.axes()
points_x = [ el[0] for el in X ]
points_y = [ el[1] for el in X ]
colors = [ color_map[label] for label in kmean_labels ]
# Draw cluster labels
plt.scatter(points_x, points_y, c=colors)
plt.scatter(kmean_centers[:,0], kmean_centers[:,1], c=[ color_map[label] for label in range(k) ], s=100)
plt.savefig('../images/kmeans_k=' + str(k) + '_incorrect.svg')
# Draw cluster distances
plt.scatter(points_x, points_y, c=dists, s=75)
plt.savefig('../images/kmeans_k=' + str(k) + '_incorrect_dists.svg')
# Show different # latent variables for EM
# Select the best GMM (by BIC) over the configured covariance types and
# component counts, then plot the winning mixture as ellipses.
lowest_bic = np.infty
bic = []
#n_components_range = range(1, 7)
n_components_range = range(2, 3)
cv_types = ['spherical', 'tied', 'diag', 'full']
#cv_types = ['diag']
cv_types = ['full']
for cv_type in cv_types:
    for n_components in n_components_range:
        # Fit a mixture of Gaussians with EM
        # NOTE(review): mixture.GMM is the pre-0.18 scikit-learn API
        # (superseded by GaussianMixture) -- confirm the pinned sklearn.
        gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type )
        gmm.fit(X)
        bic.append(gmm.bic(X))
        if bic[-1] < lowest_bic:
            lowest_bic = bic[-1]
            best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []
# Plot the BIC scores
#spl = plt.subplot(2, 1, 1)
# for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
#     xpos = np.array(n_components_range) + .2 * (i - 2)
#     bars.append(plt.bar(xpos, bic[i * len(n_components_range):
#                                   (i + 1) * len(n_components_range)],
#                         width=.2, color=color))
#     plt.xticks(n_components_range)
#     plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
#     plt.title('BIC score per model')
#     xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
#         .2 * np.floor(bic.argmin() / len(n_components_range))
#     plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
#     spl.set_xlabel('Number of components')
#     spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
#splot = plt.subplot(2, 1, 2)
splot = plt.subplot(111)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
                                             color_iter)):
    v, w = linalg.eigh(covar)
    # for diag or spherical
    #v, w = linalg.eigh(np.diagflat(covar))
    if not np.any(Y_ == i):
        continue
    plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
    # Plot an ellipse to show the Gaussian component
    angle = np.arctan2(w[0][1], w[0][0])
    angle = 180 * angle / np.pi  # convert to degrees
    v *= 4
    ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
    ell.set_clip_box(splot.bbox)
    ell.set_alpha(.5)
    splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
#plt.savefig('../images/em_k=' + str(k) + '_cov_spherical.svg')
plt.savefig('../images/em_k=' + str(k) + '_cov_full.svg')
#plt.show()
#plt.savefig('../images/em_k=' + str(k) + '_cov_spherical.png')
# Walk through EM step by step:
# Refit with an increasing iteration cap (n_iter=1..7) and save one figure
# per step so the lecture can show EM converging.
lowest_bic = np.infty
bic = []
#n_components_range = range(1, 7)
n_components_range = range(2, 3)
cv_types = ['spherical', 'tied', 'diag', 'full']
#cv_types = ['diag']
cv_types = ['full']
for iteration in range(1, 8):
    for cv_type in cv_types:
        for n_components in n_components_range:
            # Fit a mixture of Gaussians with EM
            gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type, verbose=2, n_iter=iteration)
            gmm.fit(X)
            bic.append(gmm.bic(X))
            if bic[-1] < lowest_bic:
                lowest_bic = bic[-1]
                best_gmm = gmm
    # Plotting
    color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
    clf = best_gmm
    plt.cla()
    splot = plt.subplot(111)
    Y_ = clf.predict(X)
    for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
                                                 color_iter)):
        v, w = linalg.eigh(covar)
        # for diag or spherical
        #v, w = linalg.eigh(np.diagflat(covar))
        if not np.any(Y_ == i):
            continue
        plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
        # Plot an ellipse to show the Gaussian component
        angle = np.arctan2(w[0][1], w[0][0])
        angle = 180 * angle / np.pi  # convert to degrees
        v *= 4
        ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
        ell.set_clip_box(splot.bbox)
        ell.set_alpha(.5)
        splot.add_artist(ell)
    plt.xlim(-10, 10)
    plt.ylim(-3, 6)
    plt.xticks(())
    plt.yticks(())
    plt.title('Selected GMM: full model, 2 components')
    plt.subplots_adjust(hspace=.35, bottom=.02)
    #plt.savefig('../images/em_k=' + str(k) + '_cov_spherical.svg')
    plt.savefig('../images/em_k=' + str(k) + '_cov_full_step=' + str(iteration) + '.svg')
bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []
|
kephale/TuftsCOMP135_Spring2016
|
Lecture16/notebooks/Lecture16.py
|
Python
|
apache-2.0
| 7,354
|
[
"Gaussian"
] |
16d19d6f8d3fc9cdc036c6d56890e1cb239a97ffba44a12a98f6d6032186c967
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import glob
import os
import subprocess
from io import open
import sys
from setuptools import setup, find_packages, Extension
# Bootstrap numpy at setup time: the spglib extension build needs numpy's
# include dirs before setup() runs.
try:
    from numpy.distutils.misc_util import get_numpy_include_dirs
except ImportError:
    print("numpy.distutils.misc_util cannot be imported. Attempting to "
          "install...")
    # NOTE(review): easy_install is long-deprecated; confirm this fallback
    # is still expected to work in the target environments.
    subprocess.call(["easy_install", "numpy"])
    from numpy.distutils.misc_util import get_numpy_include_dirs

# Absolute directory containing this setup.py.
SETUP_PTH = os.path.dirname(os.path.abspath(__file__))
def get_spglib_ext():
    """
    Build the Extension object for the bundled spglib C sources.
    """
    spglibs = glob.glob(os.path.join(SETUP_PTH, "dependencies", "spglib*"))
    if len(spglibs) != 1:
        raise ValueError("Incorrect number of spglib found in dependencies. "
                         "Expected 1, got %d" % len(spglibs))
    spglibdir = spglibs[0]

    # Compile every C file under src/ together with the python wrapper.
    spgsrcdir = os.path.join(spglibdir, "src")
    sources = glob.glob(os.path.join(spgsrcdir, "*.c"))
    # Python 3 builds need this warning demoted so the old C sources compile.
    c_opt = (["-Wno-error=declaration-after-statement"]
             if sys.version_info.major >= 3 else [])
    return Extension(
        "pymatgen._spglib",
        include_dirs=[spgsrcdir] + get_numpy_include_dirs(),
        sources=[os.path.join(spglibdir, "_spglib.c")] + sources,
        extra_compile_args=c_opt)
# The long description is the README minus its first (title) line.
with open("README.rst") as f:
    long_desc = f.read()
    ind = long_desc.find("\n")
    long_desc = long_desc[ind + 1:]

setup(
    name="pymatgen",
    packages=find_packages(),
    version="3.1.9",
    install_requires=["numpy>=1.8", "pyhull>=1.5.3", "six", "prettytable",
                      "atomicfile", "requests", "pybtex", "pyyaml",
                      "monty>=0.6.5", "scipy>=0.10"],
    extras_require={"plotting": ["matplotlib>=1.1", "prettyplotlib"],
                    "ase_adaptor": ["ase>=3.3"],
                    "vis": ["vtk>=6.0.0"],
                    "abinitio": ["pydispatcher>=2.0.3", "apscheduler==2.1.0"]},
    # Non-python data files shipped inside the package.
    package_data={"pymatgen.core": ["*.json"],
                  "pymatgen.analysis": ["*.yaml", "*.csv"],
                  "pymatgen.io.vasp": ["*.yaml"],
                  "pymatgen.io.feff": ["*.yaml"],
                  "pymatgen.symmetry": ["*.yaml"],
                  "pymatgen.entries": ["*.yaml"],
                  "pymatgen.structure_prediction": ["data/*.json"],
                  "pymatgen.vis": ["ElementColorSchemes.yaml"],
                  "pymatgen.command_line": ["OxideTersoffPotentials"],
                  "pymatgen.analysis.defects": ["*.json"],
                  "pymatgen.analysis.diffraction": ["*.json"],
                  "pymatgen.util": ["structures/*.json"]},
    author="Shyue Ping Ong, Anubhav Jain, Michael Kocher, Geoffroy Hautier,"
    "William Davidson Richards, Stephen Dacek, Dan Gunter, Shreyas Cholia, "
    "Matteo Giantomassi, Vincent L Chevrier, Rickard Armiento",
    author_email="ongsp@ucsd.edu, anubhavj@mit.edu, mpkocher@lbnl.gov, "
    "geoffroy.hautier@uclouvain.be, wrichard@mit.edu, sdacek@mit.edu, "
    "dkgunter@lbl.gov, scholia@lbl.gov, gmatteo@gmail.com, "
    "vincentchevrier@gmail.com, armiento@mit.edu",
    maintainer="Shyue Ping Ong",
    url="https://github.com/materialsproject/pymatgen/",
    license="MIT",
    description="Python Materials Genomics is a robust materials "
    "analysis code that defines core object representations for "
    "structures and molecules with support for many electronic "
    "structure codes. It is currently the core analysis code "
    "powering the Materials Project "
    "(https://www.materialsproject.org).",
    long_description=long_desc,
    keywords=["VASP", "gaussian", "ABINIT", "nwchem", "materials", "project",
              "electronic", "structure", "analysis", "phase", "diagrams"],
    classifiers=[
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Development Status :: 4 - Beta",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Topic :: Scientific/Engineering :: Information Analysis",
        "Topic :: Scientific/Engineering :: Physics",
        "Topic :: Scientific/Engineering :: Chemistry",
        "Topic :: Software Development :: Libraries :: Python Modules"
    ],
    # Compile the bundled spglib C extension.
    ext_modules=[get_spglib_ext()],
    scripts=glob.glob(os.path.join(SETUP_PTH, "scripts", "*"))
)
|
sonium0/pymatgen
|
setup.py
|
Python
|
mit
| 4,715
|
[
"ABINIT",
"ASE",
"FEFF",
"Gaussian",
"NWChem",
"VASP",
"VTK",
"pymatgen"
] |
ca6eaa8379eb51cac770636c9ac2497aa8aca27f5faa025c1ed9fcf14e3068c8
|
# ===============================================================================
# Copyright 2021 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
""" invdisttree.py: inverse-distance-weighted interpolation using KDTree
fast, solid, local
"""
import numpy as np
from scipy.spatial import cKDTree as KDTree
# ...............................................................................
class Invdisttree:
    """Inverse-distance-weighted interpolation over a KDTree.

    Adapted from
    https://stackoverflow.com/questions/3104781/inverse-distance-weighted-idw-interpolation-with-python

    Usage::

        tree = Invdisttree(X, z)             # data points and values
        zq = tree(q, nnear=3, eps=0, p=1)    # interpolate at query points q

    Each query point is interpolated from its ``nnear`` nearest data
    points, weighted by 1/distance**p; an exact (or near-exact) hit
    returns that data point's value directly.  ``eps`` permits approximate
    nearest neighbours (dist <= (1 + eps) * true nearest); ``weights``
    optionally multiplies the 1/d**p terms; ``stat`` accumulates weight
    statistics in ``self.wsum`` / ``self.wn``.

    Practical notes: rescale coordinates so each axis has a comparable
    spread before building the tree (Euclidean distance mixes the axes),
    and note that IDW is scale-free around query points -- only distance
    ratios matter.
    """

    # anykernel( dj / av dj ) is also scale-free
    # error analysis, |f(x) - idw(x)| ? todo: regular grid, nnear ndim+1, 2*ndim
    def __init__(self, X, z, leafsize=10, stat=0):
        # One value per data point is required.
        assert len(X) == len(z), "len(X) %d != len(z) %d" % (len(X), len(z))
        self.tree = KDTree(X, leafsize=leafsize)  # build the tree
        self.z = z
        self.stat = stat
        self.wn = 0
        self.wsum = None

    def __call__(self, q, nnear=6, eps=0, p=1, weights=None):
        """Interpolate values at query points ``q`` (one point or a batch)."""
        q = np.asarray(q)
        single_query = q.ndim == 1
        if single_query:
            q = np.array([q])
        if self.wsum is None:
            self.wsum = np.zeros(nnear)
        self.distances, self.ix = self.tree.query(q, k=nnear, eps=eps)
        interpol = np.zeros((len(self.distances),) + np.shape(self.z[0]))
        for j, (dist, ix) in enumerate(zip(self.distances, self.ix)):
            if nnear == 1:
                wz = self.z[ix]
            elif dist[0] < 1e-10:
                # Query coincides with a data point: return its value.
                wz = self.z[ix[0]]
            else:
                # Weight the nnear values by inverse distance**p.
                w = 1 / dist ** p
                if weights is not None:
                    w *= weights[ix]  # >= 0
                w /= np.sum(w)
                wz = np.dot(w, self.z[ix])
                if self.stat:
                    self.wn += 1
                    self.wsum += w
            interpol[j] = wz
        return interpol[0] if single_query else interpol
if __name__ == "__main__":
    # Smoke test: interpolate a synthetic "rolling hills" terrain and
    # report average distances, weights and interpolation error.
    import sys
    N = 10000
    Ndim = 2
    Nask = N  # N Nask 1e5: 24 sec 2d, 27 sec 3d on mac g4 ppc
    Nnear = 8  # 8 2d, 11 3d => 5 % chance one-sided -- Wendel, mathoverflow.com
    leafsize = 10
    eps = 0.1  # approximate nearest, dist <= (1 + eps) * true nearest
    p = 1  # weights ~ 1 / distance**p
    cycle = 0.25
    seed = 1
    # exec "\n".join( sys.argv[1:] ) # python this.py N= ...
    np.random.seed(seed)
    np.set_printoptions(3, threshold=100, suppress=True)  # .3f
    print(
        "\nInvdisttree: N %d Ndim %d Nask %d Nnear %d leafsize %d eps %.2g p %.2g"
        % (N, Ndim, Nask, Nnear, leafsize, eps, p)
    )

    def terrain(x):
        """~ rolling hills"""
        return np.sin((2 * np.pi / cycle) * np.mean(x, axis=-1))

    known = np.random.uniform(size=(N, Ndim)) ** 0.5  # 1/(p+1): density x^p
    z = terrain(known)
    ask = np.random.uniform(size=(Nask, Ndim))
    invdisttree = Invdisttree(known, z, leafsize=leafsize, stat=1)
    interpol = invdisttree(ask, nnear=Nnear, eps=eps, p=p)
    print(
        "average distances to nearest points: %s"
        % np.mean(invdisttree.distances, axis=0)
    )
    print("average weights: %s" % (invdisttree.wsum / invdisttree.wn))
    # see Wikipedia Zipf's law
    err = np.abs(terrain(ask) - interpol)
    print("average |terrain() - interpolated|: %.2g" % np.mean(err))
    # print "interpolate a single point: %.2g" % \
    #     invdisttree( known[0], nnear=Nnear, eps=eps )
# ============= EOF =============================================
|
USGSDenverPychron/pychron
|
pychron/core/stats/idw.py
|
Python
|
apache-2.0
| 6,555
|
[
"Gaussian"
] |
209dd1593c93894ca2361025dbade01162b0cd6400d9fc233e3b6e8bc127e09b
|
# $Id$
#
# Copyright (C)2003-2010 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" Hybrid EState-VSA descriptors (like the MOE VSA descriptors)
"""
import numpy
from rdkit.Chem.EState.EState import EStateIndices as EStateIndices_
from rdkit.Chem.MolSurf import _LabuteHelper as VSAContribs_
import bisect
# Default VSA bin edges, chosen using the PP3K solubility data set: an
# arbitrary number of bins whose boundaries give an approximately equal
# number of atoms per bin.
vsaBins=[4.78,5.00,5.410,5.740,6.00,6.07,6.45,7.00,11.0]


def VSA_EState_(mol, bins=None, force=1):
    """ *Internal Use Only*

    Sum per-atom EState indices into bins keyed on the atoms' VSA
    (Labute surface-area) contributions.  Result is cached on
    mol._vsaEState unless force is true.
    """
    if not force and hasattr(mol, '_vsaEState'):
        return mol._vsaEState
    # NOTE(review): the default bins here are estateBins rather than
    # vsaBins even though the binning key is the VSA contribution; confirm
    # against upstream RDKit history before changing it, since altering
    # the bins changes descriptor values.
    if bins is None:
        bins = estateBins
    propContribs = EStateIndices_(mol, force=force)
    volContribs = VSAContribs_(mol)
    # numpy.float was an alias for the builtin float and was removed in
    # numpy 1.24; plain float keeps the identical float64 dtype.
    ans = numpy.zeros(len(bins) + 1, float)
    for i, prop in enumerate(propContribs):
        if prop is not None:
            bin = bisect.bisect_right(bins, volContribs[i + 1])
            ans[bin] += prop
    mol._vsaEState = ans
    return ans
# Default EState bin edges, chosen using the PP3K solubility data set: 10
# bins whose boundaries give an approximately equal number of atoms per bin.
estateBins=[-0.390,0.290,0.717,1.165,1.540,1.807,2.05,4.69,9.17,15.0]


def EState_VSA_(mol, bins=None, force=1):
    """ *Internal Use Only*

    Sum per-atom VSA (Labute surface-area) contributions into bins keyed
    on the atoms' EState indices.  Result is cached on mol._eStateVSA
    unless force is true.
    """
    if not force and hasattr(mol, '_eStateVSA'):
        return mol._eStateVSA
    if bins is None:
        bins = estateBins
    propContribs = EStateIndices_(mol, force=force)
    volContribs = VSAContribs_(mol)
    # numpy.float was an alias for the builtin float and was removed in
    # numpy 1.24; plain float keeps the identical float64 dtype.
    ans = numpy.zeros(len(bins) + 1, float)
    for i, prop in enumerate(propContribs):
        if prop is not None:
            bin = bisect.bisect_right(bins, prop)
            ans[bin] += volContribs[i + 1]
    mol._eStateVSA = ans
    return ans
def _InstallDescriptors():
    """Create the VSA_EState1..N and EState_VSA1..N descriptor functions
    (one per bin, plus a final open-ended bin) and install them in this
    module's global namespace."""
    for i in range(len(vsaBins)):
        # y=i freezes the bin index at definition time, avoiding the
        # late-binding-closure pitfall.
        fn = lambda x,y=i:VSA_EState_(x,force=0)[y]
        if i > 0:
            fn.__doc__="VSA EState Descriptor %d (% 4.2f <= x < % 4.2f)"%(i+1,vsaBins[i-1],vsaBins[i])
        else:
            fn.__doc__="VSA EState Descriptor %d (-inf < x < % 4.2f)"%(i+1,vsaBins[i])
        name="VSA_EState%d"%(i+1)
        fn.version="1.0.0"
        globals()[name]=fn
    # One extra descriptor for the open-ended last bin.
    i+=1
    fn = lambda x,y=i:VSA_EState_(x,force=0)[y]
    fn.__doc__="VSA EState Descriptor %d (% 4.2f <= x < inf)"%(i+1,vsaBins[i-1])
    name="VSA_EState%d"%(i+1)
    fn.version="1.0.0"
    globals()[name]=fn
    fn=None
    for i in range(len(estateBins)):
        fn = lambda x,y=i:EState_VSA_(x,force=0)[y]
        if i > 0:
            fn.__doc__="EState VSA Descriptor %d (% 4.2f <= x < % 4.2f)"%(i+1,estateBins[i-1],estateBins[i])
        else:
            fn.__doc__="EState VSA Descriptor %d (-inf < x < % 4.2f)"%(i+1,estateBins[i])
        name="EState_VSA%d"%(i+1)
        fn.version="1.0.1"
        globals()[name]=fn
    # Open-ended last bin for the EState_VSA family as well.
    i+=1
    fn = lambda x,y=i:EState_VSA_(x,force=0)[y]
    fn.__doc__="EState VSA Descriptor %d (% 4.2f <= x < inf)"%(i+1,estateBins[i-1])
    name="EState_VSA%d"%(i+1)
    fn.version="1.0.1"
    globals()[name]=fn
    fn=None
# Change log for EState_VSA descriptors:
# version 1.0.1: optimizations, values unaffected
_InstallDescriptors()
|
strets123/rdkit
|
rdkit/Chem/EState/EState_VSA.py
|
Python
|
bsd-3-clause
| 3,368
|
[
"MOE",
"RDKit"
] |
4423c18fbb0705153f51b1e1ae0166a683a39c50c69861a54145c60fb07a7e1b
|
import sys
import warnings
# _nemo is the compiled NeMo extension module that this file wraps.
import _nemo as _n
# Initialise the extension at import time.
# NOTE(review): presumably this must run before any Configuration /
# Network / Simulation object is created — confirm against _nemo docs.
_n.init()
def deprecated(func):
    """This is a decorator which can be used to mark functions
    as deprecated. It will result in a warning being emitted
    when the function is used."""
    import functools

    @functools.wraps(func)  # copies __name__, __doc__ and __dict__ (as the
    def new_func(*args, **kwargs):  # manual version did) plus __module__ etc.
        # stacklevel=2 attributes the warning to the caller's line
        # rather than to this wrapper.
        warnings.warn("Call to deprecated function %s." % func.__name__,
                      category=DeprecationWarning, stacklevel=2)
        return func(*args, **kwargs)
    return new_func
# _n.Simulation.step = step
class Configuration:
    # Thin delegating wrapper around the compiled _nemo Configuration
    # type; every method forwards directly to the underlying object.

    def __init__(self):
        # The wrapped extension object.
        self._conf = _n.Configuration()

    def set_stdp_function(self, prefire, postfire, minWeight, maxWeight):
        return self._conf.set_stdp_function(prefire, postfire, minWeight, maxWeight)

    def set_cuda_backend(self, deviceNumber):
        return self._conf.set_cuda_backend(deviceNumber)

    def set_cpu_backend(self):
        return self._conf.set_cpu_backend()

    def backend_description(self):
        return self._conf.backend_description()

    # Mirror the extension type's documentation onto the wrappers so
    # that help() shows the real signatures and descriptions.
    __doc__ = _n.Configuration.__doc__
    set_stdp_function.__doc__ = _n.Configuration.set_stdp_function.__doc__
    set_cuda_backend.__doc__ = _n.Configuration.set_cuda_backend.__doc__
    set_cpu_backend.__doc__ = _n.Configuration.set_cpu_backend.__doc__
    backend_description.__doc__ = _n.Configuration.backend_description.__doc__
class Network():
    # Wrapper around the compiled _nemo Network type.  add_neuron_type
    # keeps a bijective id<->name map so add_neuron can unpack the right
    # parameter/state dictionaries for each known neuron model.

    def __init__(self):
        self._net = _n.Network()

    def add_neuron_type(self, name):
        """Register neuron type *name* and return its integer code.

        Registering the same name again returns the original code.
        """
        if not hasattr(self, 'includedTypes'):
            res = self._net.add_neuron_type(name)
            self.includedTypes = {res: name, name: res}  # Keep a bijective dictionary for later
        else:
            if name in self.includedTypes:
                res = self.includedTypes[name]
            else:
                res = self._net.add_neuron_type(name)
                self.includedTypes[res] = name
                self.includedTypes[name] = res
        return res

    def add_neuron(self, neuron_type, neuron_idx, paramDict=None, stateDict=None):
        """
        Add one or more neurons to the network.
        Inputs:
        neuron_type -- Integer coding the type of the neuron(s). See Network.add_neuron_type.
        neuron_idx -- Neuron index.
        paramDict -- Dictionary of named neuron parameters. See NeMo manual.
        stateDict -- Dictionary of named neuron state variables. See NeMo manual.

        Raises RuntimeError when the type was not registered first.
        """
        try:
            if neuron_type not in self.includedTypes:
                raise RuntimeError('That neuron type is not in this network. Use add_neuron_type first.')
        except AttributeError:
            # includedTypes does not exist yet: nothing was ever registered.
            raise RuntimeError('No neuron types in this network. Use add_neuron_type first.')
        if self.includedTypes[neuron_type] == 'Izhikevich':
            self._net.add_neuron(neuron_type, neuron_idx, paramDict['a'], paramDict['b'], paramDict['c'],
                                 paramDict['d'], paramDict['sigma'], stateDict['u'], stateDict['v'])
        elif self.includedTypes[neuron_type] == 'Izhikevich2007DopSyn':
            self._net.add_neuron(neuron_type, neuron_idx, paramDict['a'], paramDict['b'], paramDict['c'],
                                 paramDict['d'], paramDict['vpeak'], paramDict['Vr'], paramDict['Vt'], paramDict['k'],
                                 paramDict['Cm'], paramDict['sigma'], paramDict['d1'], paramDict['d2'],
                                 paramDict['tMdtOt_exc'], paramDict['G_exc'], paramDict['E_exc'],
                                 paramDict['tMdtOt_inh'], paramDict['G_inh'], paramDict['E_inh'],
                                 stateDict['v'], stateDict['u'], stateDict['ge'], stateDict['gi'])
        elif self.includedTypes[neuron_type] == 'IzhikevichSyn':
            self._net.add_neuron(neuron_type, neuron_idx, paramDict['a'], paramDict['b'], paramDict['c'],
                                 paramDict['d'], paramDict['vpeak'], paramDict['sigma'],
                                 paramDict['tMdtOt_exc'], paramDict['G_exc'], paramDict['E_exc'],
                                 paramDict['tMdtOt_inh'], paramDict['G_inh'], paramDict['E_inh'],
                                 stateDict['v'], stateDict['u'], stateDict['ge'], stateDict['gi'])
        elif self.includedTypes[neuron_type] == 'Izhikevich2007':
            self._net.add_neuron(neuron_type, neuron_idx, paramDict['a'], paramDict['b'], paramDict['c'],
                                 paramDict['d'], paramDict['vpeak'], paramDict['Vr'], paramDict['Vt'], paramDict['k'],
                                 paramDict['Cm'], paramDict['sigma'],
                                 paramDict['tMdtOt_exc'], paramDict['G_exc'], paramDict['E_exc'],
                                 paramDict['tMdtOt_inh'], paramDict['G_inh'], paramDict['E_inh'],
                                 stateDict['v'], stateDict['u'], stateDict['ge'], stateDict['gi'])
        elif self.includedTypes[neuron_type] == 'IF_curr_exp':
            self._net.add_neuron(neuron_type, neuron_idx, paramDict['v_rest'], paramDict['v_reset'],
                                 paramDict['c_m'], paramDict['tau_m'], paramDict['tau_refrac'], paramDict['tau_synE'],
                                 paramDict['tau_synI'], paramDict['v_thresh'], paramDict['I_offset'],
                                 stateDict['v'], stateDict['ie'], stateDict['ii'], stateDict['lastfired'])
        elif self.includedTypes[neuron_type] == 'Input' or self.includedTypes[neuron_type] == 'InhomogeneousPoissonSource':
            self._net.add_neuron(neuron_type, neuron_idx)
        elif self.includedTypes[neuron_type] == 'PoissonSource':
            self._net.add_neuron(neuron_type, neuron_idx, paramDict['lambda'])
        elif self.includedTypes[neuron_type] == 'BayesianNeuron':
            self._net.add_neuron(neuron_type, neuron_idx, paramDict['rOn'], paramDict['rOff'],
                                 paramDict['g0'], stateDict['L'], stateDict['G'])
        elif self.includedTypes[neuron_type] == 'Kuramoto':
            self._net.add_neuron(neuron_type, neuron_idx, paramDict['omega'], paramDict['Cmean'], stateDict['theta'])
        elif self.includedTypes[neuron_type] == 'KuramotoLag' or self.includedTypes[neuron_type] == 'KuramotoLagFloat':
            self._net.add_neuron(neuron_type, neuron_idx, paramDict['omega'], paramDict['Cmean'],
                                 paramDict['alpha'], stateDict['theta'])
        elif self.includedTypes[neuron_type] == 'IF_lin':
            self._net.add_neuron(neuron_type, neuron_idx, paramDict['c_m'], paramDict['tau_refrac'],
                                 paramDict['v_thresh'], paramDict['I_offset'],
                                 stateDict['v'], stateDict['lastfired'])
        else:
            # Fallback for models not special-cased above: pass parameter
            # and state values positionally.
            # NOTE(review): this relies on dictionary iteration order
            # matching the model's expected argument order — confirm
            # callers supply ordered dicts.
            # FIX: dict.values() returns a view on Python 3, which does
            # not support '+'; materialise both as lists first (this is
            # also exactly what Python 2 returned).
            argList = list(paramDict.values()) + list(stateDict.values())
            self._net.add_neuron(neuron_type, neuron_idx, *argList)

    def add_synapse(self, source, target, delay, weight, plastic):
        return self._net.add_synapse(source, target, delay, weight, plastic)

    def set_neuron(self, *args, **kwargs):
        return self._net.set_neuron(*args, **kwargs)

    def get_neuron_state(self, *args, **kwargs):
        return self._net.get_neuron_state(*args, **kwargs)

    def get_neuron_parameter(self, *args, **kwargs):
        return self._net.get_neuron_parameter(*args, **kwargs)

    def set_neuron_state(self, *args, **kwargs):
        return self._net.set_neuron_state(*args, **kwargs)

    def set_neuron_parameter(self, *args, **kwargs):
        return self._net.set_neuron_parameter(*args, **kwargs)

    def get_synapse_source(self, *args, **kwargs):
        return self._net.get_synapse_source(*args, **kwargs)

    def neuron_count(self, *args, **kwargs):
        return self._net.neuron_count(*args, **kwargs)

    def get_synapses_from(self, *args, **kwargs):
        return self._net.get_synapses_from(*args, **kwargs)

    def get_synapse_target(self, *args, **kwargs):
        return self._net.get_synapse_target(*args, **kwargs)

    def get_synapse_delay(self, *args, **kwargs):
        return self._net.get_synapse_delay(*args, **kwargs)

    def get_synapse_weight(self, *args, **kwargs):
        return self._net.get_synapse_weight(*args, **kwargs)

    def get_synapse_plastic(self, *args, **kwargs):
        return self._net.get_synapse_plastic(*args, **kwargs)

    # Mirror the extension type's documentation onto the wrappers.
    __doc__ = _n.Network.__doc__
    add_neuron_type.__doc__ = _n.Network.add_neuron_type.__doc__
    add_synapse.__doc__ = _n.Network.add_synapse.__doc__
    set_neuron.__doc__ = _n.Network.set_neuron.__doc__
    get_neuron_state.__doc__ = _n.Network.get_neuron_state.__doc__
    get_neuron_parameter.__doc__ = _n.Network.get_neuron_parameter.__doc__
    set_neuron_state.__doc__ = _n.Network.set_neuron_state.__doc__
    set_neuron_parameter.__doc__ = _n.Network.set_neuron_parameter.__doc__
    get_synapse_source.__doc__ = _n.Network.get_synapse_source.__doc__
    neuron_count.__doc__ = _n.Network.neuron_count.__doc__
    get_synapses_from.__doc__ = _n.Network.get_synapses_from.__doc__
    get_synapse_target.__doc__ = _n.Network.get_synapse_target.__doc__
    get_synapse_delay.__doc__ = _n.Network.get_synapse_delay.__doc__
    get_synapse_weight.__doc__ = _n.Network.get_synapse_weight.__doc__
    get_synapse_plastic.__doc__ = _n.Network.get_synapse_plastic.__doc__
class Simulation:
    # Python-side wrapper for _nemo.Simulation.  step() is the single
    # convenient entry point dispatching to the four specialised
    # stepping calls.

    def __init__(self, net, conf):
        self._sim = _n.Simulation(net._net, conf._conf)

    def step_noinput(self):
        """Advance the simulation one cycle (1ms) with no external input."""
        return self._sim.step_noinput()

    def step_f(self, fstim):
        """Advance one cycle (1ms).

        fstim is a list of neurons forced to fire this cycle.
        """
        return self._sim.step_f(fstim)

    def step_i(self, istim):
        """Advance one cycle (1ms).

        istim is a list of (neuron index, current) pairs used as
        external stimulus for the network.
        """
        return self._sim.step_i(istim)

    def step_fi(self, fstim, istim):
        """Advance one cycle (1ms) with both forced firings (fstim) and
        current stimulus (istim, a list of (neuron index, current) pairs).
        """
        return self._sim.step_fi(fstim, istim)

    def step(self, fstim=None, istim=None):
        """Advance the simulation one cycle (1ms).

        fstim -- optional neuron index, or list of indices, forced to fire
        istim -- optional list of (neuron index, current) pairs

        Dispatches to the step_* variant matching the supplied inputs.
        """
        # A bare int is promoted to a one-element firing list.
        forced = [fstim] if isinstance(fstim, int) else fstim
        if not istim:
            if fstim is None:
                return self._sim.step_noinput()
            return self._sim.step_f(forced)
        head = istim[0]
        if not isinstance(head, tuple) or len(head) != 2:
            raise ValueError('Current input must be specified as a list of (idx, I) pairs, e.g. [(idx1, I1), (idx2, I2), ...]')
        if fstim is None:
            return self._sim.step_i(istim)
        return self._sim.step_fi(forced, istim)

    def propagate(self, *args, **kwargs):
        return self._sim.propagate(*args, **kwargs)

    def apply_stdp(self, *args, **kwargs):
        return self._sim.apply_stdp(*args, **kwargs)

    def set_neuron(self, *args, **kwargs):
        return self._sim.set_neuron(*args, **kwargs)

    def get_neuron_state(self, idx, varno):
        return self._sim.get_neuron_state(idx, varno)

    def get_neuron_parameter(self, idx, varno):
        return self._sim.get_neuron_parameter(idx, varno)

    def set_neuron_state(self, idx, varno, val):
        return self._sim.set_neuron_state(idx, varno, val)

    def set_neuron_parameter(self, idx, varno, val):
        return self._sim.set_neuron_parameter(idx, varno, val)

    def get_membrane_potential(self, idx):
        return self._sim.get_membrane_potential(idx)

    def get_synapses_from(self, source):
        return self._sim.get_synapses_from(source)

    def get_synapse_source(self, synapse):
        return self._sim.get_synapse_source(synapse)

    def get_synapse_target(self, synapse):
        return self._sim.get_synapse_target(synapse)

    def get_synapse_delay(self, synapse):
        return self._sim.get_synapse_delay(synapse)

    def get_synapse_weight(self, synapse):
        return self._sim.get_synapse_weight(synapse)

    def get_synapse_plastic(self, synapse):
        return self._sim.get_synapse_plastic(synapse)

    def elapsed_wallclock(self):
        return self._sim.elapsed_wallclock()

    def elapsed_simulation(self):
        return self._sim.elapsed_simulation()

    def reset_timer(self):
        return self._sim.reset_timer()

    # Mirror the extension type's documentation onto the wrappers.
    __doc__ = _n.Simulation.__doc__
    # Some builds of the extension lack documentation for propagate.
    try:
        propagate.__doc__ = _n.Simulation.propagate.__doc__
    except AttributeError:
        pass
    apply_stdp.__doc__ = _n.Simulation.apply_stdp.__doc__
    set_neuron.__doc__ = _n.Simulation.set_neuron.__doc__
    get_neuron_state.__doc__ = _n.Simulation.get_neuron_state.__doc__
    get_neuron_parameter.__doc__ = _n.Simulation.get_neuron_parameter.__doc__
    set_neuron_state.__doc__ = _n.Simulation.set_neuron_state.__doc__
    set_neuron_parameter.__doc__ = _n.Simulation.set_neuron_parameter.__doc__
    get_membrane_potential.__doc__ = _n.Simulation.get_membrane_potential.__doc__
    get_synapses_from.__doc__ = _n.Simulation.get_synapses_from.__doc__
    get_synapse_source.__doc__ = _n.Simulation.get_synapse_source.__doc__
    get_synapse_target.__doc__ = _n.Simulation.get_synapse_target.__doc__
    get_synapse_delay.__doc__ = _n.Simulation.get_synapse_delay.__doc__
    get_synapse_weight.__doc__ = _n.Simulation.get_synapse_weight.__doc__
    get_synapse_plastic.__doc__ = _n.Simulation.get_synapse_plastic.__doc__
    elapsed_wallclock.__doc__ = _n.Simulation.elapsed_wallclock.__doc__
    elapsed_simulation.__doc__ = _n.Simulation.elapsed_simulation.__doc__
    reset_timer.__doc__ = _n.Simulation.reset_timer.__doc__
|
brainstudio-team/NeMo
|
src/api/python/wrappers.py
|
Python
|
gpl-2.0
| 14,847
|
[
"NEURON"
] |
117c52e14ea040f208309871dce23ef624241f67f2ce5cc1632f4ce560603a86
|
#!/usr/bin/env python
#coding: utf-8
#### CLASSES ####
class fasta():
    """Minimal FASTA container: maps sequence headers to sequences."""

    def __init__(self):
        # header (str) -> sequence (str)
        self.fastaDict = {}

    #### FUNCTIONS ####
    def fasta_reader(self, fastaFile):
        """Parse *fastaFile* and load it into self.fastaDict.

        Headers are stripped of the leading '>' and truncated at the
        first space.
        """
        fastaDict = {}
        subHeader("Fasta reader")
        # FIX: the file handle was previously never closed; a with-block
        # releases it even if parsing raises.
        with open(fastaFile) as fh:
            # ditch the boolean (x[0]) and just keep the header or sequence since
            # we know they alternate.
            faiter = (x[1] for x in itertools.groupby(fh, lambda line: line[0] == ">"))
            for header in faiter:
                # drop the ">".  FIX: the next() builtin (Python 2.6+)
                # replaces the .next() method removed in Python 3.
                header = next(header)[1:].strip()
                # drop the info
                header = header.split(" ")[0]
                info("Reading " + header + "...")
                # join all sequence lines to one.
                seq = "".join(s.strip() for s in next(faiter))
                fastaDict[header] = seq
        self.fastaDict = fastaDict

    def write_fasta(self, outFilePath):
        """Write self.fastaDict to *outFilePath* in FASTA format."""
        with open(outFilePath, "w") as outFile:
            # FIX: items() works on both Python 2 and 3 (iteritems does not).
            for header, seq in self.fastaDict.items():
                outFile.write(">%s\n" % header)
                outFile.write("%s\n" % seq)
class cluster():
    """A cluster of clipped reads supporting one candidate breakpoint.

    clippedSide is "beg" for reads clipped at the read beginning and
    "end" for reads clipped at the end; bkpPos holds the breakpoint
    coordinate of the most recently added read.
    """
    def __init__(self, alignmentObj, clippedSide):
        """Initialise the cluster from its first supporting alignment."""
        self.chrom = alignmentObj.reference_name
        self.clippedSide = clippedSide
        self.bkpPos = alignmentObj.reference_start if clippedSide == "beg" else alignmentObj.reference_end
        self.clippedReadDict = {}
        self.consensusSeq = ""

    def addClippedRead(self, alignmentObj):
        """Add a supporting read; updates bkpPos to this read's breakpoint."""
        mate = '/1' if alignmentObj.is_read1 else '/2'
        readId = alignmentObj.query_name + mate
        self.bkpPos = alignmentObj.reference_start if self.clippedSide == "beg" else alignmentObj.reference_end
        # CIGAR operation codes: 4 = soft clip, 5 = hard clip
        operation = alignmentObj.cigartuples[0][0] if self.clippedSide == "beg" else alignmentObj.cigartuples[-1][0]
        clipType = "soft" if operation == 4 else "hard"
        self.clippedReadDict[readId] = {}
        self.clippedReadDict[readId]["alignmentObj"] = alignmentObj
        self.clippedReadDict[readId]["clipType"] = clipType

    def nbReads(self):
        """Return the number of reads supporting this cluster."""
        return len(self.clippedReadDict)

    def readIdList(self):
        """Return the supporting read ids (with /1 or /2 mate suffix)."""
        return list(self.clippedReadDict.keys())

    def addReadSeqs(self, fastaObj):
        """Attach each supporting read's sequence from *fastaObj*,
        reverse-complementing reads aligned on the reverse strand."""
        for readId in self.clippedReadDict.keys():
            alignmentObj = self.clippedReadDict[readId]["alignmentObj"]

            ## Make the reverse complementary of reads aligned on the reverse strand
            if (alignmentObj.is_reverse == True):
                readSeq = rev_complement(fastaObj.fastaDict[readId])
            else:
                readSeq = fastaObj.fastaDict[readId]

            self.clippedReadDict[readId]["seq"]= readSeq

    def makeConsensusSeq(self, outDir):
        """Build the cluster consensus sequence.

        multiple sequence alignment based: a single-read cluster returns
        the read itself; otherwise 'muscle' aligns the reads and the
        EMBOSS 'cons' tool derives the consensus.  Temporary files are
        created (and removed) under *outDir*.
        """
        ## A) Single sequence
        if len(self.clippedReadDict.keys()) == 1:
            consensusSeq = list(self.clippedReadDict.values())[0]["seq"].upper()

        ## B) Multiple sequence
        else:
            # NOTE(review): shell commands are built by string
            # concatenation and run via os.system, so outDir must not
            # contain shell metacharacters.
            command = 'mkdir -p ' + outDir
            os.system(command) # returns the exit status

            ### 1. Create fasta file containing cluster supporting reads
            fastaObj = fasta()
            fastaDict = {}

            for readId in self.clippedReadDict.keys():
                fastaDict[readId] = self.clippedReadDict[readId]["seq"]

            fastaObj.fastaDict = fastaDict
            fastaPath = outDir + '/supportingReads.fa'
            fastaObj.write_fasta(fastaPath)

            ### 2. Make multiple sequence alignment
            msfPath = outDir + '/supportingReads.msf'
            command = 'muscle -in ' + fastaPath + ' -out ' + msfPath + ' -msf'
            print command
            os.system(command) # returns the exit status

            ### 3. Generate consensus sequence (cons tool from EMBOSS packagge)
            consensusPath = outDir + '/consensus.fa'
            command = 'cons -sequence ' + msfPath + ' -outseq ' + consensusPath + ' -identity 0 -plurality 0'
            print command
            os.system(command) # returns the exit status

            ### Read consensus sequence
            fastaObj = fasta()
            fastaObj.fasta_reader(consensusPath)
            consensusSeq = fastaObj.fastaDict["EMBOSS_001"].upper()

            ### Do cleanup
            command = 'rm ' + fastaPath + ' ' + msfPath + ' ' + consensusPath
            os.system(command) # returns the exit status

        ## Replace '-' by 'N' for ambiguous bases:
        consensusSeq = consensusSeq.replace('-', 'N')

        ## Convert consensus sequence into upper case:
        consensusSeq = consensusSeq.upper()

        return consensusSeq
#### FUNCTIONS ####
# NOTE(review): these use Python 2 print statements — this script is
# Python 2 only.
def log(label, string):
    """
    Display labelled information
    """
    print "[" + label + "]", string

def subHeader(string):
    """
    Display subheader (timestamped)
    """
    timeInfo = time.strftime("%Y-%m-%d %H:%M")
    print timeInfo, "**", string, "**"

def info(string):
    """
    Display basic information (timestamped)
    """
    timeInfo = time.strftime("%Y-%m-%d %H:%M")
    print timeInfo, string
def rev_complement(seq):
    """
    Make the reverse complementary of a dna sequence
    Input:
    1) seq. DNA sequence
    Output:
    1) revComplementSeq. Reverse complementary of input DNA sequence
    """
    complementOf = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N'}
    # Walk the upper-cased sequence backwards, complementing each base
    # (raises KeyError on bases outside A/C/G/T/N, as before).
    return ''.join(complementOf[base] for base in reversed(seq.upper()))
def overlap(begA, endA, begB, endB):
    """
    Check if the closed ranges [begA, endA] and [begB, endB] overlap.

    Two ranges overlap exactly when the beginning of one of them lies
    inside the other:
    ## A) Begin of the range A within the range B
    # *beg* <---------range_A---------->
    # <---------range_B---------->
    ## B) Begin of the range B within the range A
    # <---------range_A---------->
    # *beg* <---------range_B---------->
    """
    # Chained comparisons express both containment tests directly.
    return (begB <= begA <= endB) or (begA <= begB <= endA)
def getClippedPairedClusters(chrPlus, begPlus, endPlus, chrMinus, begMinus, endMinus, rgType, bamFile, windowSize):
    """
    Collect clipped reads around both discordant clusters of a paired
    insertion call (+/- windowSize bp around each breakpoint side; for
    rgType "DUP" the opposite cluster ends are used) and merge them.

    Returns two de-duplicated lists: reads clipped at their beginning
    and reads clipped at their end.
    """
    ## 1. Extract clipped reads for positive cluster
    chrom = chrPlus

    if (rgType == "DUP"):
        beg = int(begPlus) - windowSize
        end = int(begPlus) + windowSize
    else:
        beg = int(endPlus) - windowSize
        end = int(endPlus) + windowSize

    clippedBegPlusList, clippedEndPlusList = getClippedInterval(chrom, beg, end, bamFile)

    ## 2. Extract clipped reads for negative cluster
    chrom = chrMinus

    if (rgType == "DUP"):
        beg = int(endMinus) - windowSize
        end = int(endMinus) + windowSize
    else:
        beg = int(begMinus) - windowSize
        end = int(begMinus) + windowSize

    # NOTE(review): only the negative-cluster range is echoed (there is
    # no matching print for the positive range) — presumably leftover
    # debugging output.
    print "range_-: ", chrom, beg, end
    clippedBegMinusList, clippedEndMinusList = getClippedInterval(chrom, beg, end, bamFile)

    ## 3. Merge clipped read lists:
    clippedBegList = list(set(clippedBegPlusList + clippedBegMinusList))
    clippedEndList = list(set(clippedEndPlusList + clippedEndMinusList))

    return clippedBegList, clippedEndList
def getClippedUnpairedCluster(chrPlus, begPlus, endPlus, bamFile, windowSize):
    """
    Collect clipped reads around both ends of an unpaired discordant
    cluster (+/- windowSize bp around begPlus and endPlus) and merge the
    results.

    Returns two de-duplicated lists: reads clipped at their beginning
    and reads clipped at their end.
    """
    ## 1. Extract clipped reads for cluster beginning
    chrom = chrPlus
    beg = int(begPlus) - windowSize
    end = int(begPlus) + windowSize
    print "range_beg: ", chrom, beg, end
    clippedBegClusterBegList, clippedEndClusterBegList = getClippedInterval(chrom, beg, end, bamFile)

    ## 2. Extract clipped reads for cluster ending
    chrom = chrPlus
    beg = int(endPlus) - windowSize
    end = int(endPlus) + windowSize
    print "range_end: ", chrom, beg, end
    clippedBegClusterEndList, clippedEndClusterEndList = getClippedInterval(chrom, beg, end, bamFile)

    ## 3. Merge clipped read lists:
    clippedBegList = list(set(clippedBegClusterBegList + clippedBegClusterEndList))
    clippedEndList = list(set(clippedEndClusterBegList + clippedEndClusterEndList))

    return clippedBegList, clippedEndList
def getClippedInterval(chrom, beg, end, bamFile):
    '''
    Collect soft/hard-clipped alignments overlapping chrom:beg-end.

    Returns two lists: alignments clipped only at their beginning and
    alignments clipped only at their end.  Unmapped reads, PCR
    duplicates and reads clipped on both sides are discarded.
    '''
    CLIP_OPS = (4, 5)  # CIGAR operations: 4 = soft clip, 5 = hard clip
    clippedBegList = []
    clippedEndList = []

    for alignmentObj in bamFile.fetch(chrom, beg, end):
        ### Discard unmapped reads and PCR duplicates
        if alignmentObj.is_unmapped or alignmentObj.is_duplicate:
            continue
        begClipped = alignmentObj.cigartuples[0][0] in CLIP_OPS
        endClipped = alignmentObj.cigartuples[-1][0] in CLIP_OPS
        # Keep reads clipped on exactly one side:
        #   *******---------  -> clipped at the beginning
        #   ---------*******  -> clipped at the end
        if begClipped and not endClipped:
            clippedBegList.append(alignmentObj)
        elif endClipped and not begClipped:
            clippedEndList.append(alignmentObj)

    return clippedBegList, clippedEndList
def clusterCLipped(clippedList, clippedSide, minNbReads, maxNbReads):
    '''
    Group clipped reads into breakpoint clusters.

    Reads are sorted by breakpoint coordinate and clustered greedily: a
    read joins the most recent cluster when its breakpoint lies within
    3 bp of that cluster's (continuously updated) breakpoint; otherwise
    it starts a new cluster.  Only clusters supported by between
    minNbReads and maxNbReads reads (inclusive) are returned.
    '''
    ### 1. Sort the clipped reads by increasing breakpoint coordinate
    if clippedSide == "beg":
        keyFn = lambda alignmentObj: alignmentObj.reference_start
    else:
        keyFn = lambda alignmentObj: alignmentObj.reference_end
    clippedSortedList = sorted(clippedList, key=keyFn)

    ### 2. Greedy single-pass clustering
    clusterList = []
    for alignmentObj in clippedSortedList:
        bkpPos = keyFn(alignmentObj)
        # Join the latest cluster when within its [bkpPos, bkpPos+3] window
        if clusterList and (clusterList[-1].bkpPos <= bkpPos <= clusterList[-1].bkpPos + 3):
            clusterList[-1].addClippedRead(alignmentObj)
        else:
            newCluster = cluster(alignmentObj, clippedSide)
            newCluster.addClippedRead(alignmentObj)
            clusterList.append(newCluster)

    ### 3. Keep clusters whose read support lies within the allowed range
    return [clusterObj for clusterObj in clusterList
            if minNbReads <= clusterObj.nbReads() <= maxNbReads]
def filterNbClusters(clusterBegList, clusterEndList, maxNbClusters):
    '''
    Discard all clipped-read clusters when there are too many of them.

    An excess of clusters around a single insertion usually indicates
    alignment artefacts (ideally there would be one cluster per
    breakpoint), so when the combined count exceeds maxNbClusters both
    lists are emptied; otherwise they are returned unchanged.
    '''
    if len(clusterBegList) + len(clusterEndList) > maxNbClusters:
        return [], []
    return clusterBegList, clusterEndList
def filterDiscordantCluster(chrom, beg, end, readPairList, bamFile):
    '''
    Flag a discordant read-pair cluster as a likely artefact when more
    than 50% of its supporting reads are clipped on both ends.

    Returns True when the cluster should be discarded, False otherwise.
    '''
    nbDiscordant = len(readPairList)
    # NOTE(review): an empty readPairList would make the percentage
    # division below raise ZeroDivisionError; callers appear to always
    # pass at least one id (str.split(',') never returns []) — confirm.
    nbClippedBothSides = 0
    readPairFilteredList = []

    ## Extract alignments in the interval
    iterator = bamFile.fetch(chrom, beg, end)

    ## Iterate over the alignments
    for alignmentObj in iterator:

        ## Supporting discordant paired-end read and cigar available
        if (alignmentObj.query_name in readPairList) and (alignmentObj.cigartuples is not None):
            # CIGAR operations: 4 = soft clip, 5 = hard clip
            firstOperation = alignmentObj.cigartuples[0][0]
            lastOperation = alignmentObj.cigartuples[-1][0]

            ### A) Read clipped both in the beginning and ending
            if ((firstOperation == 4) or (firstOperation == 5)) and ((lastOperation == 4) or (lastOperation == 5)):
                nbClippedBothSides += 1

            ### B) Read not clipped in both sides
            else:
                readPairFilteredList.append(alignmentObj.query_name)

    ## Percentage of supporting paired ends that are clipped on both sides
    percClippedBothSides = float(nbClippedBothSides) / nbDiscordant * 100

    ## Recompute the number of supporting paired ends after removing problematic reads
    readPairFilteredList = list(set(readPairFilteredList))
    nbFilteredDiscordant = len(readPairFilteredList)

    ## Discard cluster if more than 50% supporting paired-ends clipped on both sides:
    if (percClippedBothSides > 50):
        print "FILTER-CLUSTER: ", nbClippedBothSides, nbDiscordant, percClippedBothSides, nbFilteredDiscordant, readPairFilteredList
        readPairFilteredList = []
        nbFilteredDiscordant = 0
        filtered = True
    else:
        filtered = False

    return filtered
#### MAIN ####
# Pipeline: for each insertion call, collect and cluster clipped reads
# around the discordant-cluster breakpoints, build a consensus sequence
# per cluster (muscle + EMBOSS cons) and write one FASTA per insertion.

## Import modules ##
import argparse
import sys
import os
import time
from operator import itemgetter, attrgetter, methodcaller
import pysam
import itertools
import subprocess

# Global variables:
# NOTE(review): 'global' at module scope is a no-op; kept as documentation only.
global debugBool ## debug logging mode. Boolean.

# Environmental variables:
# Command used to invoke Picard; fails fast with KeyError when unset.
PICARD = os.environ['PICARD']

## Get user's input ##
parser = argparse.ArgumentParser(description= "")
parser.add_argument('insertions', help='')
parser.add_argument('bam', help='Bam file')
parser.add_argument('--windowSize', default=50, dest='windowSize', type=int, help='Window size to search for clipped read clusters from discordant read-pair clusters ends. Default=50bp' )
parser.add_argument('--minNbReads', default=1, dest='minNbReads', type=int, help='Minimum number of clipped reads composing the cluster. Default: 1' )
parser.add_argument('--maxNbReads', default=500, dest='maxNbReads', type=int, help='Maximum number of clipped reads composing the cluster. Default: 500' )
parser.add_argument('--maxNbClusters', default=10, dest='maxNbClusters', type=int, help='Maximum number of clipped read clusters in the insertion region. Default: 10' )
parser.add_argument('-o', '--outDir', default=os.getcwd(), dest='outDir', help='output directory. Default: current working directory.' )

args = parser.parse_args()
insertionsPath = args.insertions
bam = args.bam
windowSize = args.windowSize
minNbReads = args.minNbReads
maxNbReads = args.maxNbReads
maxNbClusters = args.maxNbClusters
outDir = args.outDir

tmpDir = outDir + '/tmp'
scriptName = os.path.basename(sys.argv[0])

## Display configuration to standard output ##
print
print "***** ", scriptName, " configuration *****"
print "insertionsPath: ", insertionsPath
print "bam: ", bam
print "windowSize: ", windowSize
print "minNbReads: ", minNbReads
print "maxNbReads: ", maxNbReads
print "maxNbClusters: ", maxNbClusters
print "outDir: ", outDir
print
print "***** Executing ", scriptName, ".... *****"
print

## Start ##

## Open input files
# NOTE(review): this handle is never closed — acceptable for a
# run-to-completion script, but worth fixing.
insertions = open(insertionsPath, 'r')

## Open donor's BAM files for reading
bamFile = pysam.AlignmentFile(bam, "rb")

clustersDict = {}
discordantReadPairList = []

## Read insertions file line by line
for line in insertions:

    ## Ignore comment lines (e.g. header)
    if line.startswith('#'):
        continue

    line = line.rstrip('\n')
    fieldsList = line.split("\t")

    ## Insertion line with the expected number of columns
    if (int(len(fieldsList)) == 31):
        chrPlus = fieldsList[0]
        begPlus = fieldsList[1]
        endPlus = fieldsList[2]
        nbReadsPlus = fieldsList[3]
        familyPlus = fieldsList[4]
        readPairListPlus = fieldsList[5].split(",")
        chrMinus = fieldsList[6]
        begMinus = fieldsList[7]
        endMinus = fieldsList[8]
        nbReadsMinus = fieldsList[9]
        familyMinus = fieldsList[10]
        readPairListMinus = fieldsList[11].split(",")
        insertionType = fieldsList[12]
        rgType = fieldsList[30]

        print "###### INSERTION: ", chrPlus, begPlus, endPlus, chrMinus, begMinus, endMinus, rgType

        ## Add discordant read pairs to the list:
        discordantReadPairList = discordantReadPairList + readPairListPlus + readPairListMinus

        ## Define an insertion id (insertion coordinates defined by the end
        # of + cluster and beg of - cluster)
        if familyPlus == 'Other': # temporary fix
            familyPlus = 'SVA'

        insertionId = familyPlus + ":" + insertionType + ":" + chrPlus + "_" + endPlus + "_" + begMinus

        ### 0. Refine discordant paired end clusters:
        ## A) Paired clusters
        if (begMinus != "NA") and (begMinus != "UNK"):
            filteredPlus = filterDiscordantCluster(chrPlus, int(begPlus), int(endPlus), readPairListPlus, bamFile)
            filteredMinus = filterDiscordantCluster(chrMinus, int(begMinus), int(endMinus), readPairListMinus, bamFile)

        ## B) Unpaired cluster
        else:
            filteredPlus = filterDiscordantCluster(chrPlus, int(begPlus), int(endPlus), readPairListPlus, bamFile)
            filteredMinus = False

        ## Discard those insertions with a high percentage of both-sides clipped reads supporting at least one of the clusters:
        if (filteredPlus == True) or (filteredMinus == True):
            clusterBegFilteredList = []
            clusterEndFilteredList = []
        else:
            ### 1. Search for clipped reads
            ## A) Paired clusters
            if (begMinus != "NA") and (begMinus != "UNK"):
                clippedBegList, clippedEndList = getClippedPairedClusters(chrPlus, begPlus, endPlus, chrMinus, begMinus, endMinus, rgType, bamFile, windowSize)
            ## B) Unpaired cluster
            else:
                clippedBegList, clippedEndList = getClippedUnpairedCluster(chrPlus, begPlus, endPlus, bamFile, windowSize)

            ### 2. Cluster clipped reads:
            ### 2.1 Tumour
            clusterBegList = clusterCLipped(clippedBegList, "beg", minNbReads, maxNbReads)
            clusterEndList = clusterCLipped(clippedEndList, "end", minNbReads, maxNbReads)

            ### 3. Filter clusters of clipped reads:
            ## 3.1 Filter by the number of clipped-read clusters
            clusterBegFilteredList, clusterEndFilteredList = filterNbClusters(clusterBegList, clusterEndList, maxNbClusters)

        ### 4. Add the 2 cluster lists to the dictionary:
        clustersDict[insertionId] = {}
        clustersDict[insertionId]["beg"] = clusterBegFilteredList
        clustersDict[insertionId]["end"] = clusterEndFilteredList

bamFile.close()

## 2) Make fasta containing the discordant paired-end reads +
##############################################################
# the reads supporting the clusters of clipped reads
####################################################

## 1. Make list containing the discordant paired-end reads
allReadPairIdList = discordantReadPairList

## 2. Add to the list the reads supporting the clusters of clipped reads
for insertionId in clustersDict:
    clusterBegList = clustersDict[insertionId]["beg"]
    clusterEndList = clustersDict[insertionId]["end"]

    # Strip the /1 or /2 mate suffix to recover the read-pair id.
    for clusterObj in clusterBegList:
        readPairIdList = [readId.split("/")[0] for readId in clusterObj.readIdList()]
        allReadPairIdList = allReadPairIdList + readPairIdList

    for clusterObj in clusterEndList:
        readPairIdList = [readId.split("/")[0] for readId in clusterObj.readIdList()]
        allReadPairIdList = allReadPairIdList + readPairIdList

allReadPairIdList = list(set(allReadPairIdList))

## 3. Make file containing the supporting read ids
readPairsPath = outDir +'/allReadPairs.txt'
readPairsFile = open(readPairsPath, 'w')

for readPairId in allReadPairIdList:
    row = readPairId + "\n"
    readPairsFile.write(row)

## Important to close! otherwhise next step won't work properly...
readPairsFile.close()

## 4. Extract read sequences with picard and generate fasta
readPairsFasta = outDir + '/allReadPairs.fa'
command = PICARD + ' FilterSamReads I=' + bam + ' O=/dev/stdout READ_LIST_FILE=' + readPairsPath + ' FILTER=includeReadList WRITE_READS_FILES=false VALIDATION_STRINGENCY=SILENT QUIET=true | samtools fasta - > ' + readPairsFasta
print command
os.system(command)

## 3) Add to the reads supporting the clusters its complete sequence from fasta and
####################################################################################
# generate consensus sequence
##############################
fastaObj = fasta()
fastaObj.fasta_reader(readPairsFasta)

for insertionId in clustersDict:
    print "********** ", insertionId, " *************"
    clusterBegList = clustersDict[insertionId]["beg"]
    clusterEndList = clustersDict[insertionId]["end"]

    #print "--- clusterBeg ---"
    for clusterObj in clusterBegList:
        clusterId = clusterObj.chrom + "_" + str(clusterObj.bkpPos) + "_" + clusterObj.clippedSide + "_" + str(clusterObj.nbReads())
        consensusDir = tmpDir + '/' + clusterId
        clusterObj.addReadSeqs(fastaObj)
        clusterObj.consensusSeq = clusterObj.makeConsensusSeq(consensusDir)

    #print "--- clusterEnd ---"
    for clusterObj in clusterEndList:
        clusterId = clusterObj.chrom + "_" + str(clusterObj.bkpPos) + "_" + clusterObj.clippedSide + "_" + str(clusterObj.nbReads())
        consensusDir = tmpDir + '/' + clusterId
        clusterObj.addReadSeqs(fastaObj)
        clusterObj.consensusSeq = clusterObj.makeConsensusSeq(consensusDir)

## 4) For each insertion generate a fasta containing the consensus sequences for each cluster
##############################################################################################
for insertionId in clustersDict:
    print "********** ", insertionId, " *************"
    fastaDict = {}
    clusterList = clustersDict[insertionId]["beg"] + clustersDict[insertionId]["end"]

    ## For each cluster
    for clusterObj in clusterList:
        ## Include into the header the clipped read ids..
        header = "cluster" + "_" + clusterObj.chrom + "_" + str(clusterObj.bkpPos) + "_" + clusterObj.clippedSide + "_" + str(clusterObj.nbReads()) + "\t" + ",".join(clusterObj.readIdList())
        fastaDict[header] = clusterObj.consensusSeq

    fastaObj = fasta()
    fastaObj.fastaDict = fastaDict

    ## Write into the output file
    fileName = insertionId + ".fa"
    outFilePath = outDir + "/" + fileName
    fastaObj.write_fasta(outFilePath)

### Make cleanup and finish
command = 'rm -r ' + readPairsPath + ' ' + tmpDir
os.system(command) # returns the exit status

print "***** Finished! *****"
print
|
brguez/TEIBA
|
src/python/clusterClippedReads.py
|
Python
|
gpl-3.0
| 25,308
|
[
"pysam"
] |
41c2de8e7ea0c4cf51e59a6d8f8e68907d257bafe1b785c557c240de6b3cced8
|
import numpy as np
import pytest
import scipy.stats as stats
@pytest.fixture(scope='function')
def dimensions():
    """Dimensionality of the synthetic test data."""
    return 3
@pytest.fixture(scope='function')
def observations():
    """Number of observations (samples) in the synthetic test data."""
    return 25
@pytest.fixture(scope='function')
def iterations():
    """Iteration budget for tests that run repeated trials."""
    return 1000
@pytest.fixture(scope='function')
def data(dimensions, observations):
    """Reproducible standard-normal sample of shape (dimensions, observations)."""
    np.random.seed(1234)
    sample = np.random.randn(dimensions, observations)
    return sample
# TODO: parametrise this, so that we can test multiple distributions (i.e.
# multivariate gaussian, uniform, cauchy, mixture of gaussians
@pytest.fixture(scope='function')
def multivariate_normal(dimensions):
    """Standard multivariate normal: zero mean, identity covariance."""
    zero_mean = np.zeros(dimensions)
    identity_cov = np.eye(dimensions)
    return stats.multivariate_normal(mean=zero_mean, cov=identity_cov)
|
IamGianluca/algorithms
|
ml/tests/conftest.py
|
Python
|
mit
| 730
|
[
"Gaussian"
] |
2b79d300678c5f8e2df868db771b68386af7e2cc9866765bb259b874dd1a414f
|
#!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# Standard Ansible module metadata consumed by ansible-doc and the CI gates.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_sslkeyandcertificate
author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com>
short_description: Module for setup of SSLKeyAndCertificate Avi RESTful Object
description:
- This module is used to configure SSLKeyAndCertificate object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
ca_certs:
description:
- Ca certificates in certificate chain.
certificate:
description:
- Sslcertificate settings for sslkeyandcertificate.
required: true
certificate_management_profile_ref:
description:
- It is a reference to an object of type certificatemanagementprofile.
created_by:
description:
- Creator name.
dynamic_params:
description:
- Dynamic parameters needed for certificate management profile.
enckey_base64:
description:
- Encrypted private key corresponding to the private key (e.g.
- Those generated by an hsm such as thales nshield).
enckey_name:
description:
- Name of the encrypted private key (e.g.
- Those generated by an hsm such as thales nshield).
hardwaresecuritymodulegroup_ref:
description:
- It is a reference to an object of type hardwaresecuritymodulegroup.
key:
description:
- Private key.
key_params:
description:
- Sslkeyparams settings for sslkeyandcertificate.
name:
description:
- Name of the object.
required: true
status:
description:
- Enum options - ssl_certificate_finished, ssl_certificate_pending.
- Default value when not specified in API or module is interpreted by Avi Controller as SSL_CERTIFICATE_FINISHED.
tenant_ref:
description:
- It is a reference to an object of type tenant.
type:
description:
- Enum options - ssl_certificate_type_virtualservice, ssl_certificate_type_system, ssl_certificate_type_ca.
- Default value when not specified in API or module is interpreted by Avi Controller as SSL_CERTIFICATE_TYPE_VIRTUALSERVICE.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create a SSL Key and Certificate
avi_sslkeyandcertificate:
controller: 10.10.27.90
username: admin
password: AviNetworks123!
key: |
-----BEGIN PRIVATE KEY-----
....
-----END PRIVATE KEY-----
certificate:
self_signed: true
certificate: |
-----BEGIN CERTIFICATE-----
....
-----END CERTIFICATE-----
type: SSL_CERTIFICATE_TYPE_VIRTUALSERVICE
name: MyTestCert
"""
RETURN = '''
obj:
description: SSLKeyAndCertificate (api/sslkeyandcertificate) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Build the sslkeyandcertificate argument spec and hand control to the
    shared Avi ansible API driver."""
    specs = dict(
        state=dict(default='present',
                   choices=['absent', 'present']),
        avi_api_update_method=dict(default='put',
                                   choices=['put', 'patch']),
        avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
        ca_certs=dict(type='list',),
        certificate=dict(type='dict', required=True),
        certificate_management_profile_ref=dict(type='str',),
        created_by=dict(type='str',),
        dynamic_params=dict(type='list',),
        enckey_base64=dict(type='str',),
        enckey_name=dict(type='str',),
        hardwaresecuritymodulegroup_ref=dict(type='str',),
        key=dict(type='str', no_log=True,),
        key_params=dict(type='dict',),
        name=dict(type='str', required=True),
        status=dict(type='str',),
        tenant_ref=dict(type='str',),
        type=dict(type='str',),
        url=dict(type='str',),
        uuid=dict(type='str',),
    )
    # merge in the controller/credential options common to every Avi module
    specs.update(avi_common_argument_spec())
    module = AnsibleModule(argument_spec=specs, supports_check_mode=True)
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    # 'key' is flagged sensitive so it is scrubbed from logs and diffs
    return avi_ansible_api(module, 'sslkeyandcertificate',
                           set(['key']))
if __name__ == '__main__':
    # Allow Ansible (or a developer) to execute the module directly.
    main()
|
alxgu/ansible
|
lib/ansible/modules/network/avi/avi_sslkeyandcertificate.py
|
Python
|
gpl-3.0
| 5,906
|
[
"VisIt"
] |
1cd6c5fdb1e04ec04c7cbce7afa992678be7d2dcfddfed56a1ff4b12f3e80cca
|
"""Test scriptlet to add an additional ball into play"""
# add_a_ball.py
# Mission Pinball Framework
# Written by Brian Madden & Gabe Knuth
# Released under the MIT License. (See license info at the end of this file.)
from mpf.system.scriptlet import Scriptlet
class AddABall(Scriptlet):
    """Scriptlet that puts an extra ball into play when the buy-in switch fires."""

    def on_load(self):
        # Register add_ball() as the handler for the 'sw_buy_in' switch event.
        self.machine.events.add_handler('sw_buy_in', self.add_ball)

    def add_ball(self):
        # Bump the balls-in-play count, then eject a ball onto the playfield.
        self.machine.game.add_balls_in_play(1)
        self.machine.playfield.add_ball()
# The MIT License (MIT)
# Copyright (c) 2013-2015 Brian Madden and Gabe Knuth
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
|
jabdoa2/mpf
|
machine_files/demo_man/scriptlets/add_a_ball.py
|
Python
|
mit
| 1,631
|
[
"Brian"
] |
5f7166465aedf40201ee8f1447b9b148cd0bdea2d476f1de32f735bc3f9cd93b
|
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Imputer
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import LSTM,Dense,Bidirectional
from sklearn.preprocessing import MinMaxScaler
def get_naive_bayes_models():
    """Return the three stock Naive Bayes classifiers paired with their
    display names, as (classifier_list, classifier_name_list)."""
    models = [GaussianNB(), MultinomialNB(), BernoulliNB()]
    labels = ['Gaussian NB', 'Multinomial NB', 'Bernoulli NB']
    return models, labels
def get_neural_network(hidden_layer_size=50):
    """Return a single-hidden-layer MLP classifier and its display name,
    as (classifier_list, classifier_name_list)."""
    perceptron = MLPClassifier(hidden_layer_sizes=hidden_layer_size)
    return [perceptron], ['MultiLayer Perceptron']
def get_ensemble_models():
    """Return five scikit-learn ensemble classifiers paired with their
    display names, as (classifier_list, classifier_name_list)."""
    named = [
        ('Random Forests', RandomForestClassifier(n_estimators=51, min_samples_leaf=5, min_samples_split=3)),
        ('Bagging', BaggingClassifier(n_estimators=71, random_state=42)),
        ('Extra Trees', ExtraTreesClassifier(n_estimators=57, random_state=42)),
        ('AdaBoost', AdaBoostClassifier(n_estimators=51, random_state=42)),
        ('Gradient Boost', GradientBoostingClassifier(n_estimators=101, random_state=42)),
    ]
    return [model for _, model in named], [label for label, _ in named]
def label_encode_frame(dataframe):
    """Integer-encode every string-valued column of *dataframe* in place.

    A column is encoded when its first value is a string: sorted unique
    values map to 0..n-1 (the same mapping sklearn's LabelEncoder produces).
    Other columns are left untouched.  Returns the mutated dataframe.

    Fixes over the previous version:
    - uses positional ``iloc[0]`` instead of label-based ``[0]``, which
      raised KeyError on frames whose index is not 0-based;
    - ``isinstance`` instead of the ``type(...) is str`` anti-pattern;
    - empty columns are skipped instead of raising.
    """
    for column in dataframe.columns:
        series = dataframe[column]
        if len(series) and isinstance(series.iloc[0], str):
            # categorical codes follow sorted unique values, matching LabelEncoder
            dataframe[column] = series.astype('category').cat.codes
    return dataframe
def print_evaluation_metrics(trained_model,trained_model_name,X_test,y_test):
    # Print a per-class classification report and overall accuracy for
    # *trained_model* on the held-out test set.
    # (Python 2 print statements — this file predates Python 3.)
    print '--------- For Model : ', trained_model_name
    predicted_values = trained_model.predict(X_test)
    print metrics.classification_report(y_test,predicted_values)
    print "Accuracy Score : ",metrics.accuracy_score(y_test,predicted_values)
    print "---------------------------------------\n"
def get_rnn_model(input_shape):
    """Build and compile a two-layer (stacked + bidirectional) LSTM binary
    classifier for inputs of the given shape."""
    network = Sequential()
    layers = [
        LSTM(50, input_shape=input_shape, recurrent_dropout=0.2, dropout=0.3, return_sequences=True),
        Bidirectional(LSTM(50, recurrent_dropout=0.2, dropout=0.2), merge_mode='ave'),
        Dense(1, activation='sigmoid'),
    ]
    for layer in layers:
        network.add(layer)
    network.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    return network
def rnn_evaluate(features, class_labels):
    """Scale features to [-1, 1], reshape to (samples, 1, features) for the
    LSTM, and train the RNN with a 30% validation split."""
    scaled = MinMaxScaler(feature_range=(-1, 1), copy=False).fit_transform(features)
    scaled = scaled.reshape(scaled.shape[0], 1, scaled.shape[1])
    network = get_rnn_model((scaled.shape[1], scaled.shape[2]))
    network.fit(scaled, class_labels, batch_size=1024, epochs=50, validation_split=0.3)
# ---- script entry: load the Ford challenge data and evaluate the models ----
filename = 'fordTrain.csv'
alert_frame = pd.read_csv(filename)
class_labels = alert_frame['IsAlert'].values
# drop identifier columns and the target before building the feature matrix
del alert_frame['TrialID']
del alert_frame['ObsNum']
del alert_frame['IsAlert']
features = alert_frame.values
# NOTE(review): shuffle=False makes random_state a no-op here — a
# chronological split is presumably intentional; confirm.
X_train,X_test,y_train,y_test = train_test_split(features,class_labels,test_size=0.2,shuffle=False,random_state=42)
classifier_list,classifier_name_list = get_ensemble_models()
for classifier,classifier_name in zip(classifier_list,classifier_name_list):
    classifier.fit(X_train,y_train)
    print_evaluation_metrics(classifier,classifier_name,X_test,y_test)
# NOTE(review): the RNN is fit on the full feature set (train + test),
# unlike the ensembles above — confirm this is intended.
rnn_evaluate(features,class_labels)
|
rupakc/Kaggle-Compendium
|
Stay Alert - The Ford Challenge/ford_baseline.py
|
Python
|
mit
| 3,772
|
[
"Gaussian"
] |
12692cec6c5304a23c73238092cdec7fb690b20092ae7c0ef46f57b42c57e300
|
#!/usr/bin/env python
"""
Install.py tool to download, unpack, build, and link to the LATTE library
used to automate the steps described in the README file in this dir
"""
from __future__ import print_function
import sys, os, subprocess, shutil, tarfile
from argparse import ArgumentParser
sys.path.append('..')
from install_helpers import fullpath, geturl, checkmd5sum
# ---- command-line front end: defaults, known checksums, argument parsing ---
parser = ArgumentParser(prog='Install.py',
                        description="LAMMPS library build wrapper script")
# settings
version = '1.2.2'    # default LATTE release to download
suffix = 'gfortran'  # default Makefile.lammps.* machine suffix
# known checksums for different LATTE versions. used to validate the download.
checksums = { \
    '1.1.0' : '533635721ee222d0ed2925a18fb5b294', \
    '1.2.0' : '68bf0db879da5e068a71281020239ae7', \
    '1.2.1' : '85ac414fdada2d04619c8f936344df14', \
    '1.2.2' : '820e73a457ced178c08c71389a385de7', \
}
# help message
HELP = """
Syntax from src dir: make lib-latte args="-b"
or: make lib-latte args="-p /usr/local/latte"
or: make lib-latte args="-m gfortran"
or: make lib-latte args="-b -v 1.2.2"
Syntax from lib dir: python Install.py -b
or: python Install.py -p /usr/local/latte
or: python Install.py -m gfortran
or: python Install.py -v 1.2.2 -b
Example:
make lib-latte args="-b -m gfortran" # download/build in lib/latte
make lib-latte args="-p $HOME/latte" # use existing LATTE installation
"""
# -b (build from source) and -p (use existing install) are mutually exclusive
pgroup = parser.add_mutually_exclusive_group()
pgroup.add_argument("-b", "--build", action="store_true",
                    help="download and build the LATTE library")
pgroup.add_argument("-p", "--path",
                    help="specify folder of existing LATTE installation")
parser.add_argument("-m", "--machine", choices=['gfortran', 'ifort', 'linalg', 'serial', 'mpi'],
                    help="suffix of a Makefile.lammps.* file used for linking LAMMPS with this library")
parser.add_argument("-v", "--version", default=version,
                    help="set version of LATTE to download and build (default: %s)" % version)
args = parser.parse_args()
# print help message and exit, if neither build nor path options are given
if not args.build and not args.path:
    parser.print_help()
    sys.exit(HELP)
homepath = fullpath(".")
# unpack parsed options into the flags used by the build logic below
buildflag = args.build
pathflag = args.path is not None
version = args.version
suffixflag = args.machine is not None
suffix = args.machine
# ---- resolve paths, then download, build, and link LATTE -------------------
if pathflag:
    # -p mode: validate and canonicalize the user-supplied install directory
    lattedir = args.path
    if not os.path.isdir(lattedir):
        sys.exit("LATTE path %s does not exist" % lattedir)
    lattedir = fullpath(lattedir)
homedir = "LATTE-%s" % version
if buildflag:
    # -b mode: the tarball unpacks into lib/latte/LATTE-<version>
    url = "https://github.com/lanl/LATTE/archive/v%s.tar.gz" % version
    lattepath = fullpath(homepath)
    lattedir = os.path.join(lattepath, homedir)
# download and unpack LATTE tarball
if buildflag:
    print("Downloading LATTE ...")
    geturl(url, "LATTE.tar.gz")
    # verify downloaded archive integrity via md5 checksum, if known.
    if version in checksums:
        if not checkmd5sum(checksums[version], 'LATTE.tar.gz'):
            sys.exit("Checksum for LATTE library does not match")
    print("Unpacking LATTE ...")
    # wipe any previous unpacked tree before extracting a fresh copy
    if os.path.exists(lattedir):
        shutil.rmtree(lattedir)
    if tarfile.is_tarfile('LATTE.tar.gz'):
        tgz = tarfile.open('LATTE.tar.gz')
        tgz.extractall()
        os.remove('LATTE.tar.gz')
    else:
        sys.exit("File LATTE.tar.gz is not a supported archive")
    # build LATTE
    print("Building LATTE ...")
    # NOTE(review): shell=True with an interpolated path — lattedir comes from
    # local CLI input, but quoting relies on the path containing no '"'.
    cmd = 'cd "%s"; make' % lattedir
    try:
        txt = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
        print(txt.decode('UTF-8'))
    except subprocess.CalledProcessError as e:
        sys.exit("Make failed with:\n %s" % e.output.decode('UTF-8'))
# create 3 links in lib/latte to LATTE dirs (for both -b and -p modes)
print("Creating links to LATTE files")
if os.path.isfile("includelink") or os.path.islink("includelink"):
    os.remove("includelink")
if os.path.isfile("liblink") or os.path.islink("liblink"):
    os.remove("liblink")
if os.path.isfile("filelink.o") or os.path.islink("filelink.o"):
    os.remove("filelink.o")
os.symlink(os.path.join(lattedir, 'src'), 'includelink')
os.symlink(lattedir, 'liblink')
os.symlink(os.path.join(lattedir, 'src', 'latte_c_bind.o'), 'filelink.o')
# copy Makefile.lammps.suffix to Makefile.lammps
if suffixflag or not os.path.exists("Makefile.lammps"):
    if suffix is None:
        suffix = 'gfortran'
    print("Creating Makefile.lammps")
    if os.path.exists("Makefile.lammps.%s" % suffix):
        shutil.copyfile("Makefile.lammps.%s" % suffix, 'Makefile.lammps')
|
stanmoore1/lammps
|
lib/latte/Install.py
|
Python
|
gpl-2.0
| 4,571
|
[
"LAMMPS"
] |
47175d2c572ee378d86ab26cbc2fa14299e89dbdd450f6fa76e8c4557898b8ac
|
##################################################################
##
## MoPLUGS main menu
##
## Author Sergey Solohin (Neill3d) 2014-2016
## e-mail to: s@neill3d.com
## www.neill3d.com
##
## Github repo - https://github.com/Neill3d/MoPlugs
## Licensed under BSD 3-clause
## https://github.com/Neill3d/MoPlugs/blob/master/LICENSE
##
##################################################################
from pyfbsdk import *
import webbrowser
import os
# Shown in the About message box of the MoHELP menu.
AboutInfo = "Version from 04.03.2016"
gApp = FBApplication()
gSystem = FBSystem()
# Menu item ids for the MoSCRIPTS menu.  Ids in the 7x range are sub-actions
# of the "Duplicate Along a Curve" tool family.
gScriptAssignUVSet = 0
gScriptSelChAndCR = 1
gScriptCheckFacialDataVideoPlane = 3
gScriptCopyBlendShapeAnimation = 4
gScriptSaveLocalPositions = 5
gScriptExtractTrajectoryPath = 6
gScriptDuplicateAlongACurveSetup = 7
gScriptDuplicateAlongACurveRotate90 = 70
gScriptDuplicateAlongACurveSelectNulls = 71
gScriptDuplicateAlongACurveSelectModels = 72
gScriptDuplicateAlongACurveDeform = 73
gScriptDuplicateAlongACurveHardPlot = 74
gScriptFixInstancesGeometry = 75
gScriptGroupedWithNull = 8
gScriptInsertParent = 9
gScriptFacialRigConnectJoints = 10
gScriptFacialRigCreateVideoPlane = 11
gScriptFacialRigHardPlot = 12
gScriptFacialRigOpticalPostProcessing = 13
gScriptFacialRigSetupConstraints = 14
gScriptFromLayeredToSingleTexture = 15
gScriptRefreshModelsTextures = 16
gScriptRefreshTextures = 17
gScriptLightsHardTurnOnOff = 18
gScriptLightsTurnOnOff = 19
gScriptMakeCircle = 20
# Maps each script id to its menu title (used when building the MoSCRIPTS menu).
gScripts = {
    gScriptAssignUVSet : "Assign Lightmap UVSet",
    gScriptSelChAndCR : "Select Char and ControlRig from Models",
    gScriptCheckFacialDataVideoPlane : "CHECK FacialData VideoPlane",
    gScriptCopyBlendShapeAnimation : "Copy BlendShape Animation",
    gScriptSaveLocalPositions : "Save Local Positions",
    gScriptExtractTrajectoryPath : "Extract Trajectory Path",
    gScriptDuplicateAlongACurveSetup : "Duplicate Along a Curve SETUP",
    gScriptDuplicateAlongACurveRotate90 : "Duplicate Along a Curve Rotate By 90",
    gScriptDuplicateAlongACurveSelectNulls : "Duplicate Along a Curve Select Nulls",
    gScriptDuplicateAlongACurveSelectModels : "Duplicate Along a Curve Select Models",
    gScriptDuplicateAlongACurveDeform : "Duplicate Along A Curve Deform Meshes",
    gScriptDuplicateAlongACurveHardPlot : "Models Animation Hard Plot",
    gScriptFixInstancesGeometry : "Fix Instances Geometry",
    gScriptGroupedWithNull : "Grouped With Null",
    gScriptInsertParent : "Insert Parent",
    gScriptFacialRigConnectJoints : "Facial RIG - Connect Joints",
    gScriptFacialRigCreateVideoPlane : "Facial RIG - Create VideoPlane",
    gScriptFacialRigHardPlot : "Facial RIG - Hard Plot",
    gScriptFacialRigOpticalPostProcessing : "Facial RIG - Optical Post Processing",
    gScriptFacialRigSetupConstraints : "Facial RIG - Setup Constraints",
    gScriptFromLayeredToSingleTexture : "From Layered to Single Texture",
    gScriptRefreshModelsTextures : "Refresh Selected Models Textures",
    gScriptRefreshTextures : "Refresh Textures",
    gScriptLightsHardTurnOnOff : "Lights - Hard Turn On/Off",
    gScriptLightsTurnOnOff : "Lights - Turn On/Off",
    gScriptMakeCircle : "Make A Circle" }
def TryToExecuteScript(filename):
    # Look in each MotionBuilder python startup folder for a sibling
    # "PythonActions" directory containing *filename*; execute the first match
    # and return, or show a message box if the script is not found anywhere.
    paths = gSystem.GetPythonStartupPath()
    for path in paths:
        # Rebuild the startup path minus its last component (its parent
        # directory) — backslash splitting, so Windows-only path handling.
        items = path.split("\\")
        scriptpath = ""
        if len(items) > 0:
            scriptpath = items[0]
        for i in range(1, len(items)-1):
            scriptpath = scriptpath + "\\" + items[i]
        scriptpath = scriptpath + "\\PythonActions\\" + filename
        print scriptpath
        if os.path.isfile( scriptpath ):
            gApp.ExecuteScript(scriptpath)
            return
    FBMessageBox( "MoScripts", "The script is missing", "Ok" )
def MenuScriptsItemActivate(control, event):
print "editing item"
if event.Id == gScriptAssignUVSet:
TryToExecuteScript( "AssignUVSet.py" )
elif event.Id == gScriptSelChAndCR:
TryToExecuteScript( "SelCharAndCR_ByModelSelection.py" )
elif event.Id == gScriptCheckFacialDataVideoPlane:
TryToExecuteScript( "CHECK_FacialData_VideoPlane.py" )
elif event.Id == gScriptCopyBlendShapeAnimation:
TryToExecuteScript( "CopyBlendShapeAnimation.py" )
elif event.Id == gScriptSaveLocalPositions:
TryToExecuteScript( "SaveLocalPositions.py" )
elif event.Id == gScriptExtractTrajectoryPath:
TryToExecuteScript( "ExtractTrajectoryPath.py" )
elif event.Id == gScriptDuplicateAlongACurveSetup:
TryToExecuteScript( "DuplicateAlongACurve_SETUP.py" )
elif event.Id == gScriptDuplicateAlongACurveRotate90:
TryToExecuteScript( "DuplicateAlongACurve_RotateBy90.py" )
elif event.Id == gScriptDuplicateAlongACurveSelectNulls:
TryToExecuteScript( "DuplicateAlongACurve_SelectNulls.py" )
elif event.Id == gScriptDuplicateAlongACurveSelectModels:
TryToExecuteScript( "DuplicateAlongACurve_SelectModels.py" )
elif event.Id == gScriptDuplicateAlongACurveDeform:
TryToExecuteScript( "DuplicateAlongACurve_DeformMesh.py" )
elif event.Id == gScriptDuplicateAlongACurveHardPlot:
TryToExecuteScript( "DuplicateAlongACurve_HardPlot.py" )
elif event.Id == gScriptFixInstancesGeometry:
TryToExecuteScript( "FixInstancesGeometry.py" )
elif event.Id == gScriptGroupedWithNull:
TryToExecuteScript( "GroupedWithNull.py" )
elif event.Id == gScriptInsertParent:
TryToExecuteScript( "InsertParent.py" )
elif event.Id == gScriptFacialRigConnectJoints:
TryToExecuteScript( "FacialRIG_connectJoints.py" )
elif event.Id == gScriptFacialRigCreateVideoPlane:
TryToExecuteScript( "FacialRIG_CreateVideoPlane.py" )
elif event.Id == gScriptFacialRigHardPlot:
TryToExecuteScript( "FacialRIG_HardPlot.py" )
elif event.Id == gScriptFacialRigOpticalPostProcessing:
TryToExecuteScript( "FacialRIG_OpticalPostProcessing.py" )
elif event.Id == gScriptFacialRigSetupConstraints:
TryToExecuteScript( "FacialRIG_SetupConstraints.py" )
elif event.Id == gScriptFromLayeredToSingleTexture:
TryToExecuteScript( "FromLayeredToSingleTexture.py" )
elif event.Id == gScriptRefreshModelsTextures:
TryToExecuteScript( "RefreshModelsTextures.py" )
elif event.Id == gScriptRefreshTextures:
TryToExecuteScript( "RefreshTextures.py" )
elif event.Id == gScriptLightsHardTurnOnOff:
TryToExecuteScript( "Lights_HardTurnOnOff.py" )
elif event.Id == gScriptLightsTurnOnOff:
TryToExecuteScript( "Lights_TurnOnOff.py" )
elif event.Id == gScriptMakeCircle:
TryToExecuteScript( "MakeCircle.py" )
def MenuEditingItemActivate(control, event):
    # Handler for the MoEDITING menu; the items are not wired up yet, so this
    # only logs that an item was activated.
    print "editing item"
def MenuToolsItemActivate(control, event):
    # Open the MoTOOLS tool window matching the activated menu item id.
    # The if/elif chain is replaced by an id -> tool-name table; ids with no
    # entry (the separator ids) are ignored, exactly as before.
    toolNames = {
        0: "BlendShape Manager",
        1: "Sculpt Brush Tool",
        3: "Composite Master",
        4: "Dynamic Masks",
        5: "Render Layers",
        6: "Render Options",
        8: "Textures Browser",
        20: "MoPlugs Settings Tool",
    }
    toolName = toolNames.get(event.Id)
    if toolName is not None:
        FBPopNormalTool(toolName)
#
def MenuHelpItemActivate(control, event):
    """Handle MoHELP menu activation: open a web resource or show About."""
    urls = {
        1: 'http://neill3d.com',
        2: 'https://drive.google.com/folderview?id=0B83XZ3TC_S6PT0ZycFlabW9aWmM&usp=sharing',
    }
    if event.Id in urls:
        webbrowser.open(urls[event.Id])
    elif event.Id == 3:
        FBMessageBox( "MoPlugs Project", AboutInfo, "Ok" )
#
def MenuInit():
    # Build the four MoPlugs menus (MoHELP, MoTOOLS, MoEDITING, MoSCRIPTS)
    # after the standard "Window" menu.  Each menu is created only if it does
    # not already exist; entries with an empty title act as separators.
    # NOTE(review): items are added only on first creation of each menu —
    # confirm re-running this script is meant to be a no-op for existing menus.
    menuMgr = FBMenuManager()
    #
    ## MoHELP Menu
    #
    menu = menuMgr.GetMenu("MoHELP")
    if not menu:
        menu = menuMgr.InsertAfter(None,"Window", "MoHELP").Menu
        menu.OnMenuActivate.Add(MenuHelpItemActivate)
        menu.InsertLast( "Visit MoPlugs store", 0 );
        menu.InsertLast( "Visit Neill3d blog", 1 );
        menu.InsertLast( "", 100 );
        menu.InsertLast( "Documentation", 2 );
        menu.InsertLast( "", 200 );
        menu.InsertLast( "&About", 3 );
    #
    ## MoTOOLS Menu
    #
    menu = menuMgr.GetMenu("MoTOOLS")
    if not menu:
        menu = menuMgr.InsertAfter(None,"Window", "MoTOOLS").Menu
        menu.OnMenuActivate.Add(MenuToolsItemActivate)
        menu.InsertLast( "Settings", 20 );
        menu.InsertLast( "", 21 );
        menu.InsertLast( "BlendShape Tool", 0 );
        menu.InsertLast( "&Sculpt brush", 1 );
        menu.InsertLast( "", 2 );
        menu.InsertLast( "&Composite Master", 3 );
        menu.InsertLast( "&Dynamic Mask Tool", 4 );
        menu.InsertLast( "&Render Layers Tool", 5 );
        menu.InsertLast( "Render &Layers Options", 6 );
        menu.InsertLast( "", 7 );
        menu.InsertLast( "&Textures Browser", 8 );
    #
    ## MoEditing Menu
    #
    menu = menuMgr.GetMenu("MoEDITING")
    if not menu:
        menu = menuMgr.InsertAfter(None,"Window", "MoEDITING").Menu
        menu.OnMenuActivate.Add(MenuEditingItemActivate)
        menu.InsertLast( "Make Snapshot", 0 )
        menu.InsertLast( "Combine Models", 1)
        menu.InsertLast( "", 100 )
        menu.InsertLast( "Center Pivot", 2)
        menu.InsertLast( "", 200 )
        menu.InsertLast( "ReCompute Normals", 3)
        menu.InsertLast( "Invert Normals", 4)
        menu.InsertLast( "", 300 )
        menu.InsertLast( "Save Blendshapes As...", 5 )
        menu.InsertLast( "Load Blendshapes...", 6)
        menu.InsertLast( "", 400 )
        menu.InsertLast( "Optimize skin weights", 7 )
    #
    ## MoSCRIPTS Menu (titles come from the gScripts id -> title map)
    #
    menu = menuMgr.GetMenu("MoSCRIPTS")
    if not menu:
        menu = menuMgr.InsertAfter(None,"Window", "MoSCRIPTS").Menu
        menu.OnMenuActivate.Add(MenuScriptsItemActivate)
        menu.InsertLast( gScripts[gScriptAssignUVSet], gScriptAssignUVSet )
        menu.InsertLast( gScripts[gScriptSelChAndCR], gScriptSelChAndCR )
        menu.InsertLast( "", 100 )
        menu.InsertLast( gScripts[gScriptCheckFacialDataVideoPlane], gScriptCheckFacialDataVideoPlane)
        menu.InsertLast( "", 200 )
        menu.InsertLast( gScripts[gScriptCopyBlendShapeAnimation], gScriptCopyBlendShapeAnimation)
        menu.InsertLast( gScripts[gScriptSaveLocalPositions], gScriptSaveLocalPositions )
        menu.InsertLast( gScripts[gScriptExtractTrajectoryPath], gScriptExtractTrajectoryPath)
        menu.InsertLast( "", 300 )
        menu.InsertLast( gScripts[gScriptDuplicateAlongACurveSetup], gScriptDuplicateAlongACurveSetup)
        menu.InsertLast( gScripts[gScriptDuplicateAlongACurveRotate90], gScriptDuplicateAlongACurveRotate90)
        menu.InsertLast( gScripts[gScriptDuplicateAlongACurveSelectNulls], gScriptDuplicateAlongACurveSelectNulls)
        menu.InsertLast( gScripts[gScriptDuplicateAlongACurveSelectModels], gScriptDuplicateAlongACurveSelectModels)
        menu.InsertLast( gScripts[gScriptDuplicateAlongACurveDeform], gScriptDuplicateAlongACurveDeform)
        menu.InsertLast( gScripts[gScriptDuplicateAlongACurveHardPlot], gScriptDuplicateAlongACurveHardPlot)
        menu.InsertLast( "", 350 )
        menu.InsertLast( gScripts[gScriptFixInstancesGeometry], gScriptFixInstancesGeometry)
        menu.InsertLast( gScripts[gScriptGroupedWithNull], gScriptGroupedWithNull )
        menu.InsertLast( gScripts[gScriptInsertParent], gScriptInsertParent )
        menu.InsertLast( "", 400 )
        menu.InsertLast( gScripts[gScriptFacialRigConnectJoints], gScriptFacialRigConnectJoints )
        menu.InsertLast( gScripts[gScriptFacialRigCreateVideoPlane], gScriptFacialRigCreateVideoPlane)
        menu.InsertLast( gScripts[gScriptFacialRigHardPlot], gScriptFacialRigHardPlot)
        menu.InsertLast( gScripts[gScriptFacialRigOpticalPostProcessing], gScriptFacialRigOpticalPostProcessing)
        menu.InsertLast( gScripts[gScriptFacialRigSetupConstraints], gScriptFacialRigSetupConstraints)
        menu.InsertLast( "", 500 )
        menu.InsertLast( gScripts[gScriptFromLayeredToSingleTexture], gScriptFromLayeredToSingleTexture )
        menu.InsertLast( gScripts[gScriptRefreshModelsTextures], gScriptRefreshModelsTextures )
        menu.InsertLast( gScripts[gScriptRefreshTextures], gScriptRefreshTextures )
        menu.InsertLast( "", 600 )
        menu.InsertLast( gScripts[gScriptLightsHardTurnOnOff], gScriptLightsHardTurnOnOff )
        menu.InsertLast( gScripts[gScriptLightsTurnOnOff], gScriptLightsTurnOnOff )
        menu.InsertLast( "", 700 )
        menu.InsertLast( gScripts[gScriptMakeCircle], gScriptMakeCircle )
# Build the menus as soon as this startup script is executed.
MenuInit()
|
Neill3d/MoPlugs
|
PythonScripts/Startup/MoPlugs_menu.py
|
Python
|
bsd-3-clause
| 12,583
|
[
"VisIt"
] |
0416f97b1d830e26d0875b73ced691fcd6f4bda5858f58f8a37110867b21ea96
|
#!/usr/bin/env python
"""
Show how the posterior gets updated as a set of coin tosses are generated for a biased coin.
Assume a flat prior on the bias weighting and also a Gaussian prior.
"""
import matplotlib.pyplot as pl
from scipy.stats import norm, kstest
import numpy as np
# set plot to render labels using latex
# set plot to render labels using latex
pl.rc('text', usetex=True)
pl.rc('font', family='serif')
pl.rc('font', size=14)
fig = pl.figure(figsize=(12,10), dpi=100)
# numbers of coin tosses (cumulative counts, one subplot per entry)
nt = [0, 1, 2, 5, 10, 50, 100, 500, 1000]
bias = 0.3 # biased towards tails
# bias values
H = np.linspace(0., 1., 1000)
# priors: uniform on [0,1], and a Gaussian centred on a fair coin (H=0.5)
priorflat = np.ones(len(H))
sigmah = 0.05
muh = 0.5
priorgauss = (1./np.sqrt(2.*np.pi*sigmah**2))*np.exp(-0.5*(H-muh)**2/sigmah**2)
curheads = 0. # current number of heads
for i, n in enumerate(nt):
    # generate coin tosses (making sure to include previous ones)
    if n > 0:
        # NOTE(review): nprev is first assigned at the end of the loop body,
        # so this branch relies on nt[0] == 0 — reordering nt would break it.
        rc = np.random.rand(n-nprev)
        curheads = curheads + len(np.zeros(n-nprev)[rc<bias])
    # compute likelihood (binomial kernel, normalisation handled below)
    L = H**curheads * (1.-H)**(n-curheads)
    # compute posterior
    post1 = L*priorflat
    post2 = L*priorgauss
    # normalise posterior via trapezoidal integration over H
    post1 = post1/np.trapz(post1, H)
    post2 = post2/np.trapz(post2, H)
    # plot posterior
    pl.subplot(3,3,i+1)
    pl.plot(H, post1, 'b', label='$p(H|d,I)$ Uniform prior')
    pl.plot(H, post2, 'r', label='$p(H|d,I)$ Gaussian prior')
    pl.plot(H, priorgauss, 'k--', label='$p(H|I)$ Gaussian')
    ax = pl.gca()
    ax.set_yticklabels([])
    ax.set_yticks([])
    if i == 0:
        pl.legend(loc='lower left', fancybox=True, framealpha=0.3, prop={'size': 12})
    if i % 3 == 0:
        ax.set_ylabel('$p(H|d,I)$')
    if i > 5:
        ax.set_xlabel('$H$')
    else:
        ax.set_xticklabels([])
    # annotate each panel with its cumulative toss count
    ax.text(0.65, 0.8*ax.get_ylim()[1], '$n=%d$' % n, fontsize=16)
    nprev = n
pl.tight_layout()
#fig.subplots_adjust(bottom=0.12)
pl.show()
fig.savefig('../coin_toss_2.pdf')
|
mattpitkin/GraWIToNStatisticsLectures
|
figures/scripts/coin_toss_2.py
|
Python
|
mit
| 1,891
|
[
"Gaussian"
] |
2aa54a9e0bbc5223d4de4c62615c654d9ccdddd496c9521244620b0c3e5827f6
|
# $HeadURL: $
''' FTSstorageUnbanAction
'''
from DIRAC import gConfig, gLogger, S_ERROR, S_OK
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
from DIRAC.ResourceStatusSystem.PolicySystem.Actions.BaseAction import BaseAction
from DIRAC.ResourceStatusSystem.Utilities import RssConfiguration
#from DIRAC.ResourceStatusSystem.Utilities.InfoGetter import InfoGetter
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getFTS3Servers
import fts3.rest.client.easy as fts3
import json
from DIRAC import gLogger
__RCSID__ = '$Id: $'
class FTSStorageUnbanAction( BaseAction ):
  '''
  Action that sends to the FTS server an unbanning request for a given
  storage element.
  '''

  def __init__( self, name, decisionParams, enforcementResult, singlePolicyResults, clients ):
    super( FTSStorageUnbanAction, self ).__init__( name, decisionParams, enforcementResult,
                                                   singlePolicyResults, clients )
    # enforcementResult supposed to look like:
    # {
    #   'Status'        : <str>,
    #   'Reason'        : <str>,
    #   'PolicyActions' : <list>,
    #   [ 'EndDate'     : <str> ]
    # }
    # decisionParams supposed to look like:
    # {
    #   'element'     : None,
    #   'name'        : None,
    #   'elementType' : None,
    #   'statusType'  : None,
    #   'status'      : None,
    #   'reason'      : None,
    #   'tokenOwner'  : None
    # }

  def run( self ):
    '''
    Checks it has the parameters it needs and tries to unban the storage element.

    :return: S_OK with the per-endpoint ban lists, or S_ERROR.
    '''
    storageElement = self.decisionParams[ 'name' ]
    elementType = self.decisionParams[ 'elementType' ]
    if elementType != 'StorageElement':
      return S_ERROR( "'elementType' should be 'StorageElement'" )
    return self._unbanStorageElement( storageElement )

  def _unbanStorageElement( self, storageElement ):
    '''
    Sends an unban request for *storageElement* to every configured FTS3 server.

    :return: S_OK( { endpoint : current SE ban list } ), or S_ERROR when no
             valid proxy is available.
    '''
    # NOTE(review): assumes getFTS3Servers() returned OK — confirm callers
    # can tolerate a KeyError here otherwise.
    endpoints = getFTS3Servers()[ 'Value' ]
    blacklist = {}
    for endpoint in endpoints:
      # The REST API can also pick the proxy up from the environment, but we
      # resolve it explicitly so a missing proxy fails fast with S_ERROR.
      proxyInfo = getProxyInfo()
      if not proxyInfo.get( 'OK' ):
        return S_ERROR( "Proxy not found!" )
      try:
        proxyPath = proxyInfo[ 'Value' ][ 'path' ]
      except Exception as e:
        # str(e) instead of e.message: BaseException.message no longer exists
        # in Python 3 (and was deprecated since Python 2.6)
        return S_ERROR( str( e ) )
      context = fts3.Context( endpoint, proxyPath )
      fts3.unban_se( context, storageElement )
      # record the endpoint's current SE ban list so the caller can inspect it
      blacklist[ endpoint ] = json.loads( context.get( "ban/se" ) )
    return S_OK( blacklist )
################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
marcelovilaca/DIRAC
|
ResourceStatusSystem/PolicySystem/Actions/FTSStorageUnbanAction.py
|
Python
|
gpl-3.0
| 2,800
|
[
"DIRAC"
] |
8489683cbd53a54e8aa52ee020a96a52565d36847d5c3f047265d24397970021
|
"""
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from . import check_random_state, deprecated
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs import csr_row_norms
from .validation import array2d, NonBLASDotWarning
def norm(x):
    """Compute the Euclidean or Frobenius norm of x.

    Returns the Euclidean norm when x is a vector, the Frobenius norm when x
    is a matrix (2-d array).
    """
    arr = np.asarray(x)
    # use the BLAS nrm2 routine matched to the array's dtype
    blas_nrm2 = linalg.get_blas_funcs(['nrm2'], (arr,))[0]
    return blas_nrm2(arr)
def row_norms(X, squared=False):
    """Row-wise (squared) Euclidean norm of X.

    Equivalent to (X * X).sum(axis=1), but also supports CSR sparse matrices
    and does not create an X.shape-sized temporary.

    Performs no input validation.
    """
    if issparse(X):
        sq_norms = csr_row_norms(X)
    else:
        # einsum computes the per-row dot product without a temporary
        sq_norms = np.einsum('ij,ij->i', X, X)
    return sq_norms if squared else np.sqrt(sq_norms)
def fast_logdet(A):
    """Compute log(det(A)) for A symmetric.

    Equivalent to np.log(np.linalg.det(A)) but more robust:
    returns -Inf if det(A) is non-positive or is not defined.
    """
    sign, logdet = np.linalg.slogdet(A)
    return logdet if sign > 0 else -np.inf
def _impose_f_order(X):
    """Return (Fortran-ordered view of X, was_transposed) for BLAS calls."""
    # important to access flags instead of calling np.isfortran:
    # this catches corner cases.
    if X.flags.c_contiguous:
        return array2d(X.T, copy=False, order='F'), True
    return array2d(X, copy=False, order='F'), False
def _fast_dot(A, B):
    """BLAS gemm product of two 2-d float arrays.

    Raises ValueError whenever the inputs are unsuitable, so the caller can
    fall back to np.dot.
    """
    # shape check adopted from '_dotblas.c'
    if B.shape[0] != A.shape[A.ndim - 1]:
        raise ValueError
    same_float_types = (A.dtype == B.dtype and
                        all(M.dtype in (np.float32, np.float64) for M in (A, B)))
    if not same_float_types:
        warnings.warn('Data must be of same type. Supported types '
                      'are 32 and 64 bit float. '
                      'Falling back to np.dot.', NonBLASDotWarning)
        raise ValueError
    if A.ndim != 2 or B.ndim != 2 or min(A.shape) == 1 or min(B.shape) == 1:
        raise ValueError
    # scipy 0.9 compliant API
    gemm = linalg.get_blas_funcs(['gemm'], (A, B))[0]
    A, trans_a = _impose_f_order(A)
    B, trans_b = _impose_f_order(B)
    return gemm(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
# NOTE: np_version comes from .fixes; on modern NumPy this whole branch is
# skipped and fast_dot is simply np.dot.
if np_version < (1, 7, 2) and _have_blas_gemm():
    def fast_dot(A, B):
        """Compute fast dot products directly calling BLAS.

        This function calls BLAS directly while warranting Fortran contiguity.
        This helps avoiding extra copies `np.dot` would have created.
        For details see section `Linear Algebra on large Arrays`:
        http://wiki.scipy.org/PerformanceTips

        Parameters
        ----------
        A, B: instance of np.ndarray
            Input arrays. Arrays are supposed to be of the same dtype and to
            have exactly 2 dimensions. Currently only floats are supported.
            In case these requirements aren't met np.dot(A, B) is returned
            instead. To activate the related warning issued in this case
            execute the following lines of code:

            >> import warnings
            >> from sklearn.utils.validation import NonBLASDotWarning
            >> warnings.simplefilter('always', NonBLASDotWarning)
        """
        try:
            return _fast_dot(A, B)
        except ValueError:
            # Maltyped or malformed data: fall back to the safe generic path.
            return np.dot(A, B)
else:
    fast_dot = np.dot
def density(w, **kwargs):
    """Compute density of a sparse vector.

    Return the fraction of non-zero entries, a value between 0 and 1.
    """
    if w is None:
        return 0
    if hasattr(w, "toarray"):
        # sparse input: nnz over the full matrix size
        return float(w.nnz) / (w.shape[0] * w.shape[1])
    return float((w != 0).sum()) / w.size
def safe_sparse_dot(a, b, dense_output=False):
    """Dot product that handles the sparse matrix case correctly.

    Uses BLAS GEMM as replacement for numpy.dot where possible
    to avoid unnecessary copies.
    """
    if not (issparse(a) or issparse(b)):
        return fast_dot(a, b)
    product = a * b
    if dense_output and hasattr(product, "toarray"):
        product = product.toarray()
    return product
def randomized_range_finder(A, size, n_iter, random_state=None):
    """Computes an orthonormal matrix whose range approximates the range of A.

    Parameters
    ----------
    A: 2D array
        The input data matrix
    size: integer
        Size of the return array
    n_iter: integer
        Number of power iterations used to stabilize the result
    random_state: RandomState or an int seed (0 by default)
        A random number generator instance

    Returns
    -------
    Q: 2D array
        A (size x size) projection matrix, the range of which
        approximates well the range of the input matrix A.

    Notes
    -----
    Follows Algorithm 4.3 of
    Finding structure with randomness: Stochastic algorithms for constructing
    approximate matrix decompositions
    Halko, et al., 2009 (arXiv:909) http://arxiv.org/pdf/0909.4061
    """
    rng = check_random_state(random_state)
    # sample the range of A by projecting random gaussian vectors
    # of shape (A.shape[1], size) through A
    Y = safe_sparse_dot(A, rng.normal(size=(A.shape[1], size)))
    # power iterations further 'imprint' the top singular vectors of A in Y
    for _ in xrange(n_iter):
        Y = safe_sparse_dot(A, safe_sparse_dot(A.T, Y))
    # extract an orthonormal basis of the sampled range
    Q, _ = linalg.qr(Y, mode='economic')
    return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter=0,
                   transpose='auto', flip_sign=True, random_state=0,
                   n_iterations=None):
    """Computes a truncated randomized SVD

    Parameters
    ----------
    M: ndarray or sparse matrix
        Matrix to decompose
    n_components: int
        Number of singular values and vectors to extract.
    n_oversamples: int (default is 10)
        Additional number of random vectors to sample the range of M so as
        to ensure proper conditioning. The total number of random vectors
        used to find the range of M is n_components + n_oversamples.
    n_iter: int (default is 0)
        Number of power iterations (can be used to deal with very noisy
        problems).
    transpose: True, False or 'auto' (default)
        Whether the algorithm should be applied to M.T instead of M. The
        result should approximately be the same. The 'auto' mode will
        trigger the transposition if M.shape[1] > M.shape[0] since this
        implementation of randomized SVD tend to be a little faster in that
        case).
    flip_sign: boolean, (True by default)
        The output of a singular value decomposition is only unique up to a
        permutation of the signs of the singular vectors. If `flip_sign` is
        set to `True`, the sign ambiguity is resolved by making the largest
        loadings for each component in the left singular vectors positive.
    random_state: RandomState or an int seed (0 by default)
        A random number generator instance to make behavior
    n_iterations: deprecated alias of n_iter (removed in 0.16).

    Notes
    -----
    This algorithm finds a (usually very good) approximate truncated
    singular value decomposition using randomization to speed up the
    computations. It is particularly fast on large matrices on which
    you wish to extract only a small number of components.

    References
    ----------
    * Finding structure with randomness: Stochastic algorithms for constructing
      approximate matrix decompositions
      Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061

    * A randomized algorithm for the decomposition of matrices
      Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
    """
    # backward-compat shim for the renamed keyword
    if n_iterations is not None:
        warnings.warn("n_iterations was renamed to n_iter for consistency "
                      "and will be removed in 0.16.", DeprecationWarning)
        n_iter = n_iterations
    random_state = check_random_state(random_state)
    n_random = n_components + n_oversamples
    n_samples, n_features = M.shape
    if transpose == 'auto' and n_samples > n_features:
        transpose = True
    if transpose:
        # this implementation is a bit faster with smaller shape[1]
        M = M.T
    Q = randomized_range_finder(M, n_random, n_iter, random_state)
    # project M to the (k + p) dimensional space using the basis vectors
    B = safe_sparse_dot(Q.T, M)
    # compute the SVD on the thin matrix: (k + p) wide
    Uhat, s, V = linalg.svd(B, full_matrices=False)
    del B
    U = np.dot(Q, Uhat)
    if flip_sign:
        U, V = svd_flip(U, V)
    if transpose:
        # transpose back the results according to the input convention
        # (U and V swap roles when the decomposition was done on M.T)
        return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
    else:
        return U[:, :n_components], s[:n_components], V[:n_components, :]
def logsumexp(arr, axis=0):
    """Computes the sum of arr assuming arr is in the log domain.

    Returns log(sum(exp(arr))) while minimizing the possibility of
    over/underflow.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils.extmath import logsumexp
    >>> a = np.arange(10)
    >>> np.log(np.sum(np.exp(a)))
    9.4586297444267107
    >>> logsumexp(a)
    9.4586297444267107
    """
    arr = np.rollaxis(arr, axis)
    # Normalizing by the max is what accumulates the least error
    # once the log is taken.
    vmax = arr.max(axis=0)
    return np.log(np.sum(np.exp(arr - vmax), axis=0)) + vmax
def weighted_mode(a, w, axis=0):
    """Returns an array of the weighted modal (most common) value in a.

    If there is more than one such value, only the first is returned.
    The bin-count for the modal bins is also returned.

    This is an extension of the algorithm in scipy.stats.mode.

    Parameters
    ----------
    a : array_like
        n-dimensional array of which to find mode(s).
    w : array_like
        n-dimensional array of weights for each value
    axis : int, optional
        Axis along which to operate. Default is 0, i.e. the first axis.

    Returns
    -------
    vals : ndarray
        Array of modal values.
    score : ndarray
        Array of weighted counts for each mode.

    See Also
    --------
    scipy.stats.mode
    """
    if axis is None:
        a, w = np.ravel(a), np.ravel(w)
        axis = 0
    else:
        a, w = np.asarray(a), np.asarray(w)
    if a.shape != w.shape:
        # broadcast the weights up to the shape of a
        w = np.zeros(a.shape, dtype=w.dtype) + w
    out_shape = list(a.shape)
    out_shape[axis] = 1
    mostfrequent = np.zeros(out_shape)
    best_counts = np.zeros(out_shape)
    # try every distinct value and keep the one with the largest weight sum
    for candidate in np.unique(np.ravel(a)):
        weights = np.zeros(a.shape)
        mask = (a == candidate)
        weights[mask] = w[mask]
        counts = np.expand_dims(np.sum(weights, axis), axis)
        mostfrequent = np.where(counts > best_counts, candidate, mostfrequent)
        best_counts = np.maximum(counts, best_counts)
    return mostfrequent, best_counts
def pinvh(a, cond=None, rcond=None, lower=True):
    """Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.

    Calculate a generalized inverse of a symmetric matrix using its
    eigenvalue decomposition and including all 'large' eigenvalues.

    Parameters
    ----------
    a : array, shape (N, N)
        Real symmetric or complex hermetian matrix to be pseudo-inverted
    cond, rcond : float or None
        Cutoff for 'small' eigenvalues.
        Singular values smaller than rcond * largest_eigenvalue are considered
        zero.
        If None or -1, suitable machine precision is used.
    lower : boolean
        Whether the pertinent array data is taken from the lower or upper
        triangle of a. (Default: lower)

    Returns
    -------
    B : array, shape (N, N)

    Raises
    ------
    LinAlgError
        If eigenvalue does not converge
    """
    a = np.asarray_chkfinite(a)
    s, u = linalg.eigh(a, lower=lower)

    if rcond is not None:
        cond = rcond
    if cond is None or cond == -1:
        # pick a cutoff scaled to the dtype's machine precision
        t = u.dtype.char.lower()
        cond = {'f': 1E3, 'd': 1E6}[t] * np.finfo(t).eps

    # unlike the SVD case, eigh can lead to negative eigenvalues
    keep = np.abs(s) > cond * np.max(np.abs(s))
    inv_diag = np.zeros_like(s)
    inv_diag[keep] = 1.0 / s[keep]

    return np.dot(u * inv_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
    """Generate a cartesian product of input arrays.

    Parameters
    ----------
    arrays : list of array-like
        1-D arrays to form the cartesian product of.
    out : ndarray
        Array to place the cartesian product in.

    Returns
    -------
    out : ndarray
        2-D array of shape (M, len(arrays)) containing cartesian products
        formed of input arrays.

    Examples
    --------
    >>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
    array([[1, 4, 6],
           [1, 4, 7],
           [1, 5, 6],
           [1, 5, 7],
           [2, 4, 6],
           [2, 4, 7],
           [2, 5, 6],
           [2, 5, 7],
           [3, 4, 6],
           [3, 4, 7],
           [3, 5, 6],
           [3, 5, 7]])

    References
    ----------
    http://stackoverflow.com/q/1208118
    """
    arrays = [np.asarray(x).ravel() for x in arrays]
    dtype = arrays[0].dtype

    n = np.prod([x.size for x in arrays])
    if out is None:
        out = np.empty([n, len(arrays)], dtype=dtype)

    # BUG FIX: use floor division. On Python 3 plain '/' yields a float,
    # which then breaks the slice bounds below.
    m = n // arrays[0].size
    out[:, 0] = np.repeat(arrays[0], m)
    if arrays[1:]:
        # fill the remaining columns recursively, then tile the result
        cartesian(arrays[1:], out=out[0:m, 1:])
        for j in range(1, arrays[0].size):
            out[j * m:(j + 1) * m, 1:] = out[0:m, 1:]
    return out
def svd_flip(u, v):
    """Sign correction to ensure deterministic output from SVD.

    Adjusts the columns of u and the rows of v such that the loadings in the
    columns in u that are largest in absolute value are always positive.
    Both u and v are modified in place.

    Parameters
    ----------
    u, v: arrays
        The output of `linalg.svd` or `sklearn.utils.extmath.randomized_svd`,
        with matching inner dimensions so one can compute `np.dot(u * s, v)`.

    Returns
    -------
    u_adjusted, v_adjusted: arrays with the same dimensions as the input.
    """
    # row index of the largest-magnitude loading of each column
    max_abs_cols = np.argmax(np.abs(u), axis=0)
    # np.arange instead of xrange: Python-3 compatible and stays in NumPy
    signs = np.sign(u[max_abs_cols, np.arange(u.shape[1])])
    u *= signs
    v *= signs[:, np.newaxis]
    return u, v
# Deprecated shim kept for backward compatibility only.
@deprecated('to be removed in 0.17; use scipy.special.expit or log_logistic')
def logistic_sigmoid(X, log=False, out=None):
    """Logistic function, ``1 / (1 + e ** (-x))``, or its log."""
    # local import avoids a hard dependency at module load time
    from .fixes import expit
    fn = log_logistic if log else expit
    return fn(X, out)
def log_logistic(X, out=None):
    """Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.

    This implementation is numerically stable because it splits positive and
    negative values::

        -log(1 + exp(-x_i))     if x_i > 0
        x_i - log(1 + exp(x_i)) if x_i <= 0

    For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.

    Parameters
    ----------
    X: array-like, shape (M, N)
        Argument to the logistic function
    out: array-like, shape: (M, N), optional:
        Preallocated output array.

    Returns
    -------
    out: array, shape (M, N)
        Log of the logistic function evaluated at every point in x

    Notes
    -----
    See the blog post describing this implementation:
    http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
    """
    is_1d = X.ndim == 1
    # np.float64 instead of the np.float alias, which was removed in
    # NumPy >= 1.20 (same dtype: the alias pointed at the builtin float).
    X = array2d(X, dtype=np.float64)

    n_samples, n_features = X.shape

    if out is None:
        out = np.empty_like(X)

    _log_logistic_sigmoid(n_samples, n_features, X, out)

    if is_1d:
        return np.squeeze(out)
    return out
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
Adapated from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
    """Ensure `X.min()` >= `min_value` by shifting the data if necessary."""
    current_min = safe_min(X)
    if current_min >= min_value:
        return X
    if issparse(X):
        raise ValueError("Cannot make the data matrix"
                         " nonnegative because it is sparse."
                         " Adding a value to every entry would"
                         " make it no longer sparse.")
    return X + (min_value - current_min)
|
johnowhitaker/bobibabber
|
sklearn/utils/extmath.py
|
Python
|
mit
| 18,306
|
[
"Gaussian"
] |
07221364e2ba90a9d49d6488d521d23aa4c91612d9dad67128a39654577f2ac9
|
#! /usr/bin/env python3
#
# SCANIT - Control A spectrometer and collect data
#
# LICENSE:
# This work is licensed under the Creative Commons Zero License
# Creative Commons CC0.
# To view a copy of this license, visit
# http://directory.fsf.org/wiki/License:CC0
# or send a letter to:
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
#
# Author: James Luscher, jluscher@gmail.com
#
import sys, string, time
import serial
#
from pathlib import Path
#
from tkinter import *
from tkinter import font
from tkinter import filedialog
from tkinter.ttk import Progressbar
# from tkinter import ttk
# from tkinter.scrolledtext import *
import tkinter.messagebox as mBox
# import tkinter.simpledialog as simpledialog
import matplotlib
from matplotlib.widgets import Cursor
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import numpy
from numpy import searchsorted
# ---------------------------------------------------------------------
# Application-wide constants and mutable module state for the SCANIT GUI.
# ---------------------------------------------------------------------
siTitle = 'SCANIT for RetroSPEX [v033]' # Program name and version
TANBG = '#F8E2CD' # Background color
WARNC = '#FFBBFF' # Warning color (pinkish)
ACTIVB = '#F07748' # activebackground color for buttons
#
jjltest = True # print messages, testing
comtest = False # print communication diagnostic details (much!)
## code based on example from: http://robotic-controls.com/learn/python-guis/tkinter-serial
# modified for Python3
#
# Serial() argument added: rtscts=1
#
## NOTE: PATCH @ http://sourceforge.net/p/pyserial/patches/37/
# /usr/local/lib/python3.4/dist-packages/serial/serialposix.py
# at (about) line # 480:
# except select.error as e:
# # ignore EAGAIN errors. all other errors are shown
# * # see also http://www.python.org/dev/peps/pep-3151/#select
# * # patch: James Luscher (re:
# * # http://sourceforge.net/p/pyserial/patches/37/ )
# * #if e[0] != errno.EAGAIN:
# * if e.errno != errno.EAGAIN:
# raise SerialException('read failed: %s' % (e,))
#
# communication commands -- reference card for the RetroSPEX serial
# protocol; this string is never transmitted, it is documentation only.
COMmands = '''
Command => Response Command sent => Response received
----------------------- LOW LEVEL -- FPGA --------------
? => <Help text> Help (display SPEX commands)
c => <header> Clear Screen
i => i Warm Initialize
f => f Reset FPGA
r AA => r AA DD Read DD from address AA (hex)
w AA DD => w AA DD Write data DD to address AA (or V/V??)
s => s AA DD Show AdrREG and DataReg (AA DD ??)
p => p FF Report PMT Control Register setting
b => B n Report Button State, 0/1 (Off/On)
v n => v n Verbose n=0/1 (Off/On)
----------------------- HIGH LEVEL -- SPECTROMETER -----
L n => L n Set LED n=0/1 (Off/On)
D n FFFF => D n FFFF Load DAC #n with FFFF (hex)
A n => A n FFFF Report High Voltage on #n
E n => E n Enable PMT counter #n (0~7), Clears count
T 7FFFFFFF => T 7FFFFFFF Set Integration time, milliseconds
> => Wait For Bang Start Measurement ('!' signals Done)
P n => P n FFFEFFFEFFFF Dump PMT counter #n (0~2)
X s7FFFFFFF => X s7FFFFFFF Move eXcitation, s=+/- (direction), 7FFFFFFF (steps)
M s7FFFFFFF => M s7FFFFFFF Move eMission, s=+/- (direction), 7FFFFFFF (steps)
----------------------- CONTROLLER INITIATED ALERTS ----
=> ! FF Limit reached [bits?] Motion (done?) time (done?)
=> # n Button activity (reports state? 0/1 (Off/On))
'''
COMchr0 = list('?cifrwspbvLDAET>PXM') # valid first characters of commands
RSPalert = ['!','#'] # controller-initiated alert prefixes
RSPnorm = ['?','r','s','p','B','A','P'] # normal response prefixes
#make our own buffers
serialPort = None # we always start before any port is found
portName = 'OFFLINE' # ... and any connection established
serOutReady = False # RetroSPEX has been Initialized
#
serInBuffer = "" # 'string' type (character input storage)
serOutBuffer = "".encode() # 'byte' type
serInLines = [] # list of complete input lines
#=====================================================================
## SCANIT Window (GUI window for Spectrometer Control & Data Capture)
#
siWinW = 1260 # width
siWinH = 760 # height
#
siWin = Tk()
siWin.title(siTitle)
siWin['bg'] = TANBG # background color
if jjltest:
    siWin.geometry('+670+50') # shift to right for testing
    transGeom = '+780+250' # ... for 'transient' screens
else:
    siWin.geometry('{}x{}+0+0'.format(siWinW,siWinH))
    transGeom = '+110+200' # ... for 'transient' screens
#siWin.geometry('{}x{}+80+50'.format(siWinW,siWinH))
#siWin.geometry('+50+50') # window in upper-left of screen
#
monoFont10 = font.Font(family='Ubuntu Mono', size=10)
monoFont12 = font.Font(family='Ubuntu Mono', size=12)
monoFont14 = font.Font(family='Ubuntu Mono', size=14)
monoFont16 = font.Font(family='Ubuntu Mono', size=16)
monoFont24 = font.Font(family='Ubuntu Mono', size=24)
#=====================================================================
## Global variables (for Spectrometer Control & Data Capture)
#
#==============
# settings: configuration data (from 'settings.txt')
#
# User Default Settings to be used for Measurement
# (settable and saved/restored)
varEXinc = StringVar() # Setting EX Inc Wavelength (nm)
varEMinc = StringVar() # Setting EM Inc Wavelength (nm)
varTMinc = StringVar() # Setting TM Inc time (s)
varEXslit = StringVar() # Slit size EX (nm)
# NOTE(review): varEXslit is assigned twice here; this second line probably
# meant varEMslit (the comment says EM) -- confirm before changing.
varEXslit = StringVar() # Slit size EM (nm)
varEMhv = StringVar() # EM PMT high voltage (v)
varREFhv = StringVar() # REF PMT high voltage (v)
varREFdiodeG = StringVar() # REF DIODE Gain setting [0,1,2,3]
#
#==============
# Live Data (acquired)
#
varLiveEMhv = StringVar() # Live EM PMT high voltage (v)
varLiveREFhv = StringVar() # Live REF PMT high voltage (v)
varLiveEXpos = StringVar() # Live Excitation position nm
varLiveEMpos = StringVar() # Live Excitation position nm
varLiveSignal = StringVar() # Live Signal (PMT) reading (counts)
varLiveReference = StringVar() # Live Reference (diode/PMT) reading (counts)
#
# Transient states
offLine = True # No Spectrometer connection made (serial - USB)
#
#==============
# scan data acquired
#
varScanDataFileName = StringVar() # File name (path) where Scan Data was saved
varScanDataFileName.set('') # none initially
#
scanDataX = [] # X value sample was taken at (wavelength / time)
scanDataY = [] # Y value of sample - PMT counts
#
ax = None # forward referenc for Plot Object (setPlotTitle())
#
#==============
# background: input data from previous scan (for reference)
#
varRefFileName = StringVar() # File name (path) for Reference Data in Plot
varRefFileName.set('') # none initially
#
inputFileHdr = [] # Header section from fileLoad
inputFileData = [] # Data section from fileload
#
backgroundDataX = [] # X value sample was taken at (wavelength / time)
backgroundDataY = [] # Y value of sample - PMT counts
#
#==============
# dayfile: data about the experiments being done today
#
dayFileData = [] # Data section from fileload / or for writing
#
varDayDate = StringVar() # Date this data was entered
varDayMeaning1 = StringVar() # Meaning of Experiment
varDayMeaning2 = StringVar() # Meaning of Experiment
varDayMeaning3 = StringVar() # Meaning of Experiment
varDayEXslit = StringVar() # Excitation slit wavelength nm
varDayEMslit = StringVar() # Emission slit Wavelength nm
varDayBulb = StringVar() # Bulb Intensity
varDayNotebook = StringVar() # Notebook Page
varDayOther1 = StringVar() # Other comments
varDayOther2 = StringVar() # Other comments
varDayOther3 = StringVar() # Other comments
#
#==============
# type of scan
EXscan = 0
EMscan = 1
TMscan = 2
scanName = [ 'EX', 'EM', 'TM' ]
varScanMode = IntVar() # Determines type of scan taken
#
# settings used for scanned data waveforms
#
varEXwaveStart = StringVar() # Excitation Start Wavelength nm
varEXwaveEnd = StringVar() # Excitation End Wavelength nm
varEXwaveInc = StringVar() # Excitation Inc Wavelength nm
#
varEMwaveStart = StringVar() # Emission Start Wavelength nm
varEMwaveEnd = StringVar() # Emission End Wavelength nm
varEMwaveInc = StringVar() # Emission Inc Wavelength nm
#
varTMwavePause = StringVar() # Pause (s)
varTMwaveEnd = StringVar() # End (s)
#
varEXslit = StringVar() # Slit size EX (nm) -- NOTE(review): original comment said "Inc time (s)"
varEMslit = StringVar() # Slit size EM (nm) -- NOTE(review): original comment said "Inc time (s)"
#
varSpecimenDetails = StringVar() # Description of sample
#
varEXstepsNm = StringVar() # EX StepMotor steps per (nm)
varEMstepsNm = StringVar() # EM StepMotor steps per (nm)
#
varPCTdone = IntVar() # % completion of scan
varPCTdone.set(45) # testing: software completion % ;-)
#
MINnm = 200 # Minimum nanoMeters for monochrometer position
MAXnm = 1000 # Maximum nanoMeters for monochrometer position
#
#
# system and communication constants:
#
thisSys = sys.platform # 'linux','win32,'cygwin','darwin'
firmwareVer = '' # RetroSPEX_Rev-8 (for example)
print('This System: {}'.format(thisSys))
#
# Per-platform serial-port candidates, line endings and fonts.
if thisSys == 'linux':
    portList = ['/dev/ttyACM0','/dev/ttyACM1', '/dev/ttyACM2', 'OFFLINE']
elif thisSys == 'win32':
    # BUG FIX: list(...).append('OFFLINE') returns None, leaving
    # portList == None. Concatenate instead so portList really is
    # ['COM99', 'COM98', ... 'COM2', 'COM1', 'OFFLINE'].
    portList = ['COM' + str(x) for x in range(99, 0, -1)] + ['OFFLINE']
else:
    # BUG FIX: 'messagebox.showifo' was an undefined name plus a typo, and
    # msg_ was a tuple that has no .format(). Use the imported mBox alias.
    mBox.showinfo('ERROR', 'Operating System not recognized: {}'.format(thisSys))
#
EOL = '\n'
OKser = 1 # serial character sent OK. (number sent > 0 Ubuntu)
#
if thisSys == 'linux':
    #monoFont = font.Font(family='Ubuntu Mono', size=10)
    monoFont = font.Font(family='Ubuntu Mono', size=16)
elif thisSys == 'win32':
    monoFont = font.Font(family='Miriam Fixed', size=10)
    EOL = '\r\n'
    OKser = 0 # serial character sent OK (no error).
else:
    # BUG FIX: same undefined 'messagebox.showifo' typo as above.
    mBox.showinfo('ERROR', 'Operating System not recognized: {}'.format(thisSys))
#
#
def setScanMode_FR(mode): # Forward Reference for setting Scan Mode
    """Placeholder; rebound later to the real scan-mode handler."""
    if jjltest:
        print('CALLED: setScanMode_FR(mode) => pass')
#
def setScanMode(mode):
    """Stable entry point that delegates to the current forward reference."""
    if jjltest:
        print('CALLED: setScanMode(mode) => setScanMode_FR(mode)')
    setScanMode_FR(mode)
def updatePlot_FR(): # Forward Reference FUNCTION NAME ... for updating Plot
    """Placeholder; rebound later to the real plot-refresh routine."""
    return None
#
def updatePlot(event=None): # Call the function defined later on...
    """Delegate to the current forward reference (maps old references)."""
    return updatePlot_FR()
#===================
## Utility functions
#
#----
def nop(): # No OPeration (placeholder for function when none required)
    """Do nothing; used where a callback is syntactically required."""
    return None
#----
def notImplemented(msg):
    """Pop a warning dialog flagging a not-yet-implemented feature."""
    mBox.showwarning('NOTICE: Not Implemented', msg)
# Set and Read High Voltage Power Supply
#
# D 1 FFFF -> 1000 v (neg) Emission PMT
# 0 ~ 65535 -> 1000 v : 65.535 / volt
#
# HV 1:
# SET: [ 900 to E666] :: E666/FFFF -> 0.90000 (58982/65535)*1000 = 900.00
# READ: [BCD4 to 900] :: BCD4/D1B6 -> 0.90042 (48340/53686)*1000 = 900.42
#
# 2.048/2.5 = 0.8192 ** ratio of DAC/ADC reference voltages
# 65.535 * 0.8192 = 53.686 ** ADC conversion divisor (53686) / D1B6
#
#
# HV 1:
# VOLTStoHEX = hex(int(volts * 65.535))[2:]
# 900.0 * 65.535 => hex( int( 58982 ))[2:] = 'e666'
# HEXtoVOLTS = int(setHV1str,16) /( 65.535 * 0.8192 )
# (BCD4) 48340 / 53.686 => 900.42
#
#----
def VOLTStoHEX(voltStr):
    """voltStr = '-905.23' (string) -> 'E7BC' ({positive} Hex string)
    DAC: 1000.0 volts full scale (FFFF).
    (for setting DAC output)
    VOLTStoHEX('1000.0') => 'FFFF'
    VOLTStoHEX( '900.0') => 'E666' """
    # (docstring example fixed: 900.0 * 65.535 + 0.5 truncates to 58982 = E666,
    #  matching the HV-1 reference table above, not E665)
    # leftover debug print removed; sign is discarded (DAC value is magnitude)
    return '{:04x}'.format(int(abs(float(voltStr)) * 65.535 + 0.5)).upper()
#
#----
def HEXtoVOLTS(ADChexStr):
    """ADChexStr = 'BCD4' -> '-900' ({negative} string)
    ADC: 1000.0 volts full scale (D1B6).
    (for scaling ADC input)
    HEXtoVOLTS('D1B6') => '-1000'
    HEXtoVOLTS('BCD4') => '-900' """
    raw = int(ADChexStr, 16)
    # 53.686 = 65.535 * 0.8192 (DAC/ADC reference-voltage ratio, see above)
    volts = int(raw / 53.686 + 0.5)
    return '-{}'.format(volts)
#
#----
def updateTitle():
    '''Display com port, spectrometer firmware and System information
    on window title bar.'''
    suffix = (', System: {}'.format(thisSys) +
              ', Port: {}'.format(portName) +
              ', Firmware: {}'.format(firmwareVer))
    siWin.wm_title(siTitle + suffix)
#----
def digitsOnly(text):
    """Keep only digit characters and return them without leading zeros
    ('0' when no digits at all)."""
    kept = ''.join(ch for ch in text if ch in string.digits)
    if kept.strip() == '':
        kept = '0'
    return str(int(kept)) # no leading zeros
#----
def floatOnly(text):
    '''get StringVar's value as float().'''
    whole = ''
    frac = ''
    seen_point = False
    for ch in text:
        if ch == '.' and not seen_point:
            seen_point = True # only the first '.' counts
        elif ch in string.digits:
            if seen_point:
                frac += ch
            else:
                whole += ch
    # suppress leading zeros but keep at least one zero(!)
    whole = whole.lstrip('0') or '0'
    # resolution limited to mS
    frac = frac[:3]
    return whole + '.' + frac
#----
def getVarInt(v):
    '''get StringVar's value as int() (0 when blank).'''
    text = v.get()
    return int(text) if text.strip() else 0
#----
def getVarFloat(v):
    '''get StringVar's float value (0.0 when blank).'''
    text = v.get()
    return float(text) if text.strip() else 0.0
#----
def setFocus(obj):
    """Give keyboard focus to the supplied widget."""
    obj.focus_set()
#----
def toggleBtnVar(var, btn, iconOff, iconOn):
    '''Toggle boolean state of Button and set matching icon; return new state.'''
    new_state = not var
    btn['image'] = iconOn if new_state else iconOff
    return new_state
#----
def getDateToday():
    '''Today's date as e.g. 2015-02-14'''
    year, month, day = time.localtime()[:3]
    return '{}-{:02d}-{:02d}'.format(year, month, day)
#----
def timeNow():
    '''Timestamp as e.g. 2014-02-14_11:14:47'''
    y, mo, d, h, mi, sec = time.localtime()[:6]
    return '{}-{:02d}-{:02d}_{:02d}:{:02d}:{:02d}'.format(y, mo, d, h, mi, sec)
#----
def setPlotTitle():
    '''Plot Title is 2 lines:
    Top; filename where scan data was saved (post acquisition).
    Second; filename where Reference data was loaded from. (background).'''
    # Top: Scan Data Save File (or a synthesized name before any save)
    saved = varScanDataFileName.get()
    if saved == '':
        top = timeNow() + '_' + scanName[varScanMode.get()] + ':_' + \
              varSpecimenDetails.get()[0:40]
    else:
        top = saved
    # Bottom: Reference Data Load File (empty when none loaded)
    bottom = ''
    ref = varRefFileName.get()
    if ref != '':
        bottom = 'Reference Data File: ' + ref
    ax.set_title(top + '\n' + bottom)
#----
def readSerial():
    """Poll the serial port, assemble complete input lines, and handle the
    one-time RetroSPEX synchronization handshake.

    Reschedules itself via siWin.after() -- never blocks the Tk loop.
    Complete lines are appended to the module-level serInLines list.
    """
    global serInBuffer, serOutReady, serInLines, serOutBuffer, firmwareVer
    full = True
    while full:
        # earlier defensive variant kept for reference:
        # try:
        #     c = serialPort.read() # attempt to read a 'byte' from Serial
        #     if c != b'':
        #         print('readSerial(), RECV: {}'.format(c))
        #     else:
        #         full = False
        # except:
        #     siWin.after(20, readSerial) # check serial again soon
        #     return
        c = serialPort.read() # attempt to read a 'byte' from Serial
        # if ord(c) > 127:
        if c == b'':
            # nothing pending: NOTE(review) this returns WITHOUT rescheduling;
            # confirm the caller re-arms readSerial elsewhere
            return
        else:
            c = chr( ord(c)%128 ) # force 7-bit data, bytes -> str
        # if c > byte(0x7F):
        #     c = b'!' # 7 bit data only
        #print('RECV: {}'.format(c))
        # c = c.decode('utf-8') # CONVERT 'bytes' to 'string' type
        # was anything read and decoded?
        if len(c) == 0:
            if comtest:
                print('^',end='')
            full = False
            continue
        # else: # print each received character
        #     print('RECV: {}'.format(c))
        # check if character is a delimeter
        if c == '\r':
            continue # don't want returns. ignore it
        # synch up first time: before serOutReady, wait for the banner line
        if serOutReady == False and c == '\n':
            #
            # test for RetroSPEX initialized
            #
            if serInBuffer.startswith('RetroSPEX'):
                if jjltest:
                    print('\nRetroSPEX DETECTED')
                firmwareVer = serInBuffer[:]
                updateTitle() # Title includes RetroSPEX Rev-#
            serInBuffer = ''
            serInLines = []
            serOutReady = True # RetroSPEX is ready !
            serOutBuffer = b'*' # send a response byte
            xmitSerial() # acknowledge synch to RetroSPEX
            continue
        if c == '\n':
            if serInBuffer.startswith('Retro'):
                firmwareVer = serInBuffer[:] # RetroSPEX firmware version
                updateTitle()
            else:
                # the buffer contains an entire line - less the 'newline'
                # proccess the line now.
                if jjltest:
                    print('LINE: {}'.format(serInBuffer))
                serInLines.append(serInBuffer) # add to list of input lines
            serInBuffer = '' # empty the buffer
        else:
            serInBuffer += c # add to the buffer
    siWin.after(20, readSerial) # check serial again soon
    return
#----
def xmitSerial():
    '''Drain the serial transmit buffer one byte at a time.

    Only transmits once the handshake has completed (serOutReady).
    Bytes that fail to send (write status != OKser) stay at the head of
    the buffer and are retried.
    '''
    global serOutBuffer,serialPort,serOutReady
    if comtest:
        print('.',end='')
    while serOutReady and len(serOutBuffer) > 0: # Anything to send out?
        # send byte
        c = serOutBuffer[0:1]
        nSent = serialPort.write( c )
#        print('nSent: {}'.format(nSent))
        if nSent != OKser: # show transmit error
            if comtest:
                patrn = 'xmitSerial({}),ERROR: status => {}, retry.'
                print(patrn.format(c,repr(nSent)))
        else: # GOOD send
            if comtest:
                patrn = 'xmitSerial({}), SENT: status => {}.'
                print(patrn.format(c,repr(nSent)))
            # remove sent character from the buffer
            serOutBuffer = serOutBuffer[1:]
        # NOTE(review): this after() fires on EVERY loop iteration, and a
        # second one fires below after the loop - each call can therefore
        # schedule multiple future xmitSerial() invocations.  Looks like
        # an accidental duplicate; confirm which scheduling is intended.
        siWin.after(2, xmitSerial) # check serial out until empty
    siWin.after(20, xmitSerial) # check serial out until empty
    return
#----
def writeSerial(text):
    '''Append *text* (encoded to bytes) to the transmit queue and
    kick the serial sender.'''
    global serOutBuffer
    serOutBuffer += text.encode()
    xmitSerial()
    return
timeCmd = 0
#----
def sendCommand(text, limit):
    '''Send command 'text' to the SPEX and wait up to 'limit' seconds
    (float) for the echoed response.'''
    global timeCmd
    print('sendCommand("{}", limit={})'.format(text,limit))
    # stamp the send time so checkSerialIn() can report elapsed seconds
    timeCmd = int(time.time())
    writeSerial(text + EOL)
    # poll (blocking) until the echo arrives or the timeout expires
    checkSerialIn(text, limit)
    return
#
#----
def writePositions():
    '''Write monochrometer positions to the "positions.txt" file.

    Single-line format read back by readPositions():
        EX: <nm> EM: <nm>
    '''
    data = 'EX: ' + varLiveEXpos.get() + ' EM: ' + varLiveEMpos.get() + '\n'
    # context manager guarantees the handle is flushed and closed
    with open('positions.txt', 'w') as fo:
        fo.write(data)
    return
#----
def readPositions():
    '''Recover monochrometer positions from the "positions.txt" file.

    Expects the one-line format written by writePositions():
        EX: <nm> EM: <nm>
    If the file cannot be opened, both positions default to '0' and a
    fresh file is written.
    '''
    try: # one line file: "EX: nnnn EM: mmmm"
        with open('positions.txt') as fh:
            lines = fh.read().splitlines()
        for s in lines:
            t = s.split()
            if len(t) == 4 and t[0] == 'EX:' and t[2] == 'EM:':
                varLiveEXpos.set(t[1])
                varLiveEMpos.set(t[3])
    except OSError:
        # missing/unreadable file: fall back to zero and persist it
        varLiveEXpos.set('0')
        varLiveEMpos.set('0')
        writePositions()
    return
def dataFileREAD():
    '''Read a data file, separating it into lists of header and data lines.

    Pops a file-open dialog, records the chosen name as the reference
    file for the plot title, then splits the file at the '...' (end of
    header) and '___' (end of data) marker lines into the globals
    inputFileHdr and inputFileData (both stripped).
    '''
    #
    global inputFileHdr # Header section from fileLoad
    global inputFileData # Data section from fileload
    #
    inputFileHdr = []
    inputFileData = []
    #
    dataFileTypes = [("Data ", ("*.txt","*.TXT")), ]
    dataDir = '~/SCANS'
    fInp = filedialog.askopenfilename(filetypes = dataFileTypes
                                      ,initialdir=dataDir)
    # save file name for Plot Display
    varRefFileName.set( fInp )
    setPlotTitle() # update Title on plot
    print('RefFileName: {}'.format(fInp))
    #
    # context manager closes the handle (the original leaked it)
    with open(fInp) as fh:
        tmpFile = fh.read().splitlines()
    #
    header = True # looking for header lines first
    #
    for line in tmpFile: # examine each line in list
        if header:
            if line.startswith('...'): # end of Header line mark
                header = False
            else:
                inputFileHdr.append(line.strip()) # save Header lines
        else:
            if line.startswith('___'): # end of Data line mark
                break
            else:
                inputFileData.append(line.strip()) # save data lines
    return
#----
def checkSerialIn(response,limit):
    '''Poll for the expected serial *response* within *limit* seconds
    (float).  Returns True when the first queued line matches, False on
    a mismatch or when the timeout expires.'''
    global timeCmd
    total_ms = int( float(limit) * 1000 )
    for elapsed in range(0, total_ms, 20): # one pass per 20 ms
        print('checkSerialIn("{}")'.format(int(time.time())-timeCmd))
        time.sleep( 0.02 )
        if not serInLines:
            continue # nothing received yet
        print('checkSerialIn({},{})'.format(response,elapsed))
        line = serInLines[0].strip() # peek at the next line
        if line != response:
            # mismatch: leave the line queued for the caller to inspect
            print('checkSerialIn(-): BAD response: ({})'.format(line))
            return False
        serInLines.pop(0) # consume the matched echo
        print('checkSerialIn(-): OK; got({})'.format(response))
        return True
    print('checkSerialIn(-): timeout.')
    return False # timeout
#
## command/response Queue
#
# queue of outstanding controller exchanges; each entry is a 3-list
# [command-string, expected-response-string, follow-up-callable]
crQ = [] # PATTERN OF "entry": [cmd,resp,nxt]
         # where; cmd = "D 0 0000", rsp = "D 0 0000", nxt = <func>
#----
def crQsend(entry):
    '''Enqueue *entry* ([cmd, expected-response, next-action]) on crQ.

    When the queue was empty and the link is ready, the command is also
    transmitted immediately (2 s echo timeout); otherwise it waits its
    turn in the queue.'''
    global crQ,serOutReady
    #
    print('crQsend(entry): {!r}'.format(entry))
    print('crQsend(entry): crQ: {}'.format(crQ))
    idle = len(crQ) == 0 and serOutReady # EMPTY && READY?
    if idle:
        print('\n\ncrQsend(entry): queue empty & ready -> send command')
    crQ.append(entry)
    if idle:
        sendCommand(entry[0],2.0) # send command (2 sec timeout)
    return
#----
def crQnext(cmd):
    '''Response to sent command has been received,
    take whatever next action (if any) is specified.

    Unimplemented placeholder: intended to pop the completed entry from
    crQ and invoke its follow-up callable.'''
    global crQ
    pass
#----
def cmdHVonEM():
    '''Queue the command raising the EMission PMT high voltage to its
    configured operating level.
    cmd = "D 1 <hexV>" -> rsp = "D 1 <hexV>" '''
    packet = "D 1 " + VOLTStoHEX(varEMhv.get())
    # the controller echoes the command verbatim; no follow-up action
    crQsend([packet, packet, nop])
    return
#----
def cmdHVoffEM():
    '''Queue the command dropping the EMission PMT high voltage to 0 V.
    cmd = "D 1 0000" -> rsp = "D 1 0000" '''
    off = 'D 1 0000'
    crQsend([off, off, nop])
    return
#----
def cmdHVonREF():
    '''Queue the command raising the REFerence PMT high voltage to its
    configured operating level.
    cmd = "D 0 <hexV>" -> rsp = "D 0 <hexV>" '''
    packet = "D 0 " + VOLTStoHEX(varREFhv.get())
    # command and expected response are identical; no follow-up action
    crQsend([packet, packet, nop])
    return
#----
def cmdHVoffREF():
    '''Queue the command dropping the REFerence PMT high voltage to 0 V.
    cmd = "D 0 0000" -> rsp = "D 0 0000" '''
    off = 'D 0 0000'
    crQsend([off, off, nop])
    return
#----
def cmdADCconv():
    '''Request ADC conversions of High Voltage levels.
    cmd = "H" -> rsp = "! 01"

    Unimplemented placeholder.'''
    global crQ
    pass
#----
def cmdADCread():
    '''Read in the ADC measurements of High Voltage levels.
    cmd = "A" -> rsp = "A 0000 0000"

    Unimplemented placeholder.'''
    global crQ
    pass
#----
def cmdADCproc():
    '''Process the ADC measurements of High Voltage levels.
    cmd = "A" -> rsp = "A 0000 0000"

    Unimplemented placeholder.'''
    global crQ
    pass
## Serial - ensure non-blocking
# -- NOTE: rtscts=1 hardware handshake for 'ready' with data
#
# establish serial module
# baudrate for RetroSPEX is 115200 (ArduinoDue max)
#
# look for serial port
def portTry(name):
    '''Test 'name' to see if it is an available serial port.

    Opens the port at 115200 baud (non-blocking) and watches the byte
    stream for the 'RetroSPEX...' banner.  On success, completes the
    handshake (sets serOutReady, replies with a newline) and returns
    True; returns False when the port cannot be opened or no banner is
    seen within the trial loops.
    '''
    global serialPort, firmwareVer, serInBuffer, serInLines, serOutReady
    #TODO add timeout for testing a found serial port in portTry()
    print("TODO add timeout for testing a found serial port in portTry()")
    if comtest:
        print('\tportTry(name): {}'.format(name))
    try:
        serialPort = serial.Serial(port=name
                                   ,baudrate=115200
                                   ,timeout=0
                                   ,rtscts=1
                                   ,dsrdtr=True
                                   ,writeTimeout=0)
#        #serialPort.open() just opened !
#        serialStatus = serialPort.isOpen()
#        print('\nportTry({}): serialPort.isOpen = {}'.format(name,serialStatus))
#        return serialStatus
        if comtest:
            print('\tOPEN...')
    except:
        # NOTE(review): bare except - also hides e.g. KeyboardInterrupt;
        # serial.SerialException is probably what is meant here.
#        print('portTry(name): NOT portName: {}'.format(name))
#        return False
        if comtest:
            print('\tCLOSED')
        return False
    #
    buf = '' # input buffer for port testing
    for trial in range(10):
        if comtest:
            print('\t\ttrial={}'.format(trial))
        for t in range (43):
            i = b''
            # busy-wait until a byte arrives (timeout=0 reads return b'')
            while i == b'':
                i = serialPort.read()
            # NOTE(review): after the while loop i != b'', so this branch
            # appears unreachable - confirm before relying on it.
            if i == b'':
                if comtest:
                    print('\t\t read(): NULL')
            # mask to 7-bit ASCII and convert to str
            c = chr( ord(i)%128 )
            if comtest:
                print('\t\t ord(c):{}, chr( ord(c)%128 ):{}'.format(ord(i),c))
            if c == '\n':
                if buf.startswith('RetroSPEX'): # This is RetroSPEX !!
                    firmwareVer = buf[:] # save for Version info.
                    updateTitle() # Title includes RetroSPEX Rev-#
                    serInBuffer = ''
                    serInLines = []
                    serOutReady = True # RetroSPEX is ready !
                    serialPort.write( b'\n' ) # send a response byte
                    return True
                else:
                    buf = ''
            else:
                buf = buf + c
                if comtest:
                    print('\t\t buf: {}'.format(buf))
    return False
def portScan():
    '''Search for serial USB port for Spectrometer, or "OFFLINE".

    Walks portList, trying each candidate with portTry(); the sentinel
    'OFFLINE' entry is expected to be last and selects offline mode.
    Side effect: the loop variable leaves the global portName set to the
    chosen port (or 'OFFLINE').
    '''
    global serialPort, portList, portName, serInLines
    #
    for portName in portList :
        if portName != 'OFFLINE':
            if portTry(portName): # a serialPort found
                readSerial() # start monitoring serial input
                if jjltest:
                    print('\nFOUND serialPort={}'.format(serialPort))
                    print('FOUND portName: {}'.format(portName))
                updateTitle() # new "Port" name
                return
        else: # end of list 'OFFLINE' reached, no port found
            if comtest:
                print('Serial Port not found... Operating OFFLINE')
                # show ports tried
                for name in portList[0:-1]:
                    print('tried: {}'.format(name))
            #
            # operating OFFLINE
            updateTitle() # new "Port" or "OFFLINE"
            return
    # NOTE(review): reached only if portList is empty (or lacks the
    # 'OFFLINE' sentinel) - with the normal list every path above
    # returns first, so these lines look like dead code.
    title = siTitle + ', Port: {} on System: {}'
    siWin.wm_title( title.format(portName, thisSys))
    return
#
## 'sanityCheck' functions
#
# COMPARISONS:
# Within [min <= var <= max]
# Order [var1 < var2]
# Min [min <= var]
#
# Button lookup dictionary - defined as buttons are created below
# maps a 3-letter Entry key (e.g. 'EXS') to its Entry widget so
# scanSanityCheck() can recolor the matching field; filled in as the
# buttons are created below.
btnLookup = {} # entries of form: 'EXS':<EX-Start_button>
               #                  'EXE':<EX-END_button>
# validation table consumed by scanSanityCheck():
# each row is [test-spec, Entry-key] where test-spec is one of
#   ['Within', var, min, max]  -- min <= var <= max
#   ['Order' , var1, var2]     -- var1 < var2
#   ['Min'   , var, min]       -- min <= var
#              test variable min max EntryType
chkEntVal =[ [['Within', varEXwaveStart, MINnm, MAXnm] , 'EXS' ]
           , [['Within', varEXwaveEnd, MINnm, MAXnm] , 'EXE' ]
           , [['Order' , varEXwaveStart, varEXwaveEnd] , 'EXE' ]
           , [['Min' , varEXinc, 1] , 'EXI' ]
           , [['Within', varEMwaveStart, MINnm, MAXnm] , 'EMS' ]
           , [['Within', varEMwaveEnd, MINnm, MAXnm] , 'EME' ]
           , [['Order' , varEMwaveStart, varEMwaveEnd] , 'EME' ]
           , [['Min' , varEMinc, 1] , 'EMI' ]
           , [['Min' , varTMwaveEnd, 0.100] , 'TME' ]
           , [['Order' , varTMwavePause, varTMwaveEnd] , 'TME' ]
           , [['Min' , varTMinc, 0.001] , 'TMI' ]
           ]
#
def scanSanityCheck(warn = False):
    '''Check that measurement parameters have "sane" values.

    Walks the chkEntVal table; any failing Entry field is colored WARNC
    (sane ones are reset to white).  Disabled Entry fields are skipped.
    Returns True when every enabled field passes.

    The *warn* argument (popup message on failure) is accepted for
    compatibility but not yet implemented.
    '''
    #
    isSane = True # start assuming that no errors were found ;-)
    #
    for e in chkEntVal:
        test,entryType = e # get test list and Entry-Type
        #
        # are any of these Entry objects 'DISABLED'?
        # - don't check values for disabled Entry fields
        if btnLookup[entryType]['state'] == DISABLED:
            continue # try next test
        #
        # default to the "value OK" color; a failing test overrides it.
        # (previously bgColor could be left unbound if a test-spec kind
        # was unrecognized, raising NameError at the assignment below)
        bgColor = 'white'
        #
        if test[0] == 'Min': # is entry at least equal to the minimum
            # 'T...' entries hold float time values, others are ints
            if entryType[0] == 'T':
                var = getVarFloat(test[1])
            else:
                var = getVarInt(test[1])
            if var < test[2]: # BELOW minimum = Error
                isSane = False
                bgColor = WARNC
        elif test[0] == 'Within': # entry not OUTSIDE limits
            var = getVarInt(test[1])
            limLow = test[2]
            limHi = test[3]
            if var < limLow or var > limHi: # outside range
                isSane = False
                bgColor = WARNC
        elif test[0] == 'Order': # entry 1 less-than entry 2
            if entryType[0] == 'T': # float value
                print('scanSanityCheck() #318... test[1]={}, '
                      'test[2]={}'.format(test[1],test[2]))
                var1 = getVarFloat(test[1])
                var2 = getVarFloat(test[2])
                print('scanSanityCheck() #322... var1={}, var2={}'.format(
                    var1,var2))
            else:
                var1 = getVarInt(test[1])
                var2 = getVarInt(test[2])
            if var1 >= var2: # improper order
                isSane = False
                bgColor = WARNC
        #
        # set the selected color for the Entry object
        btnObj = btnLookup[entryType]
        btnObj['bg'] = bgColor # set button color
    return isSane
#
## 'legacy' data file input functions
def dataFileMGET():
    '''Read Data file, separate into header and data.
    Parse header into measurement parameters.
    Set the parameters for taking another scan.

    Only the classic "Emission Scan" header format is handled; any
    other scan type prints a message (under jjltest) and exits the
    program via sys.exit(0).
    '''
    global inputFileHdr # Header section from fileLoad
    #
    dataFileREAD() # read in data file, prepare header list
    #
    # Parse Header information - "classic format"
    # Emission only - for now
    scanIs = None
    for line in inputFileHdr:
        if line.startswith('Emission Scan'):
            scanIs = EMscan
            break
    #
    if scanIs == EMscan: # restore measurement variables
        #
        setScanMode(EMscan)
        #
#        varEMwaveStart = StringVar() # Emission Start Wavelength nm
#        varEMwaveEnd = StringVar() # Emission End Wavelength nm
#        varEMwaveInc = StringVar() # Emission Inc Wavelength nm
#        varTMinc = StringVar() # Time Inc time S
#        varEXwaveStart = StringVar() # Excitation Start Wavelength nm
        #
        for line in inputFileHdr:
            if line.startswith('Start '): # Start , End
                s,e = line.split(',')
                s = s.split(' ')[1] # "Start 5.000000e+002"
                n = int( float( s))
                varEMwaveStart.set( str(n))
                #
                e = e.strip()
                e = e.split(' ')[1] # "End 7.000000e+002"
                n = int( float( e))
                varEMwaveEnd.set( str(n))
                continue
            if line.startswith('Increment '):
                c,t = line.split(',')
                c = c.split(' ')[1] # "Increment 1.000000e+000"
                n = int( float( c))
                # NOTE(review): this sets varEMwaveInc, but the sanity
                # table (chkEntVal) validates varEMinc - confirm these
                # are not meant to be the same variable.
                varEMwaveInc.set( str(n))
                #
                t = t.strip()
                t = t.split(' ')[2] # "Integration Time 1.000000e-001"
                n = float( t)
                varTMinc.set( str(n))
                continue
            if line.startswith('Excit Mono Slits:'):
                continue
            if line.startswith('Excit Mono'):
                x = line.split(' ')[2] # "Excit Mono 4.880000e+002"
                n = int( float( x))
                varEXwaveStart.set( str(n))
    else:
#    if scanIs != EMscan: # Error
        if jjltest:
            print("Can't handle non-Emission Scan yet.")
        # hard exit: unsupported scan type aborts the whole program
        sys.exit(0)
    scanSanityCheck()
    return
def dataFileLOAD():
    '''Load a data file: restore its measurement parameters, then parse
    its sample lines into the background (reference) X/Y arrays and
    refresh the plot.'''
    global inputFileData # Data section from fileload
    global backgroundDataX # X value sample was taken at (wavelength / time)
    global backgroundDataY # Y value of sample - PMT counts
    #
    # read the file and restore the measurement parameters
    dataFileMGET()
    #
    backgroundDataX = []
    backgroundDataY = []
    #
    # each data line is "<position>\t<value>"
    for row in inputFileData:
        wavelength, counts = row.split('\t')
        backgroundDataX.append( int( float( wavelength )))
        backgroundDataY.append( float( counts ))
    updatePlot()
#
## 'dayfile.txt' - functions for recording Experimental Plan
#
#
# 'dayfile.txt' format:
#
# DATE: 2015-01-29
# Meaning of Experiment:
# #m#... (text: additional lines of meaning)
# Slit Widths EX: 2 (integer in nm)
# Slit Widths EM: 2 (integer in nm)
# Bulb Intensity: ?? (integer in ??)
# Notebook page: ?? (text)
# Other comments:
# #c#... (text: additional lines of comments)
#
# dayFileData = [] # Data section from fileload
# #
# varDayDate = StringVar() # Date this data was entered
# varDayMeaning1 = StringVar() # Meaning of Experiment
# varDayMeaning2 = StringVar() # Meaning of Experiment
# varDayMeaning3 = StringVar() # Meaning of Experiment
# varEXslit = StringVar() # Excitation slit size nm
# varEMslit = StringVar() # Emission slit size nm
# varDayBulb = StringVar() # Measured Bulb Intensity
# varDayNotebook = StringVar() # Notebook Page for Experiment Data
# varDayOther1 = StringVar() # Other comments
# varDayOther2 = StringVar() # Other comments
# varDayOther3 = StringVar() # Other comments
#
def makeDayFile():
    '''Create a modal GUI screen for entering Experimental Data, then
    write the entries to 'dayfile.txt'.

    This data is constant for each day and recorded with data scans.
    Blocks (wait_window) until the dialog's DONE button is pressed.
    '''
    #
    if jjltest:
        print('makeDayFile()')
    #
    varDayDate.set( getDateToday() )
    #
    froot = Toplevel()
    froot.title('Edit Experiment Information for {}'.format(varDayDate.get()))
    froot.geometry(transGeom)
    #siWin.withdraw()
    #
    # ======== top section: three "Meaning of Experiment" lines
    #
    #-------
    frootFrame = Frame(froot, bg = TANBG)
    frootFrame.grid()
    #-------
    dayTopFrame = LabelFrame(frootFrame, bg = TANBG, borderwidth=4
                             ,text=' Meaning of Experiment: '
                             , font=monoFont14)
    dayTopFrame.grid(row = 0, padx=4, pady=4, sticky=NSEW)
    #
    #------- line 1 (gets initial focus)
    varDayMeaning1.set('')
    dayMeanEnt1 = Entry(dayTopFrame, textvariable=varDayMeaning1
                        ,border=2, relief=SUNKEN, width=60
                        ,font=monoFont14 )
    dayMeanEnt1.grid(row=1, padx=4, pady=0, sticky=EW)
    dayMeanEnt1.focus_set()
    #------- line 2; <Return> in line 1 hops here
    varDayMeaning2.set('')
    dayMeanEnt2 = Entry(dayTopFrame, textvariable=varDayMeaning2
                        ,border=2, relief=SUNKEN, width=60
                        ,font=monoFont14 )
    dayMeanEnt2.grid(row=2, padx=4, pady=0, sticky=EW)
    dayMeanEnt1.bind("<Return>", lambda e: setFocus(dayMeanEnt2))
    #------- line 3
    varDayMeaning3.set('')
    dayMeanEnt3 = Entry(dayTopFrame, textvariable=varDayMeaning3
                        ,border=2, relief=SUNKEN, width=60
                        ,font=monoFont14 )
    dayMeanEnt3.grid(row=3, padx=4, pady=0, sticky=EW)
    dayMeanEnt2.bind("<Return>", lambda e: setFocus(dayMeanEnt3))
    #
    # ======== middle section: single-value fields
    #
    #-------
    dayMidFrame = Frame(frootFrame, bg = TANBG, borderwidth=0)
    dayMidFrame.grid(row = 1, sticky=NSEW)
    #
    # Slit Width EX:
    #-------
    daySlitExLab = Label(dayMidFrame, text='Slit Width EX:'
                         , font=monoFont14, bg = TANBG )
    daySlitExLab.grid(row=0, sticky=W)
    #-------
    daySlitExEnt = Entry(dayMidFrame, textvariable=varEXslit
                         ,border=2, relief=SUNKEN, width=20
                         ,font=monoFont14 )
    daySlitExEnt.grid(row=0, column=1, padx=4, pady=4, sticky=E)
    dayMeanEnt3.bind("<Return>", lambda e: setFocus(daySlitExEnt))
    #
    # Slit Width EM:
    #-------
    daySlitEmLab = Label(dayMidFrame, text='Slit Width EM:'
                         , font=monoFont14, bg = TANBG )
    daySlitEmLab.grid(row=1, sticky=W)
    #-------
    daySlitEmEnt = Entry(dayMidFrame, textvariable=varEMslit
                         ,border=2, relief=SUNKEN, width=20
                         ,font=monoFont14 )
    daySlitEmEnt.grid(row=1, column=1, padx=4, pady=4, sticky=E)
    daySlitExEnt.bind("<Return>", lambda e: setFocus(daySlitEmEnt))
    #
    # Bulb Intensity:
    #-------
    dayBulbIntLab = Label(dayMidFrame, text='Bulb Intensity:'
                          , font=monoFont14, bg = TANBG )
    dayBulbIntLab.grid(row=2, sticky=W)
    #-------
    dayBulbIntEnt = Entry(dayMidFrame, textvariable=varDayBulb
                          ,border=2, relief=SUNKEN, width=20
                          ,font=monoFont14 )
    dayBulbIntEnt.grid(row=2, column=1, padx=4, pady=4, sticky=E)
    daySlitEmEnt.bind("<Return>", lambda e: setFocus(dayBulbIntEnt))
    #
    # Notebook Page:
    #-------
    dayNbPageLab = Label(dayMidFrame, text='Notebook Page:'
                         , font=monoFont14, bg = TANBG )
    dayNbPageLab.grid(row=3, sticky=W)
    #-------
    dayNbPageEnt = Entry(dayMidFrame, textvariable=varDayNotebook
                         ,border=2, relief=SUNKEN, width=20
                         ,font=monoFont14 )
    dayNbPageEnt.grid(row=3, column=1, padx=4, pady=4, sticky=E)
    dayBulbIntEnt.bind("<Return>", lambda e: setFocus(dayNbPageEnt))
    #
    # ======== bottom section: three "Other Comments" lines
    #-------
    dayBotFrame = LabelFrame(frootFrame, bg = TANBG, borderwidth=4
                             ,text=' Other comments: ', font=monoFont14)
    dayBotFrame.grid(row = 2, padx=4, pady=4, sticky=NSEW)
    #-------
    dayOtherEnt1 = Entry(dayBotFrame, textvariable=varDayOther1
                         ,border=2, relief=SUNKEN, width=60
                         ,font=monoFont14 )
    dayOtherEnt1.grid(padx=4, pady=0, sticky=EW)
    dayNbPageEnt.bind("<Return>", lambda e: setFocus(dayOtherEnt1))
    #-------
    dayOtherEnt2 = Entry(dayBotFrame, textvariable=varDayOther2
                         ,border=2, relief=SUNKEN, width=60
                         ,font=monoFont14 )
    dayOtherEnt2.grid(padx=5, pady=0, sticky=EW)
    dayOtherEnt1.bind("<Return>", lambda e: setFocus(dayOtherEnt2))
    #-------
    dayOtherEnt3 = Entry(dayBotFrame, textvariable=varDayOther3
                         ,border=2, relief=SUNKEN, width=60
                         ,font=monoFont14 )
    dayOtherEnt3.grid(padx=6, pady=0, sticky=EW)
    dayOtherEnt2.bind("<Return>", lambda e: setFocus(dayOtherEnt3))
    #
    # ======== DONE button closes the modal dialog
    #
    def edsetRoot(root=froot):
        pass # placeholder removed - see makeDayFileDone below
    def makeDayFileDone(root=froot):
        '''Close the dialog (bound to both the button and <Return>).'''
        #siWin.deiconify()
        print('makeDayFileDone(root=froot): [A]')
        froot.destroy()
        print('makeDayFileDone(root=froot): [b]')
        return
    #
    #-------
    dayButFrame = Frame(frootFrame, bg = TANBG, borderwidth=4)
    dayButFrame.grid(row = 3, padx=2, pady=2, sticky=NSEW)
    #-------
    dayButBut = Button(dayButFrame, bg = TANBG, borderwidth=4
                       ,text = 'DONE', command = makeDayFileDone
                       ,activebackground=ACTIVB, font=monoFont16)
    dayButBut.grid()
    dayOtherEnt3.bind("<Return>", lambda e: setFocus(dayButBut))
    dayButBut.bind("<Return>", makeDayFileDone)
    #
    froot.transient(siWin)
    froot.grab_set()
    siWin.wait_window(froot)
    #
    # ======== NOW write out the data that was entered
    # NOTE(review): this list is a LOCAL that shadows the module-level
    # dayFileData read by checkDayFile() - confirm that is intended.
    #
    dayFileData = [ 'DATE: ' + getDateToday()
                  , 'Meaning of Experiment: '
                  ]
    dayFileData.append( '# ' + varDayMeaning1.get() )
    dayFileData.append( '# ' + varDayMeaning2.get() )
    dayFileData.append( '# ' + varDayMeaning3.get() )
    dayFileData.extend(
        [ 'Slit Widths EX: ' + varEXslit.get()
        , 'Slit Widths EM: ' + varEMslit.get()
        , 'Bulb Intensity: ' + varDayBulb.get()
        , 'Notebook page: ' + varDayNotebook.get()
        , 'Other comments: '
        ] )
    dayFileData.append( '# ' + varDayOther1.get() )
    dayFileData.append( '# ' + varDayOther2.get() )
    dayFileData.append( '# ' + varDayOther3.get() )
    #
    # context manager guarantees the file is closed (previously leaked)
    with open('dayfile.txt','w') as dayf:
        dayf.write( '\n'.join(dayFileData) )
    #
    print('makeDayFile(): CREATED')
    print('dayFileData: {}'.format(dayFileData))
    return
#
def checkDayFile():
    '''Read 'dayfile.txt' and, if it is missing, empty, or not dated
    today, (re)create it via makeDayFile().'''
    global dayFileData
    #
    try:
        # with-statement closes the handle even if read() fails
        with open('dayfile.txt','r') as dayf:
            dayFileData = dayf.read().splitlines()
    except OSError:
        print('dayfile.txt does not exist, CREATE (and write) it.')
        makeDayFile()
        return
    #
    # file have data ?
    if len(dayFileData)<1: # not one line !
        makeDayFile() # create a new file
        return
    #
    # first line must carry today's date, e.g. "DATE: 2015-02-14"
    date = dayFileData[0].strip()
    if date.startswith( 'DATE: ' + getDateToday()) :
        print('checkDayFile() CURRENT')
        return # file has current data
    # stale date - create a new file
    makeDayFile()
    return
#
## Settings Read (default settings, etc.) for measurement
#
def readSettings():
    '''Read 'settings.txt' and recover default values.

    First loads hard-coded "factory" defaults into the Tk settings
    variables, then overwrites each with any matching "key: value"
    line found in 'settings.txt' (site settings).  A missing or
    malformed file leaves the factory defaults in place.
    '''
    if jjltest:
        print('readSettings()')
    #
    # First set these to:
    # "Factory Default Settings" (if no others are established)
    #
    varEXinc.set('1') # EX Inc Wavelength (nm)
    varEMinc.set('1') # EM Inc Wavelength (nm)
    varTMinc.set('0.1') # TM Inc time (s)
    varEXslit.set('2.9') # EX slit width (nm)
    varEMslit.set('2.9') # EM slit width (nm)
    varEMhv.set('-900') # EM PMT high voltage (v)
    varREFdiodeG.set('0') # REF DIODE Gain setting [0,1,2,3]
    # NOTE(review): inherited comment said -450 here but the code sets
    # '0'; keeping the code's value - confirm which is intended.
    varREFhv.set('0') # REF PMT high voltage (v)
    # CALIBRATION SETTINGS:
    varEXstepsNm.set('10') # EX Stepper Motor Cal: steps/nm
    varEMstepsNm.set('10') # EM Stepper Motor Cal: steps/nm
    #
    # settings.txt key -> Tk variable dispatch table
    setters = { 'EXinc:': varEXinc
              , 'EMinc:': varEMinc
              , 'TMinc:': varTMinc
              , 'EXslit:': varEXslit
              , 'EMslit:': varEMslit
              , 'EMhv:': varEMhv
              , 'REFdiode:': varREFdiodeG
              , 'REFhv:': varREFhv
              , 'EXstepsNm:': varEXstepsNm
              , 'EMstepsNm:': varEMstepsNm
              }
    #
    # Now OVER-WRITE FACTORY with SITE'S SETTINGS
    try:
        with open('settings.txt','r') as fh:
            lines = fh.read().splitlines()
    except OSError:
        # no SITE SETTINGS WERE SAVED
        # (this message used to print whenever jjltest was set, even
        # when the file existed - now only on a failed open)
        if jjltest:
            print('settings.txt does not exist!')
    else:
        for line in lines:
            items = line.split()
            # skip blank/comment/unknown lines instead of aborting
            if len(items) >= 2 and items[0] in setters:
                setters[items[0]].set(items[1])
    #
    scanSanityCheck() # verify ranges are 'reasonable', tint any not so
    return
#
## Settings Edit (default settings, etc.) for measurement
def editSettings():
    '''Edit 'settings.txt' to alter default values.

    Modal dialog exposing the site default and site established
    (calibration) settings.  DONE writes 'settings.txt', re-applies it
    via readSettings(), and closes the window.
    '''
    #
    edset = Toplevel()
    edset.geometry(transGeom)
    edset.title("Spectrometer Settings")
    #
    #-------
    edsetTop = Frame(edset, bg = TANBG)
    edsetTop.grid()
    #
    # ---- Site Default Settings (increments) ----
    #-------
    edsetPf = LabelFrame(edsetTop, text="Site Default Settings."
                         ,bg = TANBG, font=monoFont16
                         ,borderwidth=6)
    edsetPf.grid(row=0, padx=4, pady=4, sticky=EW)
    #
    # EX default increment (nm)
    #-------
    EXiPL = Label(edsetPf, text = "EX default increment (nm):"
                  , bg = TANBG, font=monoFont14)
    EXiPL.grid(row=0, column=0, padx=4, sticky=W)
    #-------
    EXiPE = Entry(edsetPf, textvariable = varEXinc, font=monoFont14)
    EXiPE.grid(row=0, column=1, padx=4, sticky=E)
    #
    # EM default increment (nm)
    #-------
    EMiPL = Label(edsetPf, text = "EM default increment (nm):"
                  , bg = TANBG, font=monoFont14)
    EMiPL.grid(row=1, column=0, padx=4, sticky=W)
    #-------
    EMiPE = Entry(edsetPf, textvariable = varEMinc, font=monoFont14)
    EMiPE.grid(row=1, column=1, padx=4, sticky=E)
    #
    # TM default increment (S)
    #-------
    TMiPL = Label(edsetPf, text = "TM default increment (S):"
                  , bg = TANBG, font=monoFont14)
    TMiPL.grid(row=2, column=0, padx=4, sticky=W)
    #-------
    TMiPE = Entry(edsetPf, textvariable = varTMinc, font=monoFont14)
    TMiPE.grid(row=2, column=1, padx=4, sticky=E)
    #
    # ---- Site Established Settings (instrument calibration) ----
    # {stepper motor calibration values - should not need changing}
    #-------
    edsetCf = LabelFrame(edsetTop, text="Site Established Settings."
                         ,bg = TANBG, font=monoFont16
                         ,borderwidth=6)
    edsetCf.grid(row=1, padx=4, pady=4, sticky=EW)
    #
    # EX Slit size (nm)
    # (note: label/entry names EXiPL/EXiPE are reused from the frame
    # above; harmless since the earlier widgets are already gridded)
    #-------
    EXiPL = Label(edsetCf, text = "EX Slit size (nm):"
                  , bg = TANBG, font=monoFont14)
    EXiPL.grid(row=0, column=0, padx=4, sticky=W)
    #-------
    EXiPE = Entry(edsetCf, textvariable = varEXslit, font=monoFont14)
    EXiPE.grid(row=0, column=1, padx=4, sticky=E)
    #
    # EM Slit size (nm)
    #-------
    EMiPL = Label(edsetCf, text = "EM Slit size (nm):"
                  , bg = TANBG, font=monoFont14)
    EMiPL.grid(row=1, column=0, padx=4, sticky=W)
    #-------
    EMiPE = Entry(edsetCf, textvariable = varEMslit, font=monoFont14)
    EMiPE.grid(row=1, column=1, padx=4, sticky=E)
    #
    # EM PMT high voltage (v)
    #-------
    EMhvL = Label(edsetCf, text = "EM PMT high voltage (v):"
                  , bg = TANBG, font=monoFont14)
    EMhvL.grid(row=2, column=0, padx=4, sticky=W)
    #-------
    EMhvE = Entry(edsetCf, textvariable = varEMhv, font=monoFont14)
    EMhvE.grid(row=2, column=1, padx=4, sticky=E)
    #
    # REF DIODE Gain setting [0,1,2,3]
    #-------
    REFhvL = Label(edsetCf, text = "REF DIODE Gain Setting:"
                   , bg = TANBG, font=monoFont14)
    REFhvL.grid(row=3, column=0, padx=4, sticky=W)
    #-------
    REFhvE = Entry(edsetCf, textvariable = varREFdiodeG, font=monoFont14)
    REFhvE.grid(row=3, column=1, padx=4, sticky=E)
    #
    # REF PMT high voltage (v)
    #-------
    REFhvL = Label(edsetCf, text = "REF PMT high voltage (v):"
                   , bg = TANBG, font=monoFont14)
    REFhvL.grid(row=4, column=0, padx=4, sticky=W)
    #-------
    REFhvE = Entry(edsetCf, textvariable = varREFhv, font=monoFont14)
    REFhvE.grid(row=4, column=1, padx=4, sticky=E)
    #
    # EX Stepper Motor Cal: steps/nm
    #-------
    EXnmCL = Label(edsetCf, text = "EX motor steps/nm:"
                   , bg = TANBG, font=monoFont14)
    EXnmCL.grid(row=5, column=0, padx=4, sticky=W)
    #-------
    EXnmCE = Entry(edsetCf, textvariable = varEXstepsNm, font=monoFont14)
    EXnmCE.grid(row=5, column=1, padx=4, sticky=E)
    #
    # EM Stepper Motor Cal: steps/nm
    #-------
    EMnmCL = Label(edsetCf, text = "EM motor steps/nm:"
                   , bg = TANBG, font=monoFont14)
    EMnmCL.grid(row=6, column=0, padx=4, sticky=W)
    #-------
    EMnmCE = Entry(edsetCf, textvariable = varEMstepsNm, font=monoFont14)
    EMnmCE.grid(row=6, column=1, padx=4, sticky=E)
    #
    # DONE
    def edsetDone(x=None):
        '''Persist the edited settings, re-apply them, close the dialog.'''
        tempData = [ '# site default settings'
                   , 'EXinc: ' + varEXinc.get()
                   , 'EMinc: ' + varEMinc.get()
                   , 'TMinc: ' + varTMinc.get()
                   , '# site calibrated settings'
                   , 'EXslit: ' + varEXslit.get()
                   , 'EMslit: ' + varEMslit.get()
                   , 'EMhv: ' + varEMhv.get()
                   , 'REFdiode: ' + varREFdiodeG.get()
                   , 'REFhv: ' + varREFhv.get()
                   , 'EXstepsNm: ' + varEXstepsNm.get()
                   , 'EMstepsNm: ' + varEMstepsNm.get()
                   ]
        # with-statement flushes & closes before readSettings() re-reads
        # (the original leaked the handle on a write error)
        with open('settings.txt','w') as fo:
            fo.write( '\n'.join(tempData) )
        # next read in (apply) settings
        readSettings()
        # lastly Close Edit window
        edset.destroy()
        return # ignore
    #
    bDone = Button(edsetTop, text = 'DONE', bg = TANBG, borderwidth=4
                   ,command = edsetDone
                   ,activebackground=ACTIVB, font=monoFont16)
    bDone.grid(row=2,padx=4, pady=2, sticky=W)
    #
    edset.transient(siWin)
    edset.grab_set()
    siWin.wait_window(edset)
    return
#
## initialize hardware for RetroSPEX controller
#
def RetroSPEXinit():
    '''Load initial (safe) settings into the RetroSPEX controller:
    both PMT high-voltage supplies to 0 V; remaining setup (gain,
    LED blink, steppers, live monitoring) is still TODO.'''
    print("#TODO - HV setup")
    #
    # zero both high-voltage supplies first - safe power-on state
    cmdHVoffEM() # EMission PMT voltage -> ZERO
    cmdHVoffREF() # REFerence PMT voltage -> ZERO
    #
    print("#TODO - restore steppers (? or elsewhere ?)")
    print("#TODO - begin 'real time' data monitoring")
    return
#
## Calibration Input (odometer settings) for monochrometer
#
# varLiveEXpos = StringVar() # EX monochrometer position (nm)
# varLiveEMpos = StringVar() # EM monochrometer position (nm)
#
def monoCal():
    '''Get 'odometer' values for the monochrometers.
    (i.e. Calibrate SPEX monochrometers; EX and EM.)

    Modal dialog: DONE is only accepted when both values pass
    monoCheck() (MINnm..MAXnm); accepted values are persisted via
    writePositions().
    '''
    #
    cal = Toplevel()
    cal.geometry(transGeom)
    cal.title("Monochronometer Calibration")
    #
    calTop = Frame(cal, bg = TANBG)
    calTop.grid()
    #
    calf = LabelFrame(calTop, text="Verify odometer values."
                      ,bg = TANBG, font=monoFont16
                      ,borderwidth=6)
    calf.grid(padx=4,pady=4)
    #
    # EXcitation odometer entry (gets initial focus)
    lEX = Label(calf, text = "EXcitation:"
                , bg = TANBG, font=monoFont14)
    lEX.grid(row=0, column=0, padx=4, sticky=E)
    eEX = Entry(calf, textvariable = varLiveEXpos, font=monoFont14)
    eEX.grid(row=0, column=1, padx=4, sticky=E)
    def eEXchk(x=None):
        # any keystroke clears a previous warning tint
        eEX['bg'] = 'white'
        return
    eEX.bind('<KeyRelease>',eEXchk)
    eEX.focus_set()
    #
    # EMission odometer entry
    lEM = Label(calf, text = "EMission:"
                , bg = TANBG, font=monoFont14)
    lEM.grid(row=1, column=0, padx=4, sticky=E)
    eEM = Entry(calf, textvariable = varLiveEMpos, font=monoFont14)
    eEM.grid(row=1, column=1, padx=4, sticky=E)
    def eEMchk(x=None):
        # any keystroke clears a previous warning tint
        eEM['bg'] = 'white'
        return
    eEM.bind('<KeyRelease>',eEMchk)
    #
    #
    def monoCheck(val, ent):
        '''True if val in 'legal' range, False otherwise.
        Sets Entry field pink when val is outside 'legal'.'''
        n = getVarInt(val)
        if n >= MINnm and n<= MAXnm:
            ent['bg'] = 'white' # 'legal' value
            return True
        else:
            ent['bg'] = WARNC # 'illegal' value
            ent.focus_set()
            return False
    #
    def monoCalDone(x=None):
        # Close window if both values are in 'normal' range
        if monoCheck(varLiveEXpos, eEX) and monoCheck(varLiveEMpos, eEM):
            writePositions() # save Verified positions to file
            cal.destroy()
        return # ignore
    #
    bDone = Button(calTop, text = 'DONE', bg = TANBG, borderwidth=4
                   ,command = monoCalDone
                   ,activebackground=ACTIVB, font=monoFont16)
    bDone.grid(row=1, column=0, padx=4, pady=2, sticky=W)
    #
    # modal: block until the dialog is dismissed
    cal.transient(siWin)
    cal.grab_set()
    siWin.wait_window(cal)
    print( '\nmonoCal(): done!')
#
## Power Up - operations to sequence initialization of hardware/software
#
def PowerUp():
    '''Load "settings" and calibrate SPEX.

    Start-up sequence: read the default settings, scan for the serial
    port, and — unless the port scan left us in 'OFFLINE' mode —
    initialize the RetroSPEX controller, restore saved monochrometer
    positions, and run the interactive calibration dialog (monoCal).
    Finishes by opening/verifying the day file.'''
    # NOTE(review): 'serOutReady' is declared global but never read or
    # assigned in this function — possibly vestigial; confirm.
    global serOutReady
    #
    readSettings() # load the Default settings for the spectrometer
    #
    # establish serial connection to RetroSPEX controller")
    # or, set to 'offline' mode to look at files, etc.")
    #
    portScan() # search for serialPort to spectrometer
    #
    if portName != 'OFFLINE':
        #TODO flash LED repeatedly to indicate readyness
        print("TODO flash LED repeatedly to indicate readyness")
        #
        #TODO log "run time" (bulb life? - i.e. need start time)
        #
        #TODO if connected: Initialize RetroSPEX controller settings
        #TODO i.e. HV levels (0 volts initially), 'G'ain setting, etc.
        print("TODO: if connected: Initialize RetroSPEX controller settings")
        RetroSPEXinit()
        #
        print("TODO: if connected: Monochrometers by 10nm (anti-backlash)")
        readPositions()
        #TODO Move Monochrometers by -10nm/+10nm (anti-backlash)
        #
        # perform monochrometer calibration (verification)
        monoCal()
        #TODO ( => always move POS dir (or sept NEG val+10 and then POS 10)
        #TODO ( => real time display values initialize)
    #
    checkDayFile()
    #
    return
#
## Power Down - operations to sequence shutdown of hardware/software
#
def PowerDown():
    '''Shut down the spectrometer hardware before exit.

    When a serial port is connected (not 'OFFLINE'), re-initializes the
    RetroSPEX controller (which drops HV levels back to safe defaults);
    most of the shutdown bookkeeping is still TODO.'''
    #
    if portName != 'OFFLINE':
        #
        #TODO stop scan if one is in process
        print("TODO: scan if one is in process")
        #
        #TODO if connected: Initialize RetroSPEX controller settings
        #TODO i.e. HV levels (0 volts initially), 'G'ain setting, etc.
        print("TODO: if connected: Initialize RetroSPEX controller settings")
        RetroSPEXinit()
        #
        #TODO log "run time" (bulb life? - i.e. need start time)
        #
        #TODO log data such as monochrometer position on shutdown
        print("TODO: log data such as monochrometer position on shutdown")
    #
    return
#====================================
## Scan Control Frame
#
#-------
# Left-hand column of the main window: holds the scan-control,
# ref-data, macro, settings and quit frames.
controlsFrame = Frame(siWin, bg = TANBG, borderwidth=0)
controlsFrame.grid(row=0,column=0, sticky=N)
#
#-------
scfScanControlFrame = LabelFrame(controlsFrame,text='Control',
                              bg = TANBG, borderwidth=4)
scfScanControlFrame.grid(row=0,column=0, sticky=N)
## Scan; START/STOP - Spectrometer scan control
#
# Icon pair for the scan toggle button; which one is shown tracks runOn.
scanStopIcon = PhotoImage(file='icons/icon_scanSTOP.gif')
scanStartIcon = PhotoImage(file='icons/icon_scanSTART.gif')
runOn = False # default == OFF
#
def toggleScan():
    '''Scan Start/Stop - Spectrometer scan control.

    Each press flips between starting and stopping a scan; the button
    icon always shows the action the NEXT press will take.'''
    global runOn
    if runOn:
        # A scan is running -> stop it and show the START icon again.
        if jjltest:
            print('STOPPING NOT IMPLEMENTED YET ;-)')
        runOn = False
        runScfB00['image'] = scanStartIcon
        return
    # Idle -> sanity-check the scan parameters, then start.
    sane = scanSanityCheck( warn = True )
    if jjltest:
        print('STARTING A SCAN NOT IMPLEMENTED YET ;-)')
        sane = False
    if sane:
        runOn = True
        runScfB00['image'] = scanStopIcon
    return
#
#-------
# Scan start/stop toggle button (icon swapped by toggleScan).
runScfB00 = Button(scfScanControlFrame,image=scanStartIcon
                 ,borderwidth = 0,activebackground=ACTIVB
                 ,bg = TANBG, command = toggleScan )
runScfB00.grid(column=0,row=0, padx=2)
## HV - On/Off - High Voltage (red: safety concern)
#
# Icon pair for the HV toggle button; which one is shown tracks hvOn.
hvOffIcon = PhotoImage(file='icons/icon_hvOff.gif')
hvOnIcon = PhotoImage(file='icons/icon_hvOn.gif')
hvOn = False # default == OFF
#
def toggleHV():
    '''HV - On/Off - toggle the PMT high-voltage supplies
    (red icon: safety concern).'''
    global hvOn
    # Flip the flag and swap the button icon in a single helper call.
    hvOn = toggleBtnVar(hvOn, hvScfB01, hvOffIcon, hvOnIcon)
    # Drive both supplies (measurement PMT and reference PMT) together.
    if not hvOn:
        cmdHVoffEM()
        cmdHVoffREF()
    else:
        cmdHVonEM()
        cmdHVonREF()
    return
#
#-------
# HV on/off toggle button (icon swapped by toggleHV).
hvScfB01 = Button(scfScanControlFrame, image = hvOffIcon
                  ,activebackground=ACTIVB
                  ,borderwidth = 0, bg = TANBG, command = toggleHV)
hvScfB01.grid(column=0,row=1)
#====================================
## Ref. Data Frame -- Load previous Scan Data for Reference or Settings recall
#
#-------
filesFrame = LabelFrame(controlsFrame,text='Ref. Data',
                        bg = TANBG, borderwidth=4)
filesFrame.grid(row=1,column=0, padx=2, sticky=NW)
#
# LOAD experimental settings from disk
dataLoadIcon = PhotoImage(file='icons/icon_dataLOAD.gif')
#
#-------
fileFileDataLoad = Button(filesFrame, image=dataLoadIcon
                          , bg = TANBG, activebackground=ACTIVB
                          ,command = dataFileLOAD
                          ,borderwidth = 0, font=monoFont14 )
fileFileDataLoad.grid(row=0, column=0, sticky=NW)
#
#
dataMgetIcon = PhotoImage(file='icons/icon_dataMGET.gif')
#
#-------
fileSettingsGet = Button(filesFrame, image=dataMgetIcon, bg = TANBG
                         ,command = dataFileMGET,activebackground=ACTIVB
                         ,borderwidth = 0, font=monoFont14 )
fileSettingsGet.grid(row=1, column=0,sticky=NW)
#====================================
## Macro Files Frame
#
#-------
macroFrame = LabelFrame(controlsFrame,text='Macro Files',
                        bg = TANBG, borderwidth=4)
macroFrame.grid(row=2,column=0, padx=2, sticky=NW)
#
# LOAD scan settings from disk
macroLoadIcon = PhotoImage(file='icons/icon_macroLOAD.gif')
#
#-------
# NOTE(review): no command= is bound to the macro load/edit buttons, so
# they are currently inert — confirm this is intentional (feature TODO?).
macroFileLoad = Button(macroFrame, image=macroLoadIcon, bg = TANBG
                       ,borderwidth = 0
                       ,activebackground=ACTIVB, font=monoFont14 )
macroFileLoad.grid(row=0, column=0,sticky=NW)
#
#
macroEditIcon = PhotoImage(file='icons/icon_macroEDIT.gif')
#
#-------
macroFileEdit = Button(macroFrame, image=macroEditIcon, bg = TANBG
                       , borderwidth = 0
                       ,activebackground=ACTIVB, font=monoFont14 )
macroFileEdit.grid(row=1, column=0,sticky=NW)
#====================================
## Settings Frame
#
#-------
settingsFrame = LabelFrame(controlsFrame,text='Settings',
                           bg = TANBG, borderwidth=4)
settingsFrame.grid(row=12,column=0, sticky=S)
#
#
settingsIcon = PhotoImage(file='icons/icon_settings.gif')
#
#-------
settingsBtn = Button(settingsFrame, image=settingsIcon, bg = TANBG
                     ,borderwidth = 0, command = editSettings
                     ,activebackground=ACTIVB, font=monoFont14 )
settingsBtn.grid()
#====================================
## Quit Frame
#
def quitCommand():
    '''Shut down the hardware, then close the main window.'''
    PowerDown()      # sequence the equipment shutdown first
    siWin.destroy()  # then tear down the Tk main window
#-------
quitFrame = LabelFrame(controlsFrame,text='Quit',
                       bg = TANBG, borderwidth=4)
quitFrame.grid(row=13,column=0, sticky=S)
#
#
quitIcon = PhotoImage(file='icons/icon_quit.gif')
#
#-------
quitBtn = Button(quitFrame, image=quitIcon, bg = TANBG, borderwidth = 0
                 ,command = quitCommand
                 ,activebackground=ACTIVB, font=monoFont14 )
quitBtn.grid()
#====================================
## Experiment Frame -- Window to right of Control frame
#
#-------
efFrame = Frame(siWin, bg = TANBG, borderwidth=0)
efFrame.grid(row=0,column=1,sticky=NW)
#====================================
## Experiment Settings Frame
#
#-------
esfFrame = Frame(efFrame, bg = TANBG, borderwidth=0)
esfFrame.grid(row=0,column=0,sticky=NW)
#====================================
## Spectrometer / Specimen Box Frame
#
#-------
ssbFrame = Frame(esfFrame, bg = TANBG, borderwidth=0)
ssbFrame.grid(row=0,column=0,sticky=EW)
#====================================
## Spectrometer Settings Frame
#
#-------
ssfFrame = LabelFrame(ssbFrame,text='Spectrometer Settings',
                      bg = TANBG, borderwidth=4)
ssfFrame.grid(row=0,column=0,sticky=NW)
#====================================
## Spectrometer EX Frame - EXcitation
#
# EX scan
#
#-------
sEXfFrame = Frame(ssfFrame, bg = TANBG)
sEXfFrame.grid(row=0,column=0,sticky=NW)
#
#
# Forward references: the mode buttons are created further below but are
# needed by setScanMode(); resolved right after each Button is built.
sEXfB00_FR = NotImplemented # forward reference to Button
sEMfB00_FR = NotImplemented # forward reference to Button
sTMfB00_FR = NotImplemented # forward reference to Button
#
# Mode icons: *T = selected ('true') variant, *F = unselected variant.
exIconT = PhotoImage(file='icons/icon_modeEXt.gif')
exIconF = PhotoImage(file='icons/icon_modeEXf.gif')
#
emIconT = PhotoImage(file='icons/icon_modeEMt.gif')
emIconF = PhotoImage(file='icons/icon_modeEMf.gif')
#
tmIconT = PhotoImage(file='icons/icon_modeTMt.gif')
tmIconF = PhotoImage(file='icons/icon_modeTMf.gif')
#
def buttonEX():
    '''Switch the scanning-mode display/selection to EXcitation.'''
    setScanMode(EXscan)
#
#-------
# EXcitation mode button and its Start/End/Inc wavelength entries.
sEXfB00 = Button(sEXfFrame, image = exIconT, bg = TANBG
                 ,borderwidth=0, command = buttonEX,activebackground=ACTIVB)
sEXfB00.grid(row=0,column=0,sticky=W)
sEXfB00_FR = sEXfB00 # resolve the forward reference to this button
#
# Wavelength Setting (frame)
#-------
sEXwavFrame = Frame(sEXfFrame, bg = TANBG)
sEXwavFrame.grid(row=0,column=2,sticky=NW)
#
# Wavelength Start - Label
#-------
sEXwavSLabel = Label(sEXwavFrame, text='Start (nm)', font=monoFont12, bg = TANBG )
sEXwavSLabel.grid(row=0, column=0,padx=2,sticky=W)
#
# Wavelength End - Label
#-------
sEXwavELabel = Label(sEXwavFrame, text='End (nm)', font=monoFont12, bg = TANBG )
sEXwavELabel.grid(row=0, column=1,padx=2,sticky=W)
#
# Wavelength Inc - Label
#-------
sEXwavILabel = Label(sEXwavFrame, text='Inc (nm)', font=monoFont12, bg = TANBG )
sEXwavILabel.grid(row=0, column=2,padx=2,sticky=W)
#
# Start wavelength - Enter
#
def validateEXwaveStart(eventKeyRelease):
    # keystroke clears any out-of-range warning color on this Entry
    sEXwavSEntry['bg'] = 'white' # set button color 'white' on edit
    return
#
#-------
sEXwavSEntry = Entry(sEXwavFrame, textvariable=varEXwaveStart,
                     border=2, relief=SUNKEN, width=8, font=monoFont14 )
sEXwavSEntry.grid(row=1, column=0, padx=4, pady=2, sticky=W)
sEXwavSEntry.bind('<KeyRelease>',validateEXwaveStart)
#
btnLookup['EXS'] = sEXwavSEntry # put button into dictionary by name
#
# End wavelength - Enter
#
def validateEXwaveEnd(eventKeyRelease):
    # keystroke clears any out-of-range warning color on this Entry
    sEXwavEEntry['bg'] = 'white' # set button color 'white' on edit
    return
#
#-------
sEXwavEEntry = Entry(sEXwavFrame, textvariable=varEXwaveEnd,
                     border=2, relief=SUNKEN, width=7, font=monoFont14 )
sEXwavEEntry.grid(row=1, column=1, padx=4, pady=2, sticky=W)
sEXwavEEntry.bind('<KeyRelease>',validateEXwaveEnd)
#
btnLookup['EXE'] = sEXwavEEntry # put button into dictionary by name
#
# Inc wavelength - Enter
#
def validateEXwaveInc(eventKeyRelease):
    # keystroke clears any out-of-range warning color on this Entry
    sEXwavIEntry['bg'] = 'white' # set button color 'white' on edit
    return
#
#-------
sEXwavIEntry = Entry(sEXwavFrame, textvariable=varEXinc,
                     border=2, relief=SUNKEN, width=6, font=monoFont14 )
sEXwavIEntry.grid(row=1, column=2, padx=4, pady=2, sticky=W)
sEXwavIEntry.bind('<KeyRelease>',validateEXwaveInc)
#
btnLookup['EXI'] = sEXwavIEntry # put button into dictionary by name
#====================================
## Spectrometer EM Frame - EMission
#
# EM scan
#
#-------
sEMfFrame = Frame(ssfFrame, bg = TANBG)
sEMfFrame.grid(row=0,column=1,sticky=NW)
#
def buttonEM():
    '''Switch the scanning-mode display/selection to EMission.'''
    setScanMode(EMscan)
#
#-------
# EMission mode button and its Start/End/Inc wavelength entries.
sEMfB00 = Button(sEMfFrame, image = emIconF, bg = TANBG
                 ,borderwidth=0, activebackground=ACTIVB, command = buttonEM)
sEMfB00.grid(row=0,column=0,sticky=W)
sEMfB00_FR = sEMfB00 # resolve the forward reference to this button
#
# Wavelength Setting (frame)
#-------
sEMwavFrame = Frame(sEMfFrame, bg = TANBG)
sEMwavFrame.grid(row=0,column=2,sticky=NW)
#
# Wavelength Start - Label
#-------
sEMwavSLabel = Label(sEMwavFrame, text='Start (nm)', font=monoFont12, bg = TANBG )
sEMwavSLabel.grid(row=0, column=0,padx=2,sticky=W)
#
# Wavelength End - Label
#-------
sEMwavELabel = Label(sEMwavFrame, text='End (nm)', font=monoFont12, bg = TANBG )
sEMwavELabel.grid(row=0, column=1,padx=2,sticky=W)
#
# Wavelength Inc - Label
#-------
sEMwavILabel = Label(sEMwavFrame, text='Inc (nm)', font=monoFont12, bg = TANBG )
sEMwavILabel.grid(row=0, column=2,padx=2,sticky=W)
#
# Start wavelength - Enter
#
def validateEMwaveStart(eventKeyRelease):
    # keystroke clears any out-of-range warning color on this Entry
    sEMwavSEntry['bg'] = 'white' # set button color 'white' on edit
    return
#
#-------
sEMwavSEntry = Entry(sEMwavFrame, textvariable=varEMwaveStart,
                     border=2, relief=SUNKEN, width=8, font=monoFont14 )
sEMwavSEntry.grid(row=1, column=0, padx=4, pady=2, sticky=E)
sEMwavSEntry.bind('<KeyRelease>',validateEMwaveStart)
#
btnLookup['EMS'] = sEMwavSEntry # put button into dictionary by name
#
# End wavelength - Enter
#
def validateEMwaveEnd(eventKeyRelease):
    # keystroke clears any out-of-range warning color on this Entry
    sEMwavEEntry['bg'] = 'white' # set button color 'white' on edit
    return
#
#-------
sEMwavEEntry = Entry(sEMwavFrame, textvariable=varEMwaveEnd,
                     border=2, relief=SUNKEN, width=7, font=monoFont14 )
sEMwavEEntry.grid(row=1, column=1, padx=4, pady=2, sticky=EW)
sEMwavEEntry.bind('<KeyRelease>',validateEMwaveEnd)
#
btnLookup['EME'] = sEMwavEEntry # put button into dictionary by name
#
# Inc wavelength - Enter
#
def validateEMwaveInc(eventKeyRelease):
    # keystroke clears any out-of-range warning color on this Entry
    sEMwavIEntry['bg'] = 'white' # set button color 'white' on edit
    return
#
#-------
sEMwavIEntry = Entry(sEMwavFrame, textvariable=varEMinc,
                     border=2, relief=SUNKEN, width=6, font=monoFont14 )
sEMwavIEntry.grid(row=1, column=2, padx=4, pady=2, sticky=EW)
sEMwavIEntry.bind('<KeyRelease>',validateEMwaveInc)
#
btnLookup['EMI'] = sEMwavIEntry # put button into dictionary by name
#====================================
## Spectrometer TM Frame - TiMe
#
# TM scan
#
#-------
sTMfFrame = Frame(ssfFrame, bg = TANBG)
sTMfFrame.grid(row=0,column=2,sticky=NW)
#
def buttonTM():
    '''Display/Change scanning mode: to TiMe.'''
    setScanMode(TMscan)
    return
#
#-------
# TiMe mode button and its Pause/End/Inc time entries.
sTMfB00 = Button(sTMfFrame, image = tmIconF, bg = TANBG,
                 borderwidth=0,activebackground=ACTIVB, command = buttonTM)
sTMfB00.grid(row=0,column=0,sticky=W)
sTMfB00_FR = sTMfB00 # resolve the forward reference to this button
#
#
# Time Setting (frame)
#-------
sTMwavFrame = Frame(sTMfFrame, bg = TANBG)
sTMwavFrame.grid(row=0,column=1,sticky=NW)
#
# Pause step# - Label
#-------
sTMwavPLabel = Label(sTMwavFrame, text='Pause(S)'
                     , font=monoFont12, bg = TANBG )
sTMwavPLabel.grid(row=0, column=0,padx=2,sticky=W)
#
# End step# - Label
#-------
sTMwavELabel = Label(sTMwavFrame, text='End (S)'
                     , font=monoFont12, bg = TANBG )
sTMwavELabel.grid(row=0, column=1,padx=2,sticky=W)
#
# Increment Time - Label
#-------
sTMwavILabel = Label(sTMwavFrame, text='Inc (S)'
                     , font=monoFont12, bg = TANBG )
sTMwavILabel.grid(row=0, column=2,padx=2,sticky=W)
#
#
# Pause (step#) - Enter
#
def validateTMwavePause(eventKeyRelease):
    # keystroke clears any out-of-range warning color on this Entry
    sTMwavPEntry['bg'] = 'white' # set button color 'white' on edit
    return
#
#-------
sTMwavPEntry = Entry(sTMwavFrame, textvariable=varTMwavePause,
                     border=2, relief=SUNKEN, width=6, font=monoFont14 )
sTMwavPEntry.grid(row=1, column=0, padx=4, pady=2, sticky=EW)
sTMwavPEntry.bind('<KeyRelease>',validateTMwavePause)
#
btnLookup['TMP'] = sTMwavPEntry # put button into dictionary by name
#
# End step# - Enter
#
def validateTMwaveEnd(eventKeyRelease):
    # keystroke clears any out-of-range warning color on this Entry
    sTMwavEEntry['bg'] = 'white' # set button color 'white' on edit
    return
#
#-------
sTMwavEEntry = Entry(sTMwavFrame, textvariable=varTMwaveEnd,
                     border=2, relief=SUNKEN, width=6, font=monoFont14 )
sTMwavEEntry.grid(row=1, column=1, padx=4, pady=2, sticky=EW)
sTMwavEEntry.bind('<KeyRelease>',validateTMwaveEnd)
#
btnLookup['TME'] = sTMwavEEntry # put button into dictionary by name
#
# Increment Time - Enter
#
def validateTMwaveInc(eventKeyRelease):
    # keystroke clears any out-of-range warning color on this Entry
    sTMwavIEntry['bg'] = 'white' # set button color 'white' on edit
    return
#
#-------
sTMwavIEntry = Entry(sTMwavFrame, textvariable=varTMinc,
                     border=2, relief=SUNKEN, width=6, font=monoFont14 )
sTMwavIEntry.grid(row=1, column=2, padx=4, pady=2, sticky=W)
sTMwavIEntry.bind('<KeyRelease>',validateTMwaveInc)
#
btnLookup['TMI'] = sTMwavIEntry # put button into dictionary by name
#====================================
## S+R Frame - record Reference data?
#
# S+R
#
#-------
srFrame = Frame(ssfFrame, bg = TANBG)
srFrame.grid(row=0,column=3,sticky=NW)
#
# Reference Data - On/Off - 'S'(signal) alone or with 'R'(reference) too?
#
refOffIcon = PhotoImage(file='icons/icon_refOff.gif')
refOnIcon = PhotoImage(file='icons/icon_refOn.gif')
refOn = False # default == OFF (i.e. 'S' and 'R')
#
def toggleRef():
    '''Ref - On/Off - 'S'(signal) alone or with 'R'(reference) too?'''
    global refOn
    # flips the flag and swaps the button icon
    refOn = toggleBtnVar(refOn, refScfB02, refOffIcon, refOnIcon)
    return
#
#-------
refScfB02 = Button(srFrame, image = refOffIcon, borderwidth = 0
                   ,bg = TANBG,activebackground=ACTIVB, command = toggleRef)
refScfB02.grid(row=0,column=0,sticky=W)
#====================================
## Set 'scan mode' - complete forward reference
#
def setScanMode(mode):
    '''Select the type of spectrometer scan to perform.
    Sets the EX, EM and TM Icons to indicate scan type.
    Sets the 'state' (NORMAL/DISABLE) for scan setting params.

    :param mode: one of EXscan, EMscan, TMscan (module-level constants).
    No-op when *mode* is already the current mode.  Exits the program on
    an unrecognized mode value.'''
    #
    # any change?
    if varScanMode.get() == mode:
        if jjltest:
            print('setScanMode(): NO change.')
        return # no change
    #
    varScanMode.set(mode) # set the scan mode
    #
    # update icons: exactly one of EX/EM/TM shows its selected ('T') icon
    if varScanMode.get() == EXscan :
        sEXfB00_FR['image'] = exIconT # SCAN MODE - back to Default
        sEMfB00_FR['image'] = emIconF
        sTMfB00_FR['image'] = tmIconF
    elif varScanMode.get() == EMscan :
        sEXfB00_FR['image'] = exIconF
        sEMfB00_FR['image'] = emIconT # SCAN MODE
        sTMfB00_FR['image'] = tmIconF
    elif varScanMode.get() == TMscan :
        sEXfB00_FR['image'] = exIconF
        sEMfB00_FR['image'] = emIconF
        sTMfB00_FR['image'] = tmIconT # SCAN MODE
    else:
        if jjltest:
            print('Bad scan mode found in setScanMode(mode)')
        sys.exit(0)
    #
    updatePlot() # synchronize plot with scan mode
    #
    # set the correct 'state' for wavelength/time entries: the active
    # mode's Start/End/Inc entries are enabled; the other modes' entries
    # are disabled and relabeled 'Park (nm)' or blanked.
    #
    if varScanMode.get() == EXscan:
        sEXwavSLabel['text'] = 'Start (nm)' # EXscan - Start wavelength
        sEXwavELabel['text'] = 'End (nm)'   #        - End label set
        sEXwavEEntry['state'] = NORMAL      #        - End entry enabled
        sEXwavILabel['text'] = 'Inc (nm)'   #        - Inc label set
        sEXwavIEntry['state'] = NORMAL      #        - Inc entry enabled
        sEMwavSLabel['text'] = 'Park (nm)'  # EMscan - EM wavelength Parked
        sEMwavELabel['text'] = ''           #        - End label cleared
        sEMwavEEntry['state'] = DISABLED    #        - End entry disabled
        sEMwavILabel['text'] = ''           #        - Inc label cleared
        sEMwavIEntry['state'] = DISABLED    #        - Inc entry disabled
        sTMwavPLabel['text'] = ''           # TMscan - Pause label cleared
        sTMwavPEntry['state'] = DISABLED    #        - Pause entry disabled
        sTMwavELabel['text'] = ''           #        - End label cleared
        sTMwavEEntry['state'] = DISABLED    #        - End entry disabled
    elif varScanMode.get() == EMscan:
        sEXwavSLabel['text'] = 'Park (nm)'  # EXscan - EX wavelength Parked
        sEXwavELabel['text'] = ''           #        - End label cleared
        sEXwavEEntry['state'] = DISABLED    #        - End entry disabled
        sEXwavILabel['text'] = ''           #        - Inc label cleared
        sEXwavIEntry['state'] = DISABLED    #        - Inc entry disabled
        sEMwavSLabel['text'] = 'Start (nm)' # EMscan - EM wavelength set
        sEMwavELabel['text'] = 'End (nm)'   #        - End label set
        sEMwavEEntry['state'] = NORMAL      #        - End entry enabled
        sEMwavILabel['text'] = 'Inc (nm)'   #        - Inc label set
        sEMwavIEntry['state'] = NORMAL      #        - Inc entry enabled
        sTMwavPLabel['text'] = ''           # TMscan - Pause label cleared
        sTMwavPEntry['state'] = DISABLED    #        - Pause entry disabled
        sTMwavELabel['text'] = ''           #        - End label cleared
        sTMwavEEntry['state'] = DISABLED    #        - End entry disabled
    elif varScanMode.get() == TMscan:
        sEXwavSLabel['text'] = 'Park (nm)'  # EXscan - EX wavelength Parked
        sEXwavELabel['text'] = ''           #        - End label cleared
        sEXwavEEntry['state'] = DISABLED    #        - End entry disabled
        sEXwavILabel['text'] = ''           #        - Inc label cleared
        sEXwavIEntry['state'] = DISABLED    #        - Inc entry disabled
        sEMwavSLabel['text'] = 'Park (nm)'  # EMscan - EM wavelength Parked
        sEMwavELabel['text'] = ''           #        - End label cleared
        sEMwavEEntry['state'] = DISABLED    #        - End entry disabled
        sEMwavILabel['text'] = ''           #        - Inc label cleared
        sEMwavIEntry['state'] = DISABLED    #        - Inc entry disabled
        sTMwavPLabel['text'] = 'Pause(S)'   # TMscan - Pause label set
        sTMwavPEntry['state'] = NORMAL      #        - Pause entry enabled
        sTMwavELabel['text'] = 'End (S)'    #        - End label set
        sTMwavEEntry['state'] = NORMAL      #        - End entry enabled
    else:
        # fixed typo in the user-visible message ('Errr' -> 'Error')
        err = 'Internal Error: undefined scan mode: {} !'
        mBox.showerror(title='Fatal Error'
                       ,message=err.format(varScanMode.get()))
        sys.exit(0)
    #
    scanSanityCheck() # update out-of-bounds parameter coloring
    return
#
setScanMode_FR = setScanMode # resolve the Forward Reference to function
#====================================
## Specimen Details Frame
#
#-------
sdFrame = LabelFrame(ssbFrame,text='Specimen Details', bg = TANBG, borderwidth=0)
sdFrame.grid(row=1,column=0, pady=4, sticky=NW)
# free-text specimen description; re-plots on every keystroke so the
# plot title stays in sync
sdEntry = Entry(sdFrame, textvariable=varSpecimenDetails ,
                width=96, bg = 'white', border=2, relief=SUNKEN, font=monoFont14)
sdEntry.grid(row=0, column=0, padx=20, pady=2, sticky=EW)
sdEntry.bind('<KeyRelease>',updatePlot)
#====================================
## Real Time data Frame -- frame inside Experiment Frame
#
# Frame to hold real time data
#-------
rtdmFrame = LabelFrame(esfFrame, text='Live Data', bg = TANBG, borderwidth=4)
rtdmFrame.grid(row=0,column=1, padx=4, pady=2,sticky=NS+E)
#
#
# Real Time Data -- Row 0 => Signal
#-------
rtdmLabel00 = Label(rtdmFrame, text='S:', font=monoFont14, bg = TANBG )
rtdmLabel00.grid(row=0, column=0,sticky=E)
#-------
# NOTE(review): 'rtdmLabel00' is re-bound here, shadowing the 'S:' label
# created just above (the widget itself survives in the grid) — probably
# meant to be 'rtdmLabel01'; confirm.
rtdmLabel00 = Label(rtdmFrame, textvariable=varLiveSignal
                    ,border=0, relief=FLAT, bg='white'
                    ,width=15, font=monoFont12, anchor=E )
rtdmLabel00.grid(row=0, column=1, padx=4, pady=2, sticky=W)
#
# Real Time Data -- Row 1 => Reference
#-------
rtdmLabel10 = Label(rtdmFrame, text='R:', font=monoFont14, bg = TANBG )
rtdmLabel10.grid(row=1, column=0,sticky=E)
#-------
rtdmLabel11 = Label(rtdmFrame, textvariable=varLiveReference
                    ,border=0, relief=FLAT, bg='white'
                    ,width=15, font=monoFont12, anchor=E )
rtdmLabel11.grid(row=1, column=1, padx=4, pady=2, sticky=W)
#
# Real Time Data -- Row 2 => PCT (%) scan complete
#-------
rtdmLabel40 = Label(rtdmFrame, text='%:', font=monoFont14, bg = TANBG )
rtdmLabel40.grid(row=2, column=0,sticky=E)
rtdmProgress41 = Progressbar(rtdmFrame, orient='horizontal'
                             ,mode='determinate', variable=varPCTdone
                             ,length=124)
rtdmProgress41.grid(row=2, column=1, padx=4, pady=2,sticky=W)
#
#
# FRAME for Real Time Data2 -- EX/EM position and HV readings
#
rtdmFrame2 = Frame(rtdmFrame, bg = TANBG)
rtdmFrame2.grid(row=3,columnspan=2, padx=0, pady=0,sticky=NSEW)
#
# Real Time Data2 -- Row 0,[Col 0&1] => EX monochrometer position (nm)
#-------
rtdm2Label00 = Label(rtdmFrame2, text='EX:', font=monoFont14, bg = TANBG )
rtdm2Label00.grid(row=0, column=0,sticky=E)
#-------
rtdm2Label01 = Label(rtdmFrame2, textvariable=varLiveEXpos
                     ,border=0, relief=FLAT, bg='white'
                     ,width=4, font=monoFont12, anchor=E )
rtdm2Label01.grid(row=0, column=1, padx=2, pady=2, sticky=W)
#
# Real Time Data -- Row 0,[Col 2&3] => EM monochrometer position (nm)
#-------
rtdm2Label02 = Label(rtdmFrame2, text='EM:', font=monoFont14, bg = TANBG )
rtdm2Label02.grid(row=0, column=2,sticky=E)
#-------
rtdm2Label03 = Label(rtdmFrame2, textvariable=varLiveEMpos
                     ,border=0, relief=FLAT, bg='white'
                     ,width=4, font=monoFont12, anchor=E )
rtdm2Label03.grid(row=0, column=3, padx=2, pady=2, sticky=W)
#
# Real Time Data2 -- Row 1,[Col 0&1] => EM PMT HV readings (v)
#-------
rtdm2Label10 = Label(rtdmFrame2, text='HVm:', font=monoFont14, bg = TANBG )
rtdm2Label10.grid(row=1, column=0,sticky=E)
#-------
rtdm2Label11 = Label(rtdmFrame2, textvariable=varLiveEMhv
                     ,border=0, relief=FLAT, bg='white'
                     ,width=4, font=monoFont12, anchor=E )
rtdm2Label11.grid(row=1, column=1, padx=2, pady=2, sticky=W)
#
# Real Time Data -- Row 1,[Col 2&3] => REF PMT HV readings (v)
#-------
rtdm2Label22 = Label(rtdmFrame2, text='HVr:', font=monoFont14, bg = TANBG )
rtdm2Label22.grid(row=1, column=2,sticky=E)
#-------
rtdm2Label23 = Label(rtdmFrame2, textvariable=varLiveREFhv
                     ,border=0, relief=FLAT, bg='white'
                     ,width=4, font=monoFont12, anchor=E )
rtdm2Label23.grid(row=1, column=3, padx=2, pady=2, sticky=W)
#====================================
## Plotting Frame
#
#-------
plotFrame = Frame(efFrame, bg = TANBG, borderwidth=0)
plotFrame.grid(row=2,column=0, sticky=NSEW)
#
fig = Figure(figsize = (11.56,6), dpi=100) # TopLevel container for all plot elements
#
# initialize the "plot" element as "ax"
#
# NOTE(review): 'axisbg' was removed in matplotlib 2.0 (use facecolor=);
# this only works on old matplotlib — confirm the pinned version.
ax = fig.add_subplot(111, axisbg='w')
#
canvas = FigureCanvasTkAgg(fig, master=plotFrame)
canvas.get_tk_widget().grid(row=0,column=0, padx=2)
#
def updatePlot():
    '''Redraw the data plot to match the current scan mode and data.

    Clears old line artists from the persistent axes, rescales X from
    the active mode's Start/End entries (EMscan only; EX/TM fall back
    to the full 200-1000 nm range) and Y from the loaded data, then
    re-plots the background (reference) trace and refreshes the canvas.
    Reads module globals: scanDataX/Y, backgroundDataX/Y, varScanMode.'''
    global ax
    global scanDataX,scanDataY
    global backgroundDataX,backgroundDataY
    # #
    # # returns Axes instance for single plot
    # try:
    #     fig.axes.remove(ax)
    # except:
    #     pass
    #print('CALLED: updatePlot() len(scanDataX)={}'.format(len(scanDataX)))
    #
    # remove 'old' lines before re-draw
    while len(ax.lines):
        ax.lines.remove(ax.lines[-1])
    #
    # Get correct scaling for X axis
    #
    minX = 200   # full monochrometer range (nm), used as fallback
    maxX = 1000
    sm = varScanMode.get()
    if sm == EXscan:
        if jjltest:
            print('Error: EXscan not implemented.')
        else:
            mBox.showerror(message='Error: EXscan not implemented.')
        startX = minX
        endX = maxX
    elif sm == EMscan:
        # degenerate/empty range entries -> plot the full range
        if getVarInt(varEMwaveEnd) - getVarInt(varEMwaveStart) < 2:
            startX = minX
            endX = maxX
        else:
            startX = getVarInt(varEMwaveStart)
            endX = getVarInt(varEMwaveEnd)
    elif sm == TMscan:
        if jjltest:
            print('Error: TMscan not implemented.')
        else:
            mBox.showerror(message='Error: TMscan not implemented.')
        startX = minX
        endX = maxX
    else:
        mErr('Error: updatePlot() invalid varScanMode')
        sys.exit(0)
    #
    # Get correct scaling for Y axis (10% headroom over the tallest trace)
    #
    if len(scanDataY) < 2 :
        maxScanY = 5000 # default if NO scan data
    else:
        maxScanY = 1.1*max(scanDataY)
    #
    if len(backgroundDataY) < 2 :
        maxInputY = 5000 # default if NO input (reference) data
    else:
        maxInputY = 1.1*max(backgroundDataY)
    #
    maxY = max(5000, maxScanY, maxInputY)
    #
    # set the X & Y sizes for axes now
    #
    ax.axis([startX, endX, 0, maxY ])
    #
    setPlotTitle()
    ax.set_ylabel('counts')
    #
    # plot "background" waveform (IF one has been loaded)
    if len(backgroundDataX) > 1:
        if jjltest:
            print('\nbefore: len(ax.lines)={}'.format(len(ax.lines)))
        #ax.plot(scanDataX, scanDataY, 'b')
        if jjltest:
            print('mid: len(ax.lines)={}'.format(len(ax.lines)))
        ax.plot(backgroundDataX, backgroundDataY, 'g')
        if jjltest:
            print('after: len(ax.lines)={}'.format(len(ax.lines)))
    if jjltest:
        txt_ = 'len(backgroundDataX):{}, len(backgroundDataY):{}'
        print(txt_.format(len(backgroundDataX),len(backgroundDataY)))
    #
    # xlabel depends upon type of scan: (varScanMode)
    #     EXscan = 0, EMscan = 1, TMscan = 2;
    #
    if varScanMode.get() == TMscan:
        ax.set_xlabel('time (S)') # scan by time
    else:
        ax.set_xlabel('wavelength (nm)') # scan by wavelength
    #
    # set up "cursor" to display values from plot
    #
    # NOTE(review): Cursor is bound to a local name, so it may be
    # garbage-collected when this function returns and stop responding —
    # matplotlib widgets normally need a live reference; confirm.
    cursor = Cursor(ax, horizOn=False, useblit=True, color='red', linewidth=2 )
    #cursor = Cursor(ax, horizOn=False, color='red', linewidth=2 )
    #
    # NOTE(review): canvas.show() is deprecated in newer matplotlib in
    # favor of canvas.draw() — confirm the pinned library version.
    canvas.show()
#
updatePlot_FR = updatePlot # resolve the Forward Reference to updatePlot()
# ========================
#=================
## Start up Window
#
# NOTE(review): original comment said "default EX scan type" but the mode
# actually set is EMscan — kept the EM behavior, fixed the comment.
setScanMode(EMscan) # establish default EM scan type
updatePlot() # draw the graph
#
PowerUp() # initialize settings & calibrate SPEX
#
siWin.mainloop()
|
jluscher/SCANIT
|
scanit_v033.py
|
Python
|
cc0-1.0
| 86,949
|
[
"VisIt"
] |
0f3f39f0ab69fcc13b883f8436de21ecd61507074ed6a8491b178a32348d2057
|
#!/usr/bin/env python
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib.colors import colorConverter
import numpy as np
import re
import sys
import argparse
import os.path
def get_fermi(fname):
    """Parse the Fermi energy from a VASP OUTCAR-style file.

    Scans for a line of the form ``E-fermi :   5.1234   XC(G=0): ...``
    and returns the Fermi energy as a float; returns None when no such
    line exists.

    :param fname: path to the OUTCAR file.
    """
    # Raw-string pattern; the original pattern ended in '\XC' — an
    # invalid escape that raises re.error ("bad escape \X") on
    # Python 3.7+.  Capture the first whitespace-delimited token after
    # the colon instead of anchoring on the trailing 'XC'.
    pattern = re.compile(r'E-fermi\s*:\s*(\S+)')
    with open(fname) as myfile:
        for line in myfile:
            m = pattern.search(line)
            if m is not None:
                return float(m.group(1))
def read_band(fname='wannier90.up_band.dat', fermi_shift=None):
    """
    Read a wannier90 ``_band.dat`` file into per-segment lists.

    :param fname: the filename of the _band.dat
    :param fermi_shift: (None | float) if None, no shift, else, all the
        energies are shifted down by fermi_shift.
    :returns: ``(kslist, ekslist)`` — lists of k-coordinate and energy
        lists, one per band segment (segments are separated by short or
        blank lines in the file).  When the file has a third (weight)
        column the return is ``(kslist, ekslist, wkslist)``.
    """
    kslist = []
    ks = []
    ekslist = []
    eks = []
    wkslist = []
    wks = []
    have_weight = False
    with open(fname) as myfile:
        for line in myfile:
            if len(line) > 5:
                # list() is required: under Python 3 ``map`` returns a
                # lazy iterator, so the original ``vals[0]`` raised
                # TypeError ('map' object is not subscriptable).
                vals = list(map(float, line.strip().split()))
                ks.append(vals[0])
                if fermi_shift is None:
                    eks.append(vals[1])
                else:
                    eks.append(vals[1] - fermi_shift)
                if len(vals) == 3:
                    have_weight = True
                    wks.append(vals[2])
            else:
                # short/blank line terminates a band segment: flush it
                kslist.append(ks)
                ekslist.append(eks)
                wkslist.append(wks)
                ks = []
                eks = []
                wks = []
    if ks:
        # robustness: flush a trailing segment when the file does not
        # end with a separator line (the original silently dropped it)
        kslist.append(ks)
        ekslist.append(eks)
        wkslist.append(wks)
    if have_weight:
        return kslist, ekslist, wkslist
    else:
        return kslist, ekslist
def plot_band_from_data(es, kpts=None, efermi=None, labels=None):
    """
    plot the band.

    :param es: energies, nbands*nkpts array (one row per band).
    :param kpts: k-point x-coordinates; defaults to ``0..nkpts-1``.
        A 2-D array is collapsed to its first row.
    :param efermi: when given, energies are shifted down by efermi and a
        dashed reference line is drawn at 0.
    :param labels: (positions, names) pair for high-symmetry x-ticks;
        a gray vertical line is drawn at each position.
    """
    plt.cla()
    nbands, nkpts = np.asarray(es).shape
    if kpts is None:
        # np.arange replaces the original misspelled ``np.arrzy`` call,
        # which raised AttributeError on first use of the default path.
        kpts = np.arange(nkpts)
    if len(np.asarray(kpts).shape) == 2:
        kpts = kpts[0]
    xmax = max(kpts)
    plt.xlim([0, xmax])
    if efermi is not None:
        plt.axhline(y=0.0, linestyle='--', color='blue')
        es = np.asarray(es) - efermi
    for e in es:
        plt.plot(kpts, e)
    if labels is not None:
        plt.xticks(labels[0], labels[1])
        for x in labels[0]:
            plt.axvline(x=x, color='gray')
    plt.show()
def plot_band(fname='wannier90.up_band.dat', efermi=None):
    """
    plot the band with data read from wannier90 output data.

    :param fname: the _band.dat file to read.
    :param efermi: optional Fermi energy; energies are shifted down by it.
    """
    plt.clf()
    # read_band returns 2 or 3 lists depending on whether the file has a
    # weight column; the original ``[:-1]`` dropped the ENERGY list for
    # unweighted files and broke the 2-tuple unpack below.  ``[:2]``
    # keeps (kslist, ekslist) in both cases.
    kslist, ekslist = read_band(fname, fermi_shift=efermi)[:2]
    xmax = max(kslist[0])
    plt.xlim([0, xmax])
    for ks, eks in zip(kslist, ekslist):
        plt.plot(ks, eks)
    plt.show()
def plot_band_weight(kslist,
                     ekslist,
                     wkslist=None,
                     efermi=None,
                     yrange=None,
                     style='alpha',
                     color='blue',
                     axis=None,
                     width=10,
                     xticks=None,
                     cmap=mpl.cm.coolwarm,
                     weight_min=-4,
                     weight_max=4,
                     shift_fermi=True):
    """Plot band segments with per-point weights rendered as line width,
    alpha, or colormap color.

    :param kslist: list of k-coordinate lists, one per band segment.
    :param ekslist: matching list of energy lists.
    :param wkslist: matching list of weight lists; when None only the
        thin gray reference bands are drawn.
    :param efermi: Fermi energy; energies are shifted by it when
        shift_fermi is True, otherwise a line is drawn at efermi.
    :param style: 'width' | 'alpha' | 'color'/'colormap' — how weights
        are visualized.  NOTE(review): any other value leaves ``lc``
        unbound and raises NameError at ``a.add_collection(lc)``.
    :param axis: existing matplotlib Axes to draw on; a new figure is
        created (and gray reference bands added) when None.
    :returns: the matplotlib Axes drawn on.
    """
    if axis is None:
        fig, a = plt.subplots()
    else:
        a = axis
    if efermi is not None and shift_fermi:
        ekslist = np.array(ekslist) - efermi
    xmax = max(kslist[0])
    if yrange is None:
        # pad the auto y-range by 0.66 on both sides
        yrange = (np.array(ekslist).flatten().min() - 0.66,
                  np.array(ekslist).flatten().max() + 0.66)
    if wkslist is not None:
        for i in range(len(kslist)):
            x = kslist[i]
            y = ekslist[i]
            #lwidths=np.ones(len(x))
            # build per-point segments so each can get its own width/color
            points = np.array([x, y]).T.reshape(-1, 1, 2)
            segments = np.concatenate([points[:-1], points[1:]], axis=1)
            if style == 'width':
                lwidths = np.array(wkslist[i]) * width
                lc = LineCollection(segments, linewidths=lwidths, colors=color)
            elif style == 'alpha':
                lwidths = np.array(wkslist[i]) * width
                # NOTE(review): alpha = lwidth/(width+0.001) exceeds 1.0
                # when a weight is > ~1, which matplotlib rejects — assumes
                # weights are normalized to [0, 1]; confirm.
                lc = LineCollection(
                    segments,
                    linewidths=[4] * len(x),
                    colors=[
                        colorConverter.to_rgba(
                            color, alpha=lwidth / (width + 0.001))
                        for lwidth in lwidths
                    ])
            elif style == 'color' or style == 'colormap':
                norm = mpl.colors.Normalize(vmin=weight_min, vmax=weight_max)
                lwidths = np.array(wkslist[i]) * 10
                #norm = mpl.colors.SymLogNorm(linthresh=0.03,vmin=weight_min, vmax=weight_max)
                m = mpl.cm.ScalarMappable(norm=norm, cmap=cmap)
                #lc = LineCollection(segments,linewidths=np.abs(norm(lwidths)-0.5)*1, colors=[m.to_rgba(lwidth) for lwidth in lwidths])
                lc = LineCollection(
                    segments,
                    linewidths=lwidths,
                    colors=[m.to_rgba(lwidth) for lwidth in lwidths])
            a.add_collection(lc)
    if axis is None:
        # thin gray reference bands under the weighted collections
        for ks, eks in zip(kslist, ekslist):
            plt.plot(ks, eks, color='gray', linewidth=0.01)
    a.set_xlim(0, xmax)
    a.set_ylim(yrange)
    if xticks is not None:
        plt.xticks(xticks[1], xticks[0])
        for t in xticks[1]:
            plt.axvline(t, linewidth=0.8)
    if efermi is not None:
        if shift_fermi:
            plt.axhline(linestyle='--', color='black')
        else:
            plt.axhline(efermi,linestyle='--', color='black')
    return a
def plot_band_weight_file(fname='wannier90.up_band.dat',
                          efermi=None,
                          weight=True,
                          yrange=None,
                          output=None,
                          style='alpha',
                          color='blue',
                          axis=None,
                          width=10):
    """
    plot the band with projection (weight column) read from a wannier90
    ``_band.dat`` file; x-tick labels are taken from the matching
    ``.gnu`` gnuplot script.

    :param output: optional filename; when given the figure is saved
        there via plt.savefig.
    :returns: the matplotlib Axes drawn on.
    """
    if weight:
        kslist, ekslist, wkslist = read_band(fname, fermi_shift=efermi)
    else:
        kslist, ekslist = read_band(fname, fermi_shift=efermi)[:2]
        wkslist = None
    # locate the companion .gnu file next to the .dat file
    if fname[:-3].endswith('band.'):
        xticks = read_xtics(fname[:-3] + 'gnu')
    else:
        xticks = read_xtics(fname[:fname.rfind('_')] + '.gnu')
    # Bug fix: the original forwarded ``output=output`` to
    # plot_band_weight, which has no such parameter, so every call
    # raised TypeError.  Handle the save here instead.
    ax = plot_band_weight(
        kslist,
        ekslist,
        wkslist=wkslist,
        efermi=efermi,
        yrange=yrange,
        style=style,
        color=color,
        axis=axis,
        width=width,
        xticks=xticks)
    if output is not None:
        plt.savefig(output)
    return ax
def read_xtics(fname='wannier90.up_band.gnu'):
    """Read high-symmetry tick labels and positions from a wannier90
    gnuplot script.

    :param fname: the ``.gnu`` file produced alongside the band data.
    :returns: (names, xs) — the tick label strings and their x positions.
    """
    with open(fname) as gnufile:
        text = gnufile.read()
    # grab the comma-separated body of the "xtics ( ... )" directive
    body = re.search('xtics\s*\((.*)\)\n', text).group(1)
    names = []
    xs = []
    for token in body.split(','):
        # each token looks like:  "LABEL"  <position>
        label = re.search(r'"\s*(.*)\s*"', token).group(1)
        names.append(label)
        xs.append(float(token.strip().split()[-1]))
    return names, xs
def main():
    """Command-line entry point: parse arguments and plot a wannier band
    file, saving the figure to the requested (or derived) output path."""
    parser = argparse.ArgumentParser(description='plot wannier bands.')
    parser.add_argument('fname', type=str, help='dat filename')
    parser.add_argument(
        '-e', '--efermi', type=float, help='Fermi energy', default=None)
    parser.add_argument(
        '-o', '--output', type=str, help='output filename', default=None)
    parser.add_argument(
        '-w',
        '--weight',
        action='store_true',
        help='use -w to plot weighted band.')
    parser.add_argument(
        '-y',
        '--yrange',
        type=float,
        nargs='+',
        help='range of yticks',
        default=None)
    parser.add_argument(
        '-s',
        '--style',
        type=str,
        help='style of line, width | alpha',
        default='width')
    args = parser.parse_args()
    # Bug fix: the original assigned ``output``/``efermi`` only in the
    # "argument missing" branch, so supplying -o or -e raised NameError.
    if args.output is None:
        output = os.path.splitext(args.fname)[0] + '.png'
    else:
        output = args.output
    if args.efermi is None:
        # fall back to the Fermi level parsed from the SCF OUTCAR
        efermi = get_fermi('SCF/OUTCAR')
    else:
        efermi = args.efermi
    plot_band_weight_file(
        fname=args.fname,
        efermi=efermi,
        weight=args.weight,
        yrange=args.yrange,
        style=args.style)
    if output is not None:
        plt.savefig(output)
    plt.show()
# script entry point: only run when executed directly, not on import
if __name__ == '__main__':
    main()
#plot_band()
#plot_band_weight(efermi=9.05)
#print read_xtics()
|
mailhexu/pyDFTutils
|
pyDFTutils/plot/wannier_band_plot.py
|
Python
|
lgpl-3.0
| 8,300
|
[
"Wannier90"
] |
bcadd1783b4fc980977ed49f60b4ce141c948f53a1b6226e4c4c0154f894e7ad
|
from .test_utils import TempDirectoryTestCase
from lwr.managers.base import JobDirectory
from lwr.tools.validator import ExpressionValidator
from os.path import join
class ValidatorTest(TempDirectoryTestCase):
    """Exercise ExpressionValidator against XML command-validator specs.

    Each test supplies an XML ``<command_validator>`` description of an
    allowed command line, then checks concrete command strings with the
    __assertValid / __assertInvalid helpers at the bottom of the class.
    Job-relative paths are generated by __job_file (job id '1') inside
    the temporary directory provided by TempDirectoryTestCase.
    """
    def test_literal(self):
        """A single <literal> matches exactly its value and nothing else."""
        xml = """
        <command_validator>
            <literal value="tophat2" />
        </command_validator>"""
        self.__assertValid(xml, "tophat2")
        self.__assertInvalid(xml, "bowtie")
    def test_two_literals(self):
        """Consecutive <literal>s must appear as separate tokens."""
        xml = """
        <command_validator>
            <literal value="python" />
            <literal value="setup.py" />
        </command_validator>"""
        self.__assertValid(xml, "python setup.py")
        self.__assertInvalid(xml, "pythonsetup.py")
    def test_parameter(self):
        """<parameter> accepts both '--name value' and '--name=value'."""
        xml = """
        <command_validator>
            <literal value="tophat2" />
            <parameter name="--mate-std-dev">
                <literal value="4" />
            </parameter>
        </command_validator>"""
        self.__assertValid(xml, "tophat2 --mate-std-dev 4")
        self.__assertValid(xml, "tophat2 --mate-std-dev=4")
        self.__assertInvalid(xml, "tophat2 --mate-std-dev=5")
    def test_integer(self):
        """<integer> accepts integer values but rejects floats."""
        xml = """
        <command_validator>
            <literal value="tophat2" />
            <parameter name="--mate-std-dev">
                <integer />
            </parameter>
        </command_validator>"""
        self.__assertValid(xml, "tophat2 --mate-std-dev 4")
        self.__assertValid(xml, "tophat2 --mate-std-dev=4")
        self.__assertInvalid(xml, "tophat2 --mate-std-dev=5.0")
    def test_float(self):
        """<float> accepts int/decimal/exponent forms, not text."""
        xml = """
        <command_validator>
            <literal value="tophat2" />
            <parameter name="--mate-std-dev">
                <float />
            </parameter>
        </command_validator>"""
        self.__assertValid(xml, "tophat2 --mate-std-dev 4")
        self.__assertValid(xml, "tophat2 --mate-std-dev=4")
        self.__assertValid(xml, "tophat2 --mate-std-dev=5.0")
        self.__assertValid(xml, "tophat2 --mate-std-dev=4e10")
        self.__assertValid(xml, "tophat2 --mate-std-dev=-1.0e10")
        self.__assertValid(xml, "tophat2 --mate-std-dev=-.0e10")
        self.__assertInvalid(xml, "tophat2 --mate-std-dev=cat")
    def test_tool_wrapper(self):
        """<tool_wrapper> matches only the wrapper path inside the job dir."""
        xml = """
        <command_validator>
            <tool_wrapper name="tool1_wrapper.py" />
        </command_validator>
        """
        self.__assertValid(xml, "%s" % self.__job_file('tool_files', 'tool1_wrapper.py'))
        self.__assertInvalid(xml, "tool1_wrapper.py")
    def test_config_file(self):
        """<configfile> must resolve to the named file in the job's configs dir."""
        xml = """
        <command_validator>
            <literal value="tophat2" />
            <configfile name="top_opts" />
        </command_validator>
        """
        self.__assertValid(xml, "tophat2 %s" % self.__job_file('configs', 'top_opts'))
        self.__assertInvalid(xml, "tophat2 ../%s" % self.__job_file('configs', 'top_opts'))
        self.__assertInvalid(xml, "tophat2 %s" % self.__job_file('configs', 'top_optsX'))
    def test_input_file(self):
        """<input> must be a path in the job's inputs dir (no traversal)."""
        xml = """
        <command_validator>
            <literal value="tophat2" />
            <input />
        </command_validator>
        """
        self.__assertValid(xml, "tophat2 %s" % self.__job_file("inputs", "dataset_23412.dat"))
        self.__assertInvalid(xml, "tophat2 %s/../../../dataset23412.dat" % self.__job_file("inputs", "dataset_23412.dat"))
        self.__assertInvalid(xml, "tophat2 ../%s" % self.__job_file("inputs", "dataset_23412.dat"))
    def test_two_inputs(self):
        """Each <input> element validates its own path independently."""
        xml = """
        <command_validator>
            <literal value="tophat2" />
            <input />
            <input />
        </command_validator>
        """
        self.__assertValid(xml, "tophat2 %s %s" % (self.__job_file("inputs", "dataset_23412.dat"),
                                                   self.__job_file("inputs", "dataset_1.dat")))
        self.__assertInvalid(xml, "tophat2 %s ../%s" % (self.__job_file("inputs", "dataset_23412.dat"),
                                                        self.__job_file("inputs", "dataset_1.dat")))
    def test_outputs_file(self):
        """<output> paths must live in the job's outputs directory."""
        xml = """
        <command_validator>
            <literal value="tophat2" />
            <output />
            <output />
        </command_validator>
        """
        self.__assertValid(xml, "tophat2 %s %s" % (self.__job_file("outputs", "dataset_23412.dat"),
                                                   self.__job_file("outputs", "dataset_1.dat")))
        self.__assertInvalid(xml, "tophat2 %s ../%s" % (self.__job_file("outputs", "dataset_23412.dat"),
                                                        self.__job_file("outputs", "dataset_1.dat")))
    def test_outputs_from_work_dir(self):
        """from_work_dir matches the named file in the working dir only."""
        xml = """
        <command_validator>
            <literal value="tophat2" />
            <output />
            <output from_work_dir="junctions.bed" />
        </command_validator>
        """
        self.__assertValid(xml, "tophat2 %s %s" % (self.__job_file("outputs", "dataset_23412.dat"),
                                                   self.__job_file("working", "junctions.bed")))
        self.__assertInvalid(xml, "tophat2 %s ../%s" % (self.__job_file("outputs", "dataset_23412.dat"),
                                                        self.__job_file("working", "..", "junctions.bed")))
    def test_single_quotes(self):
        """single_quote="true" requires the value wrapped in single quotes."""
        xml = """
        <command_validator>
            <literal value="tophat2" />
            <parameter name="--mate-std-dev">
                <literal value="4" single_quote="true" />
            </parameter>
        </command_validator>"""
        self.__assertValid(xml, "tophat2 --mate-std-dev '4'")
        self.__assertValid(xml, "tophat2 --mate-std-dev='4'")
        self.__assertInvalid(xml, "tophat2 --mate-std-dev=4")
        self.__assertInvalid(xml, "tophat2 --mate-std-dev=\"4\"")
    def test_double_quotes(self):
        """double_quote="true" requires the value wrapped in double quotes."""
        xml = """
        <command_validator>
            <literal value="tophat2" />
            <parameter name="--mate-std-dev">
                <literal value="4" double_quote="true" />
            </parameter>
        </command_validator>"""
        self.__assertValid(xml, "tophat2 --mate-std-dev \"4\"")
        self.__assertValid(xml, "tophat2 --mate-std-dev=\"4\"")
        self.__assertInvalid(xml, "tophat2 --mate-std-dev=4")
        self.__assertInvalid(xml, "tophat2 --mate-std-dev='4'")
    def test_min(self):
        """min="0" allows the parameter to be absent; present values still match."""
        xml = """
        <command_validator>
            <literal value="tophat2" />
            <parameter name="--mate-std-dev" min="0">
                <literal value="4" double_quote="true" />
            </parameter>
        </command_validator>"""
        self.__assertValid(xml, "tophat2 --mate-std-dev \"4\"")
        self.__assertValid(xml, "tophat2 ")
        self.__assertInvalid(xml, "tophat2 --mate-std-dev=5")
    def test_max(self):
        """max="2" caps how many times the element may repeat."""
        xml = """
        <command_validator>
            <literal value="tophat2" />
            <regex value="[a-z]" max="2"/>
        </command_validator>"""
        self.__assertValid(xml, "tophat2 a")
        self.__assertValid(xml, "tophat2 a b")
        self.__assertInvalid(xml, "tophat2 a b c")
    def test_group(self):
        """<group> children concatenate without separators."""
        xml = """
        <command_validator>
            <literal value="tophat2" />
            <group>
                <literal value="a" />
                <regex value="[b-z]+" />
                <literal value="a" />
            </group>
        </command_validator>"""
        self.__assertValid(xml, "tophat2 aba")
        self.__assertValid(xml, "tophat2 abba")
        self.__assertInvalid(xml, "tophat2 abbbaa")
        self.__assertInvalid(xml, "tophat2 abb")
    def test_group_separate_by(self):
        """separate_by joins group children with the given separator."""
        xml = """
        <command_validator>
            <literal value="tophat2" />
            <group separate_by="x">
                <literal value="a" />
                <regex value="[b-z]+" />
                <literal value="a" />
            </group>
        </command_validator>"""
        self.__assertValid(xml, "tophat2 axbxa")
        self.__assertValid(xml, "tophat2 axbxbxa")
        self.__assertInvalid(xml, "tophat2 abba")
        self.__assertInvalid(xml, "tophat2 axbxbxbxaxa")
    def __job_file(self, *args):
        """Return a path under this test's job directory (job id '1')."""
        return join(self.temp_directory, '1', *args)
    def __validator(self, xml):
        """Build an ExpressionValidator from an XML spec string."""
        return ExpressionValidator(xml)
    @property
    def job_directory(self):
        """A JobDirectory rooted at the temp directory, job id '1'."""
        return JobDirectory(self.temp_directory, '1')
    def __is_valid(self, xml, contents):
        """True if *contents* validates against the XML spec."""
        return self.__validator(xml).validate(self.job_directory, contents)
    def __assertValid(self, xml, contents):
        """Fail unless *contents* validates against *xml*."""
        self.assertTrue(self.__is_valid(xml, contents), "%s did not validate against %s" % (contents, xml))
    def __assertInvalid(self, xml, contents):
        """Fail if *contents* (wrongly) validates against *xml*."""
        self.assertFalse(self.__is_valid(xml, contents), "%s falsely validated against %s" % (contents, xml))
|
jmchilton/lwr
|
test/validator_test.py
|
Python
|
apache-2.0
| 9,029
|
[
"Bowtie"
] |
484fc11d89a535a787ee1e57c64a853a356dc8de55479daf63188447201e5b5d
|
"""
Some generic utility routines for number handling and
calculating (specific) variances
"""
import logging
import itertools
import numpy
from tkp.utility import containers
from tkp.utility.memoize import Memoize
from tkp.sourcefinder import utils
from tkp.sourcefinder import stats
from tkp.sourcefinder import extract
try:
import ndimage
except ImportError:
from scipy import ndimage
logger = logging.getLogger(__name__)
#
# Hard-coded configuration parameters; not user settable.
#
INTERPOLATE_ORDER = 1 # Spline order for grid interpolation (1 = bilinear)
MEDIAN_FILTER = 0 # If non-zero, apply a median filter of size
                  # MEDIAN_FILTER to the background and RMS grids prior
                  # to interpolating.
MF_THRESHOLD = 0 # If MEDIAN_FILTER is non-zero, only use the filtered
                 # grid when the (absolute) difference between the raw
                 # and filtered grids is larger than MF_THRESHOLD.
DEBLEND_MINCONT = 0.005 # Min. fraction of island flux in deblended subisland
STRUCTURING_ELEMENT = [[0,1,0], [1,1,1], [0,1,0]] # Island connectivity: 4-connected (von Neumann) neighbourhood
class ImageData(object):
    """Encapsulates an image in terms of a numpy array + meta/headerdata.

    This is your primary contact point for interaction with images: it
    includes facilities for source extraction and measurement, etc.
    """
    def __init__(self, data, beam, wcs, margin=0, radius=0, back_size_x=32,
                 back_size_y=32, residuals=True
                 ):
        """Sets up an ImageData object.

        *Args:*
          - data (2D numpy.ndarray): actual image data
          - wcs (utility.coordinates.wcs): world coordinate system
            specification
          - beam (3-tuple): beam shape specification as
            (semimajor, semiminor, theta)

        *Kwargs:*
          - margin (int): if non-zero, mask a border this many pixels
            wide around the image edge (see _get_data)
          - radius: if non-zero, mask everything beyond this distance
            from the image centre (see _get_data)
          - back_size_x, back_size_y (int): cell size of the background
            and RMS grids computed by __grids()
          - residuals (bool): stored flag; presumably controls whether
            residual maps are kept -- used outside this excerpt, confirm
        """
        # Do data, wcs and beam need deepcopy?
        # Probably not (memory overhead, in particular for data),
        # but then the user shouldn't change them outside ImageData in the
        # mean time
        self.rawdata = data # a 2D numpy array
        self.wcs = wcs # a utility.coordinates.wcs instance
        self.beam = beam # tuple of (semimaj, semimin, theta)
        # Caches keyed by threshold: clip masks and labelled-island maps
        # (populated lazily by fit_to_point; cleared by clearcache()).
        self.clip = {}
        self.labels = {}
        # Frequency band placeholders; overwritten elsewhere -- TODO confirm.
        self.freq_low = 1
        self.freq_high = 1
        self.back_size_x = back_size_x
        self.back_size_y= back_size_y
        self.margin = margin
        self.radius = radius
        self.residuals = residuals
###########################################################################
# #
# Properties and attributes. #
# #
# Properties are attributes managed by methods; rather than calling the #
# method directly, the attribute automatically invokes it. We can use #
# this to do cunning transparent caching ("memoizing") etc; see the #
# Memoize class. #
# #
# clearcache() clears all the memoized data, which can get quite large. #
# It may be wise to call this, for example, in an exception handler #
# dealing with MemoryErrors. #
# #
###########################################################################
    @Memoize
    def _grids(self):
        """Gridded RMS and background data for interpolating.

        Lazily computed by __grids() and cached by Memoize; cleared with
        ``del self.grids`` (see clearcache()).
        """
        return self.__grids()
    grids = property(fget=_grids, fdel=_grids.delete)
    @Memoize
    def _backmap(self):
        """Background map: interpolated from the grids, or user-supplied."""
        if not hasattr(self, "_user_backmap"):
            return self._interpolate(self.grids['bg'])
        else:
            return self._user_backmap
    def _set_backmap(self, bgmap):
        # Installing a user background map invalidates the memoized
        # background and the background-subtracted data derived from it.
        self._user_backmap = bgmap
        del(self.backmap)
        del(self.data_bgsubbed)
    backmap = property(fget=_backmap, fdel=_backmap.delete, fset=_set_backmap)
    @Memoize
    def _get_rm(self):
        """RMS map: interpolated from the grids, or user-supplied."""
        if not hasattr(self, "_user_noisemap"):
            # roundup clips interpolated values below the grid minimum,
            # guarding against near-zero noise estimates (see _interpolate).
            return self._interpolate(self.grids['rms'], roundup=True)
        else:
            return self._user_noisemap
    def _set_rm(self, noisemap):
        # Installing a user noise map invalidates the memoized RMS map.
        self._user_noisemap = noisemap
        del(self.rmsmap)
    rmsmap = property(fget=_get_rm, fdel=_get_rm.delete, fset=_set_rm)
    @Memoize
    def _get_data(self):
        """Masked image data"""
        # We will ignore all the data which is masked for the rest of the
        # sourcefinding process. We build up the mask by stacking ("or-ing
        # together") a number of different effects:
        #
        # * A margin from the edge of the image;
        # * Any data outside a given radius from the centre of the image;
        # * Data which is "obviously" bad (equal to 0 or NaN).
        mask = numpy.zeros((self.xdim, self.ydim))
        if self.margin:
            # Mask a border self.margin pixels wide on every edge.
            margin_mask = numpy.ones((self.xdim, self.ydim))
            margin_mask[self.margin:-self.margin, self.margin:-self.margin] = 0
            mask = numpy.logical_or(mask, margin_mask)
        if self.radius:
            # Mask everything beyond self.radius from the image centre.
            radius_mask = utils.circular_mask(self.xdim, self.ydim, self.radius)
            mask = numpy.logical_or(mask, radius_mask)
        # NOTE(review): the list above mentions masking zero-valued pixels,
        # but only NaNs are actually masked here -- confirm intent.
        mask = numpy.logical_or(mask, numpy.isnan(self.rawdata))
        return numpy.ma.array(self.rawdata, mask=mask)
    data = property(fget=_get_data, fdel=_get_data.delete)
    @Memoize
    def _get_data_bgsubbed(self):
        """Background subtracted masked image data.

        Derived from the (masked) data and backmap properties; cached.
        """
        return self.data - self.backmap
    data_bgsubbed = property(fget=_get_data_bgsubbed,
                             fdel=_get_data_bgsubbed.delete)
    @property
    def xdim(self):
        """X pixel dimension of (unmasked) data"""
        return self.rawdata.shape[0]
    @property
    def ydim(self):
        """Y pixel dimension of (unmasked) data"""
        return self.rawdata.shape[1]
    @property
    def pixmax(self):
        """Maximum pixel value (pre-background subtraction).

        Computed on the masked array, so masked pixels are ignored.
        """
        return self.data.max()
    @property
    def pixmin(self):
        """Minimum pixel value (pre-background subtraction).

        Computed on the masked array, so masked pixels are ignored.
        """
        return self.data.min()
    def clearcache(self):
        """Zap any calculated data stored in this object.

        Clear the background and rms maps, labels, clip, and any locally held
        data. All of these can be reconstructed from the data accessor.

        Note that this *must* be run to pick up any new settings.
        """
        self.labels.clear()
        self.clip.clear()
        # ``del`` on these properties routes to Memoize.delete, dropping
        # the cached value so the next access recomputes it.
        del(self.backmap)
        del(self.rmsmap)
        del(self.data)
        del(self.data_bgsubbed)
        del(self.grids)
        # Residual maps are only present after extraction has run.
        if hasattr(self, 'residuals_from_gauss_fitting'):
            del(self.residuals_from_gauss_fitting)
        if hasattr(self, 'residuals_from_deblending'):
            del(self.residuals_from_deblending)
###########################################################################
# #
# General purpose image handling. #
# #
# Routines for saving and trimming data, and calculating background/RMS #
# maps (in conjuntion with the properties above). #
# #
###########################################################################
# Private "support" methods
    def __grids(self):
        """Calculate background and RMS grids of this image.

        These grids can be interpolated up to make maps of the original image
        dimensions: see _interpolate().

        This is called automatically when ImageData.backmap,
        ImageData.rmsmap or ImageData.fdrmap is first accessed.

        Returns:
            dict: {'rms': <masked grid>, 'bg': <masked grid>}; cells with
            no usable data hold False and are masked.
        """
        # We set up a dedicated logging subchannel, as the sigmaclip loop
        # logging is very chatty:
        sigmaclip_logger = logging.getLogger(__name__+'.sigmaclip')
        # there's no point in working with the whole of the data array
        # if it's masked.
        useful_chunk = ndimage.find_objects(numpy.where(self.data.mask, 0, 1))
        assert(len(useful_chunk) == 1)
        useful_data = self.data[useful_chunk[0]]
        my_xdim, my_ydim = useful_data.shape
        rmsgrid, bggrid = [], []
        # NB ``xrange``: this module targets Python 2.
        for startx in xrange(0, my_xdim, self.back_size_x):
            rmsrow, bgrow = [], []
            for starty in xrange(0, my_ydim, self.back_size_y):
                # One background cell of (back_size_x, back_size_y) pixels.
                chunk = useful_data[
                    startx:startx + self.back_size_x,
                    starty:starty + self.back_size_y
                ].ravel()
                if not chunk.any():
                    # Empty / fully-masked cell: record False, masked below.
                    rmsrow.append(False)
                    bgrow.append(False)
                    continue
                chunk, sigma, median, num_clip_its = stats.sigma_clip(
                    chunk, self.beam)
                if len(chunk) == 0 or not chunk.any():
                    rmsrow.append(False)
                    bgrow.append(False)
                else:
                    mean = numpy.mean(chunk)
                    rmsrow.append(sigma)
                    # In the case of a crowded field, the distribution will be
                    # skewed and we take the median as the background level.
                    # Otherwise, we take 2.5 * median - 1.5 * mean. This is the
                    # same as SExtractor: see discussion at
                    # <http://terapix.iap.fr/forum/showthread.php?tid=267>.
                    # (mean - median) / sigma is a quick n' dirty skewness
                    # estimator devised by Karl Pearson.
                    if numpy.fabs(mean - median) / sigma >= 0.3:
                        sigmaclip_logger.debug(
                            'bg skewed, %f clipping iterations', num_clip_its)
                        bgrow.append(median)
                    else:
                        sigmaclip_logger.debug(
                            'bg not skewed, %f clipping iterations', num_clip_its)
                        bgrow.append(2.5 * median - 1.5 * mean)
            rmsgrid.append(rmsrow)
            bggrid.append(bgrow)
        # Convert to masked arrays: cells recorded as False carry no data.
        rmsgrid = numpy.ma.array(
            rmsgrid, mask=numpy.where(numpy.array(rmsgrid) == False, 1, 0))
        bggrid = numpy.ma.array(
            bggrid, mask=numpy.where(numpy.array(bggrid) == False, 1, 0))
        return {'rms': rmsgrid, 'bg': bggrid}
    def _interpolate(self, grid, roundup=False):
        """
        Interpolate a grid to produce a map of the dimensions of the image.

        Args:

            grid (numpy.ma.MaskedArray)

        Kwargs:

            roundup (bool)

        Returns:

            (numpy.ma.MaskedArray)

        Used to transform the RMS, background or FDR grids produced by
        L{_grids()} to a map we can compare with the image data.

        If roundup is true, values of the resultant map which are lower than
        the input grid are trimmed.
        """
        # there's no point in working with the whole of the data array if it's
        # masked.
        useful_chunk = ndimage.find_objects(numpy.where(self.data.mask, 0, 1))
        assert(len(useful_chunk) == 1)
        my_xdim, my_ydim = self.data[useful_chunk[0]].shape
        if MEDIAN_FILTER:
            # Optionally smooth the grid first; see the module-level
            # MEDIAN_FILTER / MF_THRESHOLD constants.
            f_grid = ndimage.median_filter(grid, MEDIAN_FILTER)
            if MF_THRESHOLD:
                grid = numpy.where(
                    numpy.fabs(f_grid - grid) > MF_THRESHOLD, f_grid, grid
                )
            else:
                grid = f_grid
        # Spline interpolation of order INTERPOLATE_ORDER (1 => bilinear,
        # despite the original "bicubic" label here).
        xratio = float(my_xdim)/self.back_size_x
        yratio = float(my_ydim)/self.back_size_y
        # First arg: starting point. Second arg: ending point. Third arg:
        # 1j * number of points. (Why is this complex? Sometimes, NumPy has an
        # utterly baffling API...)
        slicex = slice(-0.5, -0.5+xratio, 1j*my_xdim)
        slicey = slice(-0.5, -0.5+yratio, 1j*my_ydim)
        my_map = numpy.ma.MaskedArray(numpy.zeros(self.data.shape),
                                      mask = self.data.mask)
        # Remove the MaskedArrayFutureWarning warning and keep old numpy < 1.11
        # behavior
        my_map.unshare_mask()
        my_map[useful_chunk[0]] = ndimage.map_coordinates(
            grid, numpy.mgrid[slicex, slicey],
            mode='nearest', order=INTERPOLATE_ORDER)
        # If the input grid was entirely masked, then the output map must
        # also be masked: there's no useful data here. We don't search for
        # sources on a masked background/RMS, so this data will be cleanly
        # skipped by the rest of the sourcefinder
        if numpy.ma.getmask(grid).all():
            my_map.mask = True
        elif roundup:
            # In some cases, the spline interpolation may produce values
            # lower than the minimum value in the map. If required, these
            # can be trimmed off. No point doing this if the map is already
            # fully masked, though.
            my_map = numpy.ma.MaskedArray(
                data = numpy.where(
                    my_map >= numpy.min(grid), my_map, numpy.min(grid)),
                mask = my_map.mask
            )
        return my_map
###########################################################################
# #
# Source extraction. #
# #
# Provides for both traditional (islands-above-RMS) and FDR source #
# extraction systems. #
# #
###########################################################################
def extract(self, det, anl, noisemap=None, bgmap=None, labelled_data=None,
labels=None, deblend_nthresh=0, force_beam=False):
"""
Kick off conventional (ie, RMS island finding) source extraction.
Kwargs:
det (float): detection threshold, as a multiple of the RMS
noise. At least one pixel in a source must exceed this
for it to be regarded as significant.
anl (float): analysis threshold, as a multiple of the RMS
noise. All the pixels within the island that exceed
this will be used when fitting the source.
noisemap (numpy.ndarray):
bgmap (numpy.ndarray):
deblend_nthresh (int): number of subthresholds to use for
deblending. Set to 0 to disable.
force_beam (bool): force all extractions to have major/minor axes
equal to the restoring beam
Returns:
:class:`tkp.utility.containers.ExtractionResults`
"""
if anl > det:
logger.warn(
"Analysis threshold is higher than detection threshold"
)
# If the image data is flat we may as well crash out here with a
# sensible error message, otherwise the RMS estimation code will
# crash out with a confusing error later.
if numpy.ma.max(self.data) == numpy.ma.min(self.data):
raise RuntimeError("Bad data: Image data is flat")
if (type(bgmap).__name__ == 'ndarray' or
type(bgmap).__name__ == 'MaskedArray'):
if bgmap.shape != self.backmap.shape:
raise IndexError("Background map has wrong shape")
else:
self.backmap = bgmap
if (type(noisemap).__name__ == 'ndarray' or
type(noisemap).__name__ == 'MaskedArray'):
if noisemap.shape != self.rmsmap.shape:
raise IndexError("Noisemap has wrong shape")
if noisemap.min() < 0:
raise ValueError("RMS noise cannot be negative")
else:
self.rmsmap = noisemap
if labelled_data is not None and labelled_data.shape != self.data.shape:
raise ValueError("Labelled map is wrong shape")
return self._pyse(
det * self.rmsmap, anl * self.rmsmap, deblend_nthresh, force_beam,
labelled_data=labelled_data, labels=labels
)
    def reverse_se(self, det):
        """Run source extraction on the negative of this image.

        Obviously, there should be no sources in the negative image, so this
        tells you about the false positive rate.

        We need to clear cached data -- background map, cached clips, etc --
        before & after doing this, as they'll interfere with the normal
        extraction process. If this is regularly used, we'll want to
        implement a separate cache.

        Args:
            det (float): detection threshold, as a multiple of the RMS noise.
        """
        self.labels.clear()
        self.clip.clear()
        # Negate in place, extract, then negate back to restore the data.
        self.data_bgsubbed *= -1
        results = self.extract(det=det)
        self.data_bgsubbed *= -1
        self.labels.clear()
        self.clip.clear()
        return results
def fd_extract(self, alpha, anl=None, noisemap=None,
bgmap=None, deblend_nthresh=0, force_beam=False
):
"""False Detection Rate based source extraction.
The FDR procedure guarantees that <FDR> < alpha.
See `Hopkins et al., AJ, 123, 1086 (2002)
<http://adsabs.harvard.edu/abs/2002AJ....123.1086H>`_.
"""
# The correlation length in config.py is used not only for the
# calculation of error bars with the Condon formulae, but also for
# calculating the number of independent pixels.
corlengthlong, corlengthshort = utils.calculate_correlation_lengths(
self.beam[0], self.beam[1])
C_n = (1.0 / numpy.arange(
round(0.25 * numpy.pi * corlengthlong *
corlengthshort + 1))[1:]).sum()
# Calculate the FDR threshold
# Things will go terribly wrong in the line below if the interpolated
# noise values get very close or below zero. Use INTERPOLATE_ORDER=1
# or the roundup option.
if (type(bgmap).__name__ == 'ndarray' or
type(bgmap).__name__ == 'MaskedArray'):
if bgmap.shape != self.backmap.shape:
raise IndexError("Background map has wrong shape")
else:
self.backmap = bgmap
if (type(noisemap).__name__ == 'ndarray' or
type(noisemap).__name__ == 'MaskedArray'):
if noisemap.shape != self.rmsmap.shape:
raise IndexError("Noisemap has wrong shape")
if noisemap.min()<0:
raise ValueError("RMS noise cannot be negative")
else:
self.rmsmap = noisemap
normalized_data = self.data_bgsubbed/self.rmsmap
n1 = numpy.sqrt(2 * numpy.pi)
prob = numpy.sort(numpy.ravel(numpy.exp(-0.5 * normalized_data**2)/n1))
lengthprob = float(len(prob))
compare = (alpha / C_n) * numpy.arange(lengthprob+1)[1:] / lengthprob
# Find the last undercrossing, see, e.g., fig. 9 in Miller et al., AJ
# 122, 3492 (2001). Searchsorted is not used because the array is not
# sorted.
try:
index = (numpy.where(prob-compare < 0.)[0]).max()
except ValueError:
# Everything below threshold
return containers.ExtractionResults()
fdr_threshold = numpy.sqrt(-2.0 * numpy.log(n1 * prob[index]))
# Default we require that all source pixels are above the threshold,
# not only the peak pixel. This gives a better guarantee that indeed
# the fraction of false positives is less than fdr_alpha in config.py.
# See, e.g., Hopkins et al., AJ 123, 1086 (2002).
if not anl:
anl = fdr_threshold
return self._pyse(fdr_threshold * self.rmsmap, anl * self.rmsmap,
deblend_nthresh, force_beam)
def flux_at_pixel(self, x, y, numpix=1):
"""Return the background-subtracted flux at a certain position
in the map"""
# numpix is the number of pixels to look around the target.
# e.g. numpix = 1 means a total of 9 pixels, 1 in each direction.
return self.data_bgsubbed[y-numpix:y+numpix+1,
x-numpix:x+numpix+1].max()
@staticmethod
def box_slice_about_pixel(x, y, box_radius):
"""
Returns a slice centred about (x,y), of width = 2*int(box_radius) + 1
"""
ibr = int(box_radius)
x = int(x)
y = int(y)
return (slice(x - ibr, x + ibr + 1),
slice(y - ibr, y + ibr + 1))
    def fit_to_point(self, x, y, boxsize, threshold, fixed):
        """Fit an elliptical Gaussian to a specified point on the image.

        The fit is carried on a square section of the image, of length
        *boxsize* & centred at pixel coordinates *x*, *y*. Any data
        below *threshold* * rmsmap is not used for fitting. If *fixed*
        is set to ``position``, then the pixel coordinates are fixed
        in the fit.

        Args:
            x, y: pixel coordinates of the fit centre.
            boxsize: side length (pixels) of the fitting region.
            threshold: analysis threshold as a multiple of the RMS map,
                or None to disable thresholding.
            fixed: 'position', 'position+shape', or None.

        Returns an instance of :class:`tkp.sourcefinder.extract.Detection`,
        or None when the fit cannot be attempted or fails to converge.

        Raises:
            ValueError: if the fit region is below *threshold*.
            IndexError: if the fit region is too small or off the edge.
            TypeError: if *fixed* is not a recognised value.
        """
        logger.debug("Force-fitting pixel location ({},{})".format(x, y))
        # First, check that x and y are actually valid semi-positive integers.
        # Otherwise,
        # If they are too high (positive), then indexing will fail
        # BUT, if they are negative, then we get wrap-around indexing
        # and the fit continues at the wrong position!
        # NOTE(review): this permits x == xdim / y == ydim, which are
        # out-of-range indices -- confirm whether '>=' was intended.
        if (x < 0 or x >self.xdim
            or y < 0 or y >self.ydim ):
            logger.warning("Dropping forced fit at ({},{}), "
                           "pixel position outside image".format(x,y)
                           )
            return None
        # Next, check if any of the central pixels (in a 3x3 box about the
        # fitted pixel position) have been Masked
        # (e.g. if NaNs, or close to image edge) - reject if so.
        central_pixels_slice = ImageData.box_slice_about_pixel(x, y, 1)
        if self.data.mask[central_pixels_slice].any():
            logger.warning(
                "Dropping forced fit at ({},{}), "
                "Masked pixel in central fitting region".format(x,y))
            return None
        if ((
                # Recent NumPy
                hasattr(numpy.ma.core, "MaskedConstant") and
                isinstance(self.rmsmap, numpy.ma.core.MaskedConstant)
            ) or (
                # Old NumPy
                numpy.ma.is_masked(self.rmsmap[int(x), int(y)])
        )):
            logger.error("Background is masked: cannot fit")
            return None
        chunk = ImageData.box_slice_about_pixel(x, y, boxsize/2.0)
        if threshold is not None:
            # We'll mask out anything below threshold*self.rmsmap from the fit.
            labels, num = self.labels.setdefault( #Dictionary mapping threshold -> islands map
                threshold,
                ndimage.label(
                    self.clip.setdefault( #Dictionary mapping threshold -> mask
                        threshold,
                        numpy.where(
                            self.data_bgsubbed > threshold * self.rmsmap, 1, 0
                        )
                    )
                )
            )
            mylabel = labels[int(x), int(y)]
            if mylabel == 0:  # 'Background'
                raise ValueError("Fit region is below specified threshold, fit aborted.")
            # Keep only the island containing (x, y); mask everything else.
            mask = numpy.where(labels[chunk] == mylabel, 0, 1)
            fitme = numpy.ma.array(self.data_bgsubbed[chunk], mask=mask)
            if len(fitme.compressed()) < 1:
                raise IndexError("Fit region too close to edge or too small")
        else:
            fitme = self.data_bgsubbed[chunk]
            if fitme.size < 1:
                raise IndexError("Fit region too close to edge or too small")
        if not len(fitme.compressed()):
            logger.error("All data is masked: cannot fit")
            return None
        # set argument for fixed parameters based on input string
        if fixed == 'position':
            fixed = {'xbar': boxsize/2.0, 'ybar': boxsize/2.0}
        elif fixed == 'position+shape':
            fixed = {'xbar': boxsize/2.0, 'ybar': boxsize/2.0,
                     'semimajor': self.beam[0],
                     'semiminor': self.beam[1],
                     'theta': self.beam[2]}
        elif fixed == None:
            fixed = {}
        else:
            raise TypeError("Unkown fixed parameter")
        if threshold is not None:
            threshold_at_pixel = threshold * self.rmsmap[int(x), int(y)]
        else:
            threshold_at_pixel = None
        try:
            measurement, residuals = extract.source_profile_and_errors(
                fitme,
                threshold_at_pixel,
                self.rmsmap[int(x), int(y)],
                self.beam,
                fixed=fixed
            )
        except ValueError:
            # Fit failed to converge
            # Moments are not applicable when holding parameters fixed
            logger.error("Gaussian fit failed at %f, %f", x, y)
            return None
        try:
            assert(abs(measurement['xbar']) < boxsize)
            assert(abs(measurement['ybar']) < boxsize)
        except AssertionError:
            # (logger.warn is a deprecated alias of warning)
            logger.warn('Fit falls outside of box.')
        # Translate fitted position from box-local to image coordinates.
        measurement['xbar'] += x-boxsize/2.0
        measurement['ybar'] += y-boxsize/2.0
        measurement.sig = (fitme / self.rmsmap[chunk]).max()
        return extract.Detection(measurement, self)
    def fit_fixed_positions(self, positions, boxsize, threshold=None,
                            fixed='position+shape',
                            ids=None):
        """
        Convenience function to fit a list of sources at the given positions

        This function wraps around fit_to_point().

        Args:
            positions (tuple): list of (RA, Dec) tuples. Positions to be fit,
                in decimal degrees.
            boxsize: See :py:func:`fit_to_point`
            threshold: as above.
            fixed: as above.
            ids (tuple): A list of identifiers. If not None, then must match
                the length and order of the ``requested_fits``. Any
                successfully fit positions will be returned in a tuple
                along with the matching id. As these are simply passed back to
                calling code they can be a string, tuple or whatever.

        In particular, boxsize is in pixel coordinates as in
        fit_to_point, not in sky coordinates.

        Returns:
            tuple: A list of successful fits.
                If ``ids`` is None, returns a single list of
                :class:`tkp.sourcefinder.extract.Detection` s.
                Otherwise, returns a tuple of two matched lists:
                ([detections], [matching_ids]).
        """
        if ids is not None:
            assert len(ids)==len(positions)
        successful_fits = []
        successful_ids = []
        for idx, posn in enumerate(positions):
            try:
                # Sky (RA, Dec) -> pixel coordinates via the image WCS.
                x, y, = self.wcs.s2p((posn[0], posn[1]))
            # NB Python 2-only except syntax; this module predates Python 3.
            except RuntimeError, e:
                # wcslib error codes 8/9 indicate an unconvertible position.
                if (str(e).startswith("wcsp2s error: 8:") or
                    str(e).startswith("wcsp2s error: 9:")):
                    logger.warning("Input coordinates (%.2f, %.2f) invalid: ",
                                   posn[0], posn[1])
                else:
                    raise
            else:
                try:
                    fit_results = self.fit_to_point(x, y,
                                                    boxsize=boxsize,
                                                    threshold=threshold,
                                                    fixed=fixed)
                    if not fit_results:
                        # We were unable to get a good fit
                        continue
                    if ( fit_results.ra.error == float('inf') or
                         fit_results.dec.error == float('inf')):
                        # NOTE(review): uses the root logger here, unlike the
                        # module ``logger`` used everywhere else -- confirm.
                        logging.warning("position errors extend outside image")
                    else:
                        successful_fits.append(fit_results)
                        if ids:
                            successful_ids.append(ids[idx])
                # ``e.message`` below is Python 2-only as well.
                except IndexError as e:
                    logger.warning("Input pixel coordinates (%.2f, %.2f) "
                                   "could not be fit because: " + e.message,
                                   posn[0], posn[1])
        if ids:
            return successful_fits, successful_ids
        return successful_fits
def label_islands(self, detectionthresholdmap, analysisthresholdmap):
"""
Return a lablled array of pixels for fitting.
Args:
detectionthresholdmap (numpy.ndarray):
analysisthresholdmap (numpy.ndarray):
Returns:
list of valid islands (list of int)
labelled islands (numpy.ndarray)
"""
# If there is no usable data, we return an empty set of islands.
if not len(self.rmsmap.compressed()):
logging.warning("RMS map masked; sourcefinding skipped")
return [], numpy.zeros(self.data_bgsubbed.shape, dtype=numpy.int)
# At this point, we select all the data which is eligible for
# sourcefitting. We are actually using three separate filters, which
# exclude:
#
# 1. Anything which has been masked before we reach this point;
# 2. Any pixels which fall below the analysis threshold at that pixel
# position;
# 3. Any pixels corresponding to a position where the RMS noise is
# less than RMS_FILTER (default 0.001) times the median RMS across
# the whole image.
#
# The third filter attempts to exclude those regions of the image
# which contain no usable data; for example, the parts of the image
# falling outside the circular region produced by awimager.
RMS_FILTER = 0.001
clipped_data = numpy.ma.where(
(self.data_bgsubbed > analysisthresholdmap) &
(self.rmsmap >= (RMS_FILTER * numpy.ma.median(self.rmsmap))),
1, 0
).filled(fill_value=0)
labelled_data, num_labels = ndimage.label(clipped_data, STRUCTURING_ELEMENT)
labels_below_det_thr, labels_above_det_thr = [], []
if num_labels > 0:
# Select the labels of the islands above the analysis threshold
# that have maximum values values above the detection threshold.
# Like above we make sure not to select anything where either
# the data or the noise map are masked.
# We fill these pixels in above_det_thr with -1 to make sure
# its labels will not be in labels_above_det_thr.
# NB data_bgsubbed, and hence above_det_thr, is a masked array;
# filled() sets all mased values equal to -1.
above_det_thr = (
self.data_bgsubbed - detectionthresholdmap
).filled(fill_value=-1)
# Note that we avoid label 0 (the background).
maximum_values = ndimage.maximum(
above_det_thr, labelled_data, numpy.arange(1, num_labels + 1)
)
# If there's only one island, ndimage.maximum will return a float,
# rather than a list. The rest of this function assumes that it's
# always a list, so we need to convert it.
if isinstance(maximum_values, float):
maximum_values = [maximum_values]
# We'll filter out the insignificant islands
for i, x in enumerate(maximum_values, 1):
if x < 0:
labels_below_det_thr.append(i)
else:
labels_above_det_thr.append(i)
# Set to zero all labelled islands that are below det_thr:
labelled_data = numpy.where(
numpy.in1d(labelled_data.ravel(), labels_above_det_thr).reshape(labelled_data.shape),
labelled_data, 0
)
return labels_above_det_thr, labelled_data
def _pyse(
        self, detectionthresholdmap, analysisthresholdmap,
        deblend_nthresh, force_beam, labelled_data=None, labels=None
):
    """
    Run Python-based source extraction on this image.

    Args:
        detectionthresholdmap (numpy.ndarray): per-pixel detection threshold.
        analysisthresholdmap (numpy.ndarray): per-pixel analysis threshold.
        deblend_nthresh (int): number of subthresholds for deblending. 0
            disables.
        force_beam (bool): force all extractions to have major/minor axes
            equal to the restoring beam
        labelled_data (numpy.ndarray): labelled island map (output of
            numpy.ndimage.label()). Will be calculated automatically if not
            provided.
        labels (tuple): list of labels in the island map to use for
            fitting. Defaults to an empty list. (Previously this was a
            mutable ``[]`` default argument — a classic Python pitfall.)
    Returns:
        (..utility.containers.ExtractionResults):

    This is described in detail in the "Source Extraction System" document
    by John Swinbank, available from TKP svn.
    """
    # Guard against the shared-state behaviour of mutable default arguments.
    if labels is None:
        labels = []
    # Map our chunks onto a list of islands.
    island_list = []
    if labelled_data is None:
        labels, labelled_data = self.label_islands(
            detectionthresholdmap, analysisthresholdmap
        )
    # Get a bounding box for each island:
    # NB Slices ordered by label value (1...N,)
    # 'None' returned for missing label indices.
    slices = ndimage.find_objects(labelled_data)
    for label in labels:
        chunk = slices[label - 1]
        analysis_threshold = (analysisthresholdmap[chunk] /
                              self.rmsmap[chunk]).max()
        # In selected_data only the pixels with the "correct"
        # (see above) labels are retained. Other pixel values are
        # set to -(bignum).
        # In this way, disconnected pixels within (rectangular)
        # slices around islands (particularly the large ones) do
        # not affect the source measurements.
        selected_data = numpy.ma.where(
            labelled_data[chunk] == label,
            self.data_bgsubbed[chunk].data, -extract.BIGNUM
        ).filled(fill_value=-extract.BIGNUM)

        island_list.append(
            extract.Island(
                selected_data,
                self.rmsmap[chunk],
                chunk,
                analysis_threshold,
                detectionthresholdmap[chunk],
                self.beam,
                deblend_nthresh,
                DEBLEND_MINCONT,
                STRUCTURING_ELEMENT
            )
        )

    # If required, we can save the 'left overs' from the deblending and
    # fitting processes for later analysis. This needs setting up here:
    if self.residuals:
        self.residuals_from_gauss_fitting = numpy.zeros(self.data.shape)
        self.residuals_from_deblending = numpy.zeros(self.data.shape)
        for island in island_list:
            self.residuals_from_deblending[island.chunk] += (
                island.data.filled(fill_value=0.))

    # Deblend each of the islands to its constituent parts, if necessary
    if deblend_nthresh:
        deblended_list = map(lambda x: x.deblend(), island_list)
        island_list = list(utils.flatten(deblended_list))

    # Set up the fixed fit parameters if 'force beam' is on:
    if force_beam:
        fixed = {'semimajor': self.beam[0],
                 'semiminor': self.beam[1],
                 'theta': self.beam[2]}
    else:
        fixed = None

    # Iterate over the list of islands and measure the source in each,
    # appending it to the results list.
    results = containers.ExtractionResults()
    for island in island_list:
        fit_results = island.fit(fixed=fixed)
        if fit_results:
            measurement, residual = fit_results
        else:
            # Failed to fit; drop this island and go to the next.
            continue
        try:
            det = extract.Detection(measurement, self, chunk=island.chunk)
            if (det.ra.error == float('inf') or
                    det.dec.error == float('inf')):
                logger.warn('Bad fit from blind extraction at pixel coords:'
                            '%f %f - measurement discarded'
                            '(increase fitting margin?)', det.x, det.y)
            else:
                results.append(det)
        except RuntimeError:
            # Detection construction rejected the measurement; skip island.
            logger.error("Island not processed; unphysical?")

        if self.residuals:
            self.residuals_from_deblending[island.chunk] -= (
                island.data.filled(fill_value=0.))
            self.residuals_from_gauss_fitting[island.chunk] += residual

    def is_usable(det):
        # Check that both ends of each axis are usable; that is, that they
        # fall within an unmasked part of the image.
        # The axis will not likely fall exactly on a pixel number, so
        # check all the surroundings.
        def check_point(x, y):
            x = (int(x), int(numpy.ceil(x)))
            y = (int(y), int(numpy.ceil(y)))
            for position in itertools.product(x, y):
                try:
                    if self.data.mask[position[0], position[1]]:
                        # Point falls in mask
                        return False
                except IndexError:
                    # Point falls completely outside image
                    return False
            # Point is ok
            return True

        for point in (
                (det.start_smaj_x, det.start_smaj_y),
                (det.start_smin_x, det.start_smin_y),
                (det.end_smaj_x, det.end_smaj_y),
                (det.end_smin_x, det.end_smin_y)
        ):
            if not check_point(*point):
                logger.debug("Unphysical source at pixel %f, %f" % (det.x.value, det.y.value))
                return False
        return True

    # Filter will return a list; ensure we return an ExtractionResults.
    return containers.ExtractionResults(filter(is_usable, results))
|
transientskp/tkp
|
tkp/sourcefinder/image.py
|
Python
|
bsd-2-clause
| 39,254
|
[
"Gaussian"
] |
0c1e60717278a27ada6b884715a14f7950d20c0d9b2d660993c2d37573822eab
|
import logging, threading, time
from Queue import Queue, Empty
from galaxy import config, util, model, tools, jobs
log = logging.getLogger( __name__ )
# This class is designed to provide job dispatch fairness for Galaxy users. It uses a
# dictionary of session (key) Queue (value) pairs to ensure no user (session)
# can hog the Galaxy run queue.
# Each get() call will return a job from a consecutive session
# in the dict or throw an Empty exception if there are no jobs in any queue
class UserRoundRobin( object ):
    """
    Class UserRoundRobin provides per-user job scheduling fairness. It uses a dictionary of session-Queue() pairs
    to ensure no user/session can hog the Galaxy run queue. Each get() call will either return a job from a consecutive session
    in the dict or throw an Empty exception if the dict has no jobs in any queues.
    """
    def __init__(self, app):
        """Initialise the (empty) dictionary-of-queues and its locks.

        :param app: Galaxy application object; its config supplies
            ``job_queue_cleanup_interval`` (minutes).
        """
        self.app = app
        # Dictionary Of Queues: session id -> Queue of that session's jobs.
        # (Only an instance attribute now; the former unused mutable class
        # attribute of the same name was removed.)
        self.__DOQ = {}
        # Snapshot of DOQ keys plus an iterator over it. Both persist
        # between get() calls so consecutive calls continue round-robin
        # where the previous call left off.
        self.keylist = []
        self.iterator = None
        # these are used to decide when to do a DOQ cleanup
        self.cleanup_tstamp = time.time()
        # get from ini/config and convert to secs
        self.cleanup_mininterval = self.app.config.job_queue_cleanup_interval * 60
        # Don't allow cleanup interval less than 5 minutes (should this be hardcoded ?)
        if self.cleanup_mininterval < 300 :
            self.cleanup_mininterval = 300
        # locks for get and put methods
        self.putlock = threading.Lock()
        self.getlock = threading.Lock()
        log.info("RoundRobin policy: initialized ")

    def put(self, job):
        """Insert a job into the queue belonging to its user/session,
        creating that queue on first use."""
        self.putlock.acquire()
        try :
            # get this job's user/session id
            sessid = job.get_session_id()
            # "in" replaces the Python-2-only dict.has_key()
            if sessid in self.__DOQ :
                self.__DOQ[sessid].put(job)
                log.debug("RoundRobin queue: inserted new job for user/session = %d" % sessid)
            else :
                self.__DOQ[sessid] = Queue()
                self.__DOQ[sessid].put(job)
                log.debug("RoundRobin queue: user/session did not exist, created new jobqueue for session = %d" % sessid)
        finally :
            self.putlock.release()

    def get(self) :
        """Return a job from the next non-empty session queue.

        Raises Empty when no session has any queued jobs. Also triggers
        the periodic dictionary cleanup at the configured interval.
        """
        self.getlock.acquire()
        try :
            # get the next user/session in the dict
            sessionid = self.__get_next_session()
            if sessionid is not None :
                log.debug("RoundRobin queue: retrieving job from job queue for session = %d" % sessionid)
                return self.__DOQ[sessionid].get()
            else :
                # sessionid = None implies empty dictionary, throw back to caller
                raise Empty
        finally :
            # Clean up DOQ
            self.__timed_clean_up() #cleanup will happen at specified intervals
            self.getlock.release()

    def get_nowait(self):
        """Alias for get(), in case Queue.get_nowait() semantics are expected."""
        return self.get()

    def qsize(self):
        """Total number of queued jobs across all sessions.

        Locks both get() and put() during the count. Analogous to
        Queue.Queue.qsize(); not guaranteed to be exact.
        """
        try :
            count = 0
            self.getlock.acquire()
            self.putlock.acquire()
            for sessid in self.__DOQ :
                count += self.__DOQ[sessid].qsize()
            return count
        finally :
            self.putlock.release()
            self.getlock.release()

    def __get_next_session(self):
        """Return the session id of the next non-empty queue, or None.

        Iterates over a snapshot (list) of the DOQ keys because an
        iterator over the dict itself breaks if the dict changes size
        between calls. On exhaustion the scan restarts once from the
        beginning before giving up; the duplicated scan logic of the
        original implementation is folded into a two-pass loop.
        """
        for _pass in (0, 1):
            if self.iterator is None :
                # (Re)start iteration from a fresh snapshot of the keys.
                # list() guarantees a snapshot on both Python 2 and 3.
                self.keylist = list(self.__DOQ.keys())
                self.iterator = iter(self.keylist)
            try:
                # get the first available job from any nonempty queue
                while 1 :
                    tmpsid = next(self.iterator)
                    if not self.__DOQ[tmpsid].empty() :
                        return tmpsid
            except StopIteration :
                # End of snapshot; recreate the iterator on the next pass.
                self.iterator = None
        # Two exhausted passes: no session has queued jobs at this moment.
        return None

    def __timed_clean_up(self):
        """Drop empty session queues, at most once per cleanup interval.

        Iterates over a snapshot (list) of the keys because deleting
        entries while iterating the dict itself would break.
        """
        tmp_tstamp = time.time()
        if ( (tmp_tstamp - self.cleanup_tstamp) >
             self.cleanup_mininterval ) :
            for each in list(self.__DOQ.keys()) :
                if self.__DOQ[each].empty() :
                    del self.__DOQ[each]
                    log.debug("RoundRobin queue clean up: Removed job queue entry from dictionary for session = %d" % each)
            self.cleanup_tstamp = tmp_tstamp
|
volpino/Yeps-EURAC
|
lib/galaxy/jobs/schedulingpolicy/roundrobin.py
|
Python
|
mit
| 6,801
|
[
"Galaxy"
] |
5eecbecac2ca149a3a794bf4f765ddd210384f2a022cf17a5d816298c3c004fd
|
from intermol.forces.abstract_type import AbstractType
class AbstractAngleType(AbstractType):
    # Ordered attribute names shared by every angle type, plus the
    # Desmond-only constraint flag `c`.
    __slots__ = ['bondingtype1', 'bondingtype2', 'bondingtype3', 'c']

    def __init__(self, bondingtype1, bondingtype2, bondingtype3, c=False):
        """An abstract representation of a generic angle type. """
        super(AbstractAngleType, self).__init__()
        # Assign the three participating bonding types and the constraint
        # flag (c: is the bond constrained or not? Desmond only).
        for slot, value in zip(self.__slots__,
                               (bondingtype1, bondingtype2, bondingtype3, c)):
            setattr(self, slot, value)
|
shirtsgroup/InterMol
|
intermol/forces/abstract_angle_type.py
|
Python
|
mit
| 551
|
[
"Desmond"
] |
c4b5f215274caba6e91a27fd9d49b020b6e32cc427264d385f18f26e4078850f
|
########################################################################
# File : InProcessComputingElement.py
# Author : Stuart Paterson
########################################################################
""" The simplest Computing Element instance that submits jobs locally.
This is also the standard "CE" invoked from the JobAgent
"""
__RCSID__ = "$Id$"
import os
import stat
from DIRAC import S_OK, S_ERROR
from DIRAC.Resources.Computing.ComputingElement import ComputingElement
from DIRAC.Core.Utilities.ThreadScheduler import gThreadScheduler
from DIRAC.Core.Utilities.Subprocess import systemCall
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
class InProcessComputingElement(ComputingElement):
  """Computing Element that executes the payload locally via systemCall.

  This is the standard "CE" invoked from the JobAgent.
  """

  #############################################################################
  def __init__(self, ceUniqueID):
    """ Standard constructor.

    :param str ceUniqueID: unique identifier of this CE instance
    """
    super(InProcessComputingElement, self).__init__(ceUniqueID)
    # Number of jobs successfully executed by this CE instance
    self.submittedJobs = 0

  #############################################################################
  def _addCEConfigDefaults(self):
    """ Method to make sure all necessary Configuration Parameters are defined
    """
    # First assure that any global parameters are loaded
    ComputingElement._addCEConfigDefaults(self)
    # Now InProcess specific ones (currently none)

  #############################################################################
  def submitJob(self, executableFile, proxy, **kwargs):
    """ Method to submit job (overriding base method).

    :param str executableFile: file to execute via systemCall.
                               Normally the JobWrapperTemplate when invoked by the JobAgent.
    :param str proxy: the proxy used for running the job (the payload). It will be dumped to a file.
    :return: S_OK (possibly with 'PayloadFailed' set) or S_ERROR
    """
    ret = getProxyInfo()
    if not ret['OK']:
      pilotProxy = None
    else:
      pilotProxy = ret['Value']['path']
    self.log.notice('Pilot Proxy:', pilotProxy)

    payloadEnv = dict(os.environ)
    payloadProxy = ''
    renewTask = None
    if proxy:
      self.log.verbose('Setting up proxy for payload')
      result = self.writeProxyToFile(proxy)
      if not result['OK']:
        return result
      payloadProxy = result['Value']  # proxy file location
      payloadEnv['X509_USER_PROXY'] = payloadProxy

      # Periodically renew the payload proxy from the pilot proxy
      self.log.verbose('Starting process for monitoring payload proxy')
      result = gThreadScheduler.addPeriodicTask(self.proxyCheckPeriod, self.monitorProxy,
                                                taskArgs=(pilotProxy, payloadProxy),
                                                executions=0, elapsedTime=0)
      if result['OK']:
        renewTask = result['Value']

    # Ensure the executable is readable and executable
    # (os.R_OK | os.X_OK replaces the former magic constant 5)
    if not os.access(executableFile, os.R_OK | os.X_OK):
      os.chmod(executableFile, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
    cmd = os.path.abspath(executableFile)
    self.log.verbose('CE submission command: %s' % (cmd))
    result = systemCall(0, cmd, callbackFunction=self.sendOutput, env=payloadEnv)

    # Clean up the dumped proxy and the renewal task
    if payloadProxy:
      os.unlink(payloadProxy)
    if renewTask:
      gThreadScheduler.removeTask(renewTask)

    ret = S_OK()
    if not result['OK']:
      self.log.error('Fail to run InProcess', result['Message'])
    elif result['Value'][0] > 128:
      # negative exit values are returned as 256 - exit
      self.log.warn('InProcess Job Execution Failed')
      self.log.info('Exit status:', result['Value'][0] - 256)
      if result['Value'][0] - 256 == -2:
        error = 'JobWrapper initialization error'
      elif result['Value'][0] - 256 == -1:
        error = 'JobWrapper execution error'
      else:
        error = 'InProcess Job Execution Failed'
      res = S_ERROR(error)
      res['Value'] = result['Value'][0] - 256
      return res
    elif result['Value'][0] > 0:
      self.log.warn('Fail in payload execution')
      self.log.info('Exit status:', result['Value'][0])
      ret['PayloadFailed'] = result['Value'][0]
    else:
      self.log.debug('InProcess CE result OK')

    self.submittedJobs += 1
    return ret

  #############################################################################
  def getCEStatus(self):
    """ Method to return information on running and pending jobs.

    An in-process CE runs at most one job synchronously, so all counters
    are reported as zero.
    """
    result = S_OK()
    result['SubmittedJobs'] = 0
    result['RunningJobs'] = 0
    result['WaitingJobs'] = 0
    return result

  #############################################################################
  def monitorProxy(self, pilotProxy, payloadProxy):
    """ Monitor the payload proxy and renew as necessary.

    Delegates to the base-class implementation.
    """
    return self._monitorProxy(pilotProxy, payloadProxy)
|
fstagni/DIRAC
|
Resources/Computing/InProcessComputingElement.py
|
Python
|
gpl-3.0
| 4,670
|
[
"DIRAC"
] |
7ccded491ae0a520d591f195dd7494451be902f7cf61e1a81bfe7003aae4cf1a
|
from __future__ import print_function
from bose_einstein import bose_einstein
from constant import htr_to_K, htr_to_meV, htr_to_THz
import argparser
import numpy as np
args = argparser.read_argument('Renormalize EPW calculation')
window = args.energy / htr_to_meV
if args.vb: offset = -8.75333295715961e-03
else: offset = 8.53193322468371e-03
if args.vb: band_str = '36'
else: band_str = '37'
temp_str = '%03dK' % args.temp
if args.acoustic:
temp_str = '%dK' % args.temp
qpt_str = '10000'
elif args.temp == 1:
qpt_str = '050000'
elif args.temp == 150:
qpt_str = '100000'
elif args.temp == 300:
qpt_str = '100000'
else:
print("temperature " + str(args.temp) + " not available")
exit()
for dir_str in ('gx', 'gy', 'gz'):
if args.acoustic:
filename = 'data/epw_all_28424_'+temp_str+'_5meV_acoustic_only/data_'+dir_str+'_'+band_str+'_10000.dat'
else:
filename = 'data/res_'+temp_str+'_1meV/data_'+dir_str+'_'+band_str+'_'+qpt_str+'.dat'
file_epw = open(filename, 'r')
first = True
for line in file_epw:
data = line.split()
eps = np.float(data[1]) - offset
ImS = np.float(data[2])
if first and args.method == 2:
zz = 1.0 / (1.0 + np.float(data[4]))
else:
zz = 1.0
if first or abs(eps) >= window:
print(args.temp, 2.0 * ImS * zz * htr_to_THz, zz)
if not first: break
first = False
|
mmdg-oxford/papers
|
Schlipf-PRL-2018/model/epw_lifetime.py
|
Python
|
gpl-3.0
| 1,370
|
[
"EPW"
] |
86c735b6e7751ff028c652f808999d6f6d3cb1cd9c4e1c2b118562f5b3480cd6
|
# -*- coding: utf-8 -*-
# Cypress -- A C++ interface to PyNN
# Copyright (C) 2016 Andreas Stöckel
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Reader of the binnf serialisation format. Note that this format is not
standardised and always in sync with the corresponding implementation in CppNAM.
"""
import numpy as np
import struct
class BinnfException(Exception):
    """
    Exception raised for any error encountered while serialising or
    deserialising binnf data.
    """
# Numbers and constants defining the serialisation format

# Markers framing every block on the wire
BLOCK_START_SEQUENCE = 0x665a8cda
BLOCK_END_SEQUENCE = 0x420062cb

# Block payload types
BLOCK_TYPE_MATRIX = 0x01
BLOCK_TYPE_LOG = 0x02

# Numeric type tags used in matrix headers
TYPE_INT8 = 0
TYPE_UINT8 = 1
TYPE_INT16 = 2
TYPE_UINT16 = 3
TYPE_INT32 = 4
TYPE_UINT32 = 5
TYPE_FLOAT32 = 6
TYPE_INT64 = 7
TYPE_FLOAT64 = 8

# Type tag -> numpy dtype name
TYPE_MAP = {
    TYPE_INT8: "int8",
    TYPE_UINT8: "uint8",
    TYPE_INT16: "int16",
    TYPE_UINT16: "uint16",
    TYPE_INT32: "int32",
    TYPE_UINT32: "uint32",
    TYPE_FLOAT32: "float32",
    TYPE_INT64: "int64",
    TYPE_FLOAT64: "float64"
}
# Inverse lookup: numpy dtype name -> type tag.
# dict.items() works on Python 2 and 3 alike (iteritems() was Python-2-only).
INV_TYPE_MAP = {v: k for k, v in TYPE_MAP.items()}

# Log message severity levels
SEV_DEBUG = 10
SEV_INFO = 20
SEV_WARNING = 30
SEV_ERROR = 40
SEV_FATAL = 50

# Helper functions used to determine the length of a storage block
BLOCK_TYPE_LEN = 4
SIZE_LEN = 4
TYPE_LEN = 4
def _str_len(s):
    """
    Returns the serialised length of a string in bytes: a 32-bit size
    prefix followed by the payload.
    """
    return SIZE_LEN + len(s)


def _header_len(header):
    """
    Returns the serialised length of the header structure in bytes.
    """
    total = SIZE_LEN
    for column in header:
        total += _str_len(column["name"]) + TYPE_LEN
    return total


def _matrix_len(matrix):
    """
    Returns the serialised length of a matrix in bytes: a 32-bit row count
    followed by the raw element data.
    """
    return SIZE_LEN + matrix.size * matrix.dtype.itemsize


def _matrix_block_len(name, header, matrix):
    """
    Returns the total length of a binnf matrix block in bytes.
    """
    parts = (BLOCK_TYPE_LEN, _str_len(name), _header_len(header),
             _matrix_len(matrix))
    return sum(parts)


def _log_block_len(module, msg):
    """
    Returns the total length of a binnf log block in bytes (the constant 12
    covers the 8-byte timestamp plus the 4-byte severity).
    """
    return BLOCK_TYPE_LEN + 12 + _str_len(module) + _str_len(msg)
# Serialisation helper functions
def _write_int(fd, i):
    """Write a 32-bit native-endian integer to fd."""
    fd.write(struct.pack("i", i))


def _write_double(fd, d):
    """Write a 64-bit native-endian float to fd."""
    fd.write(struct.pack("d", d))


def _write_str(fd, s):
    """Write a length-prefixed string (no terminator) to fd."""
    _write_int(fd, len(s))
    fd.write(s)
# Deserialisation helper functions
def _synchronise(fd, marker):
    """
    Scan fd byte-by-byte until the 32-bit little-endian marker is consumed.

    Returns True once the marker is found, False if the stream was empty to
    begin with; raises BinnfException on end-of-file mid-stream.
    """
    sync = 0
    first = True
    while True:
        c = fd.read(1)
        if not c:
            if first:
                return False
            raise BinnfException("Unexpected end of file")
        # bytearray() yields ints on both Python 2 (c is str) and Python 3
        # (c is bytes); the former ord(c[0]) raised TypeError on Python 3,
        # where indexing bytes already produces an int.
        sync = (sync >> 8) | (bytearray(c)[0] << 24)
        if sync == marker:
            return True
        first = False
def _read_int(fd):
    """Read a 32-bit native-endian integer; raise BinnfException on EOF."""
    data = fd.read(4)
    if len(data) != 4:
        # Explicit length check: a short (partial) read previously slipped
        # past the truthiness test and surfaced as an opaque struct.error.
        raise BinnfException("Unexpected end of file")
    return struct.unpack("i", data)[0]


def _read_double(fd):
    """Read a 64-bit native-endian float; raise BinnfException on EOF."""
    data = fd.read(8)
    if len(data) != 8:
        raise BinnfException("Unexpected end of file")
    return struct.unpack("d", data)[0]


def _read_str(fd):
    """Read a length-prefixed string; raise BinnfException on EOF.

    The explicit length comparison also fixes a bug where a legitimate
    empty string (size prefix 0, so fd.read(0) == "") was misreported as
    end-of-file.
    """
    size = _read_int(fd)
    data = fd.read(size)
    if len(data) != size:
        raise BinnfException("Unexpected end of file")
    return data


def _tell(fd):
    """
    Returns the current cursor position within the given file descriptor.
    Implements the C++ behaviour of iostream::tellg(). Returns -1 if the feature
    is not implemented (e.g. because we're reading from a stream).
    """
    try:
        return fd.tell()
    except Exception:
        # "except Exception" instead of a bare except, which would also have
        # swallowed KeyboardInterrupt / SystemExit.
        return -1
def header_to_dtype(header):
    """Convert a binnf header (list of {name, type} dicts) to a numpy dtype."""
    # An explicit list is required: on Python 3 map() returns a lazy
    # iterator, which np.dtype() does not accept.
    return np.dtype([(col["name"], TYPE_MAP[col["type"]]) for col in header])


def dtype_to_header(dt):
    """
    Sorts the fields in the numpy datatype dt by their byte offset and converts
    them into an array of dictionaries containing the name and type of each
    entry.
    """
    # List comprehension instead of map(): callers index and len() the
    # result, which fails with the lazy map object returned on Python 3.
    return [{"name": name, "type": INV_TYPE_MAP[field[0].name]}
            for name, field in sorted(dt.fields.items(), key=lambda x: x[1][1])]
def serialise_matrix(fd, name, matrix):
    """
    Serialises a binnf data block.

    :param fd: target file descriptor.
    :param name: is the data block name.
    :param matrix: structured numpy array containing the data that should be
        serialised; the header is derived from its dtype.
    """
    # Fetch the matrix header from the matrix
    matrix = np.require(matrix, requirements=["C_CONTIGUOUS"])
    header = dtype_to_header(matrix.dtype)

    # Write the block header
    _write_int(fd, BLOCK_START_SEQUENCE)
    _write_int(fd, _matrix_block_len(name, header, matrix))
    _write_int(fd, BLOCK_TYPE_MATRIX)

    # Write the name string
    _write_str(fd, name)

    # Write the data header
    # (range() replaces the Python-2-only xrange())
    _write_int(fd, len(header))
    for i in range(len(header)):
        _write_str(fd, header[i]["name"])
        _write_int(fd, header[i]["type"])

    # Write the matrix data
    rows = matrix.shape[0]
    if (len(matrix.shape) == 1):
        cols = len(matrix.dtype.descr)
    else:
        cols = matrix.shape[1]
    if cols != len(header):
        raise BinnfException(
            "Disecrepancy between matrix number of columns and header")
    _write_int(fd, rows)
    if hasattr(matrix, 'tobytes'):  # only exists since Numpy 1.9
        fd.write(matrix.tobytes())
    else:
        matrix.tofile(fd, sep="")

    # Finalise the block
    _write_int(fd, BLOCK_END_SEQUENCE)
def serialise_log(fd, time, severity, module, msg):
    """
    Serialises a log message and sends it to the receiving side.

    :param time: is the Unix timestamp at which the message was recorded
    :param severity: is the severity level of the message, should be one of
    SEV_DEBUG, SEV_INFO, SEV_WARNING, SEV_ERROR or SEV_FATAL.
    :param module: is the name of the module the error message originated from
    :param msg: is the actual message that should be logged.
    """
    # Block framing: start marker, total payload length, block type tag.
    _write_int(fd, BLOCK_START_SEQUENCE)
    _write_int(fd, _log_block_len(module, msg))
    _write_int(fd, BLOCK_TYPE_LOG)
    # Payload: timestamp, severity, then origin module and message text.
    _write_double(fd, time)
    _write_int(fd, severity)
    for text in (module, msg):
        _write_str(fd, text)
    # Closing marker.
    _write_int(fd, BLOCK_END_SEQUENCE)
def deserialise_matrix(fd):
    """
    Deserialises a matrix block body and returns (name, header, matrix).
    """
    # Read the name
    name = _read_str(fd)

    # Read the header. A list comprehension replaces the Python-2 map()
    # construct, whose lazy Python-3 result does not support item
    # assignment. The dict literal evaluates "name" before "type", which
    # preserves the original read order.
    header_len = _read_int(fd)
    header = [{"name": _read_str(fd), "type": _read_int(fd)}
              for _ in range(header_len)]

    # Read the data. np.frombuffer() accepts the raw byte string directly;
    # the former buffer() wrapper only exists on Python 2. np.require()
    # copies into a writeable, contiguous array as before.
    rows = _read_int(fd)
    fmt = header_to_dtype(header)
    matrix = np.require(
        np.frombuffer(fd.read(rows * fmt.itemsize), dtype=fmt),
        requirements=["WRITEABLE", "C_CONTIGUOUS"])
    return name, header, matrix
def deserialise_log(fd):
    """
    Reads a log message from the given file descriptor. A log message consists
    of a double containing the log timestamp, the severity string, the module
    string, and the actual message.
    """
    # Field order is fixed by the wire format; see serialise_log().
    timestamp = _read_double(fd)
    level = _read_int(fd)
    origin = _read_str(fd)
    text = _read_str(fd)
    return timestamp, level, origin, text
def deserialise(fd):
    """
    Deserialises a Binnf block from the sequence. Returns the block type and
    a tuple containing the actual data
    """
    # Abort cleanly (None, None) if the stream holds no further start marker.
    if not _synchronise(fd, BLOCK_START_SEQUENCE):
        return None, None
    block_len = _read_int(fd)
    pos0 = _tell(fd)

    # Dispatch on the block type tag.
    block_type = _read_int(fd)
    if block_type == BLOCK_TYPE_LOG:
        res = deserialise_log(fd)
    elif block_type == BLOCK_TYPE_MATRIX:
        res = deserialise_matrix(fd)
    else:
        raise BinnfException("Unexpected block type")

    # Cross-check the declared block length (only when fd is seekable;
    # _tell() returns -1 for unseekable streams).
    pos1 = _tell(fd)
    if pos0 >= 0 and pos1 >= 0 and pos1 - pos0 != block_len:
        raise BinnfException("Invalid block length")

    # Every block must be terminated by the end marker.
    if _read_int(fd) != BLOCK_END_SEQUENCE:
        raise BinnfException("Block end sequence not found")
    return block_type, res
def read_network(fd):
    """
    Reads a complete network description from fd.

    Returns a dict with keys "populations", "parameters", "spike_times",
    "signals", "list_connections" and, when present in the stream,
    "list_connection_header" and "group_connections".

    :raises BinnfException: on malformed input or missing mandatory fields.
    """
    # Mandatory header fields for each known matrix name.
    EXPECTED_FIELDS = {
        "populations": ["count", "type"],
        "parameters": ["pid", "nid"],
        "target": ["pid", "nid"],
        "spike_times": ["times"],
        "list_connection": ["nid_src", "nid_tar", "weight", "delay"],
        "list_connection_header": ["pid_src", "pid_tar", "inh", "file"],
        "group_connections":
        ["pid_src", "nid_src_start", "nid_src_end", "pid_tar", "nid_tar_start",
         "nid_tar_end", "connector_id", "weight", "delay", "parameter", "self"],
    }

    def validate_matrix(name, matrix):
        """
        Makes sure the matrix contains the correct fields.
        """
        fields = matrix.dtype.fields
        if (name in EXPECTED_FIELDS):
            for n in EXPECTED_FIELDS[name]:
                if not (n in fields):
                    # Report the missing field itself; the original code
                    # mistakenly reported the matrix name here.
                    raise BinnfException("Expected mandatory header field \"" +
                                         n + "\"")

    # Construct the network descriptor from the binnf data
    network = {"parameters": [], "spike_times": [],
               "signals": [], "list_connections": []}
    # "spike_times" blocks apply to the most recent "target" block.
    target = None
    while True:
        # Deserialise a single input block
        block_type, res = deserialise(fd)

        # Abort once the end of the file is reached
        if block_type is None:
            break
        elif block_type != BLOCK_TYPE_MATRIX:
            raise BinnfException("Unexpected Binnf block!")

        # Make sure all mandatory matrix fields are present
        name, _, matrix = res
        validate_matrix(name, matrix)

        # Read the data matrices
        if name == "populations":
            if "populations" in network:
                raise BinnfException(
                    "Only a single \"populations\" instance is supported")
            network["populations"] = matrix
        elif name == "list_connection_header":
            if "list_connection_header" in network:
                raise BinnfException(
                    "Only a single \"list_connection_header\" instance is supported")
            network["list_connection_header"] = matrix
        elif name == "list_connection":
            network["list_connections"].append(matrix)
        elif name == "group_connections":
            if "group_connections" in network:
                raise BinnfException(
                    "Only a single \"group_connections\" instance is supported")
            network["group_connections"] = matrix
        elif name == "parameters":
            network["parameters"].append(matrix)
        elif name == "signals":
            network["signals"].append(matrix)
        elif name == "target":
            if matrix.size != 1:
                raise BinnfException(
                    "Target matrix must have exactly one element")
            target = {"pid": matrix[0]["pid"], "nid": matrix[0]["nid"]}
        elif name == "spike_times":
            if target is None:
                raise BinnfException("Target neuron was not set")
            network["spike_times"].append({
                "pid": target["pid"],
                "nid": target["nid"],
                "times": matrix
            })
            # A target addresses exactly one following spike_times block.
            target = None
        else:
            raise BinnfException("Unsupported matrix type \"" + name + "\"")

    return network
# Headers used during serialisation
# "target" block header: addresses one neuron by population id / neuron id.
HEADER_TARGET = [{"name": "pid",
                  "type": TYPE_INT32}, {"name": "nid",
                                        "type": TYPE_INT32}]
HEADER_TARGET_DTYPE = header_to_dtype(HEADER_TARGET)

# "runtimes" block header: wall-clock timings (seconds) of the simulation.
HEADER_RUNTIMES = [{"name": "total",
                    "type": TYPE_FLOAT64}, {"name": "sim",
                                            "type": TYPE_FLOAT64},
                   {"name": "initialize",
                    "type": TYPE_FLOAT64}, {"name": "finalize",
                                            "type": TYPE_FLOAT64}]
HEADER_RUNTIMES_DTYPE = header_to_dtype(HEADER_RUNTIMES)
def write_result(fd, res):
    """
    Serialises the simulation result to binnf.

    :param fd: target file descriptor.
    :param res: simulation result, indexed as res[pid][signal][nid].
    """
    # range() replaces the Python-2-only xrange().
    for pid in range(len(res)):
        for signal in res[pid]:
            for nid in range(len(res[pid][signal])):
                # Address the neuron the following data block belongs to.
                serialise_matrix(
                    fd,
                    "target",
                    np.array(
                        [(pid, nid)], dtype=HEADER_TARGET_DTYPE))
                matrix = res[pid][signal][nid]
                if signal == "spikes":
                    serialise_matrix(fd, "spike_times", matrix)
                else:
                    serialise_matrix(fd, "trace_" + signal, matrix)
def write_runtimes(fd, times):
    """
    Serialises the simulation runtimes to binnf.

    :param fd: target file descriptor.
    :param times: object containing "total", "sim", "initialize" and "finalize"
    keys with the runtimes in seconds.
    """
    # Field order must match HEADER_RUNTIMES_DTYPE.
    row = (times["total"], times["sim"], times["initialize"],
           times["finalize"])
    serialise_matrix(fd, "runtimes",
                     np.array([row], dtype=HEADER_RUNTIMES_DTYPE))
# Export definitions
# write_result and write_runtimes are public API used by the backend and
# were previously missing from the export list (backward-compatible add).
__all__ = ["serialise_matrix", "serialise_log", "deserialise", "read_network",
           "write_result", "write_runtimes", "BinnfException"]
|
hbp-sanncs/cypress
|
resource/backend/pynn/binnf.py
|
Python
|
gpl-3.0
| 14,039
|
[
"NEURON"
] |
3ab253f7da351e9c92d38a3a218c5bebbe609f6f44bccecbe921f0c0e860fc86
|
from __future__ import print_function, division
import unittest, numpy as np
from pyscf import gto, scf
from pyscf.nao import gw as gw_c
class KnowValues(unittest.TestCase):
  # Regression check for the GW screened Coulomb interaction on a tiny
  # H2-like system.

  def test_si_ref(self):
    """ This is GW: build an RHF reference (cc-pVDZ) and evaluate the
    screened interaction si_c at a few complex frequencies (Hartree). """
    mol = gto.M( verbose = 1, atom = '''H 0 0 0; H 0.17 0.7 0.587''', basis = 'cc-pvdz',)
    gto_mf = scf.RHF(mol)
    gto_mf.kernel()
    gw = gw_c(mf=gto_mf, gto=mol)
    # Frequencies: one on the imaginary axis, two just off the real axis.
    ww = np.array([0.0+1j*4.0, 1.0+1j*0.1, -2.0-1j*0.1])
    si0_fm = gw.si_c(ww)  # NOTE(review): result is never asserted — this is a smoke test only

if __name__ == "__main__": unittest.main()
|
gkc1000/pyscf
|
pyscf/nao/test/test_0053_gw_si_ref.py
|
Python
|
apache-2.0
| 522
|
[
"PySCF"
] |
2fbe7f512f24dfe478bac2c07a09c7907f7d75603c892eb01acbca93bde3d444
|
# -*- coding: utf-8 -*-
'''
*******************************************************************
* File: validation.py
* Description:
* Author: HarshaRani
* E-mail: hrani@ncbs.res.in
********************************************************************/
/**********************************************************************
** This program is part of 'MOOSE', the
** Messaging Object Oriented Simulation Environment,
** also known as GENESIS 3 base code.
** copyright (C) 2003-2017 Upinder S. Bhalla. and NCBS
Created : Thu May 12 10:19:00 2016(+0530)
Version
Last-Updated: Fri Jul 28 15:50:00 2017(+0530)
By:
**********************************************************************/
**********************************************************************/
2019
Jan 19: - returned errorMsg
'''
# Flag recording whether the optional libsbml dependency could be imported;
# SBML validation is simply unavailable without it.
foundLibSBML_ = False
try:
    from libsbml import *
    foundLibSBML_ = True
except Exception as e:
    # libsbml is optional — silently fall back to foundLibSBML_ = False.
    pass
def validateModel(sbmlDoc):
    """Validate an SBML document.

    Returns a (bool, str) tuple: (True, "") when the document is complete,
    consistent and valid, otherwise (False, accumulated error/warning text).
    Unused variables (`tobecontinued`, `warning`) and a no-op expression
    statement (`validationMessages;`) from the original were removed.
    """
    if sbmlDoc.getNumErrors() > 0:
        # Reader already recorded errors: concatenate and bail out.
        validError = ""
        for i in range(0, sbmlDoc.getNumErrors()):
            validError = validError + sbmlDoc.getError(i).getMessage()
        return False, validError

    if (not sbmlDoc):
        print("validateModel: given a null SBML Document")
        return False, "validateModel: given a null SBML Document"

    consistencyMessages = ""
    validationMessages = ""
    noProblems = True
    numCheckFailures = 0
    numConsistencyErrors = 0
    numConsistencyWarnings = 0
    numValidationErrors = 0
    numValidationWarnings = 0
    # Once the whole model is done and before it gets written out,
    # it's important to check that the whole model is in fact complete,
    # consistent and valid.
    numCheckFailures = sbmlDoc.checkInternalConsistency()
    if (numCheckFailures > 0):
        for i in range(0, numCheckFailures):
            sbmlErr = sbmlDoc.getError(i)
            if (sbmlErr.isFatal() or sbmlErr.isError()):
                noProblems = False
                numConsistencyErrors += 1
            else:
                numConsistencyWarnings += 1
        constStr = sbmlDoc.printErrors()
        if sbmlDoc.printErrors():
            consistencyMessages = constStr

    # If the internal checks fail, it makes little sense to attempt
    # further validation, because the model may be too compromised to
    # be properly interpreted.
    if (numConsistencyErrors > 0):
        consistencyMessages += "Further validation aborted."
    else:
        numCheckFailures = sbmlDoc.checkConsistency()
        if (numCheckFailures > 0):
            for i in range(0, (numCheckFailures)):
                consistencyMessages = sbmlDoc.getErrorLog().toString()
                sbmlErr = sbmlDoc.getError(i)
                if (sbmlErr.isFatal() or sbmlErr.isError()):
                    noProblems = False
                    numValidationErrors += 1
                else:
                    numValidationWarnings += 1
        oss = sbmlDoc.printErrors()
        validationMessages = oss

    if (noProblems):
        return True, ""
    else:
        if consistencyMessages is None:
            consistencyMessages = ""
        if consistencyMessages != "":
            print("consistency Warning: " + consistencyMessages)

        if (numConsistencyErrors > 0):
            print("ERROR: encountered " + str(numConsistencyErrors) + " consistency error in model " + sbmlDoc.getModel().getId() + "'.")
        if (numConsistencyWarnings > 0):
            print("Notice: encountered " + str(numConsistencyWarnings) + " consistency warning in model " + sbmlDoc.getModel().getId() + "'.")

        if (numValidationErrors > 0):
            print("ERROR: encountered " + str(numValidationErrors) + " validation error in model " + sbmlDoc.getModel().getId() + "'.")
            validationMessages = consistencyMessages
        if (numValidationWarnings > 0):
            print("Notice: encountered " + str(numValidationWarnings) + " validation warning in model " + sbmlDoc.getModel().getId() + "'.")
            validationMessages = consistencyMessages
        if validationMessages:
            print(validationMessages)
        return False, validationMessages
if __name__ == '__main__':
    # Ad-hoc manual check: validate a local SBML test-suite file.
    import libsbml
    sbmlDoc = libsbml.readSBML('00001-sbml-l3v1.xml')
    validateModel(sbmlDoc)
|
BhallaLab/moose-core
|
python/moose/SBML/validation.py
|
Python
|
gpl-3.0
| 4,606
|
[
"MOOSE"
] |
a562181f2d9e2888f6308f53a971e92da2f0135276f5fc225e49e62f6835ea47
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.